diff --git a/Makefile b/Makefile
index 6aa5a2c6f7f0..010807100447 100644
--- a/Makefile
+++ b/Makefile
@@ -1126,3 +1126,9 @@ get-dependency-version:
 .PHONY: _update-all
 _update-all:
 	@(cd hack && go run update/update_all/update_all.go)
+
+
+
+# targets for tests on prow
+include ./hack/prow/prow.mk
+
diff --git a/build-log.txt b/build-log.txt
new file mode 100644
index 000000000000..98273661fe58
--- /dev/null
+++ b/build-log.txt
@@ -0,0 +1,47759 @@
+Docker in Docker enabled, initializing...
+================================================================================
+net.ipv6.conf.all.disable_ipv6 = 0
+net.ipv6.conf.all.forwarding = 1
+Starting Docker: docker.
+Waiting for docker to be ready, sleeping for 1 seconds.
+================================================================================
+Done setting up docker in docker.
+Activated service account credentials for: [prow-build@k8s-infra-prow-build.iam.gserviceaccount.com]
++ WRAPPED_COMMAND_PID=269
++ wait 269
++ make integration-prow-docker-docker-linux-x86-64
+./hack/prow/minikube_cross_build.sh 1.24.6 linux amd64
++ GO_VERSION=1.24.6
++ OS=linux
++ ARCH=amd64
++ readonly OS_ARCH=linux-amd64
++ OS_ARCH=linux-amd64
+++ pwd
++ echo 'running build in /home/prow/go/src/k8s.io/minikube, current pr number: 21807'
+running build in /home/prow/go/src/k8s.io/minikube, current pr number: 21807
++ declare -rx BUILD_IN_DOCKER=y
++ make -j 16 out/minikube-linux-amd64 out/e2e-linux-amd64 out/gvisor-addon
+make[1]: Entering directory '/home/prow/go/src/k8s.io/minikube'
+go: downloading go1.24.6 (linux/amd64)
+docker run --rm -e GOCACHE=/app/.cache -e IN_DOCKER=1 --user 0:0 -w /app -v /home/prow/go/src/k8s.io/minikube:/app:Z -v /home/prow/go:/go:Z --init registry.k8s.io/build-image/kube-cross:v1.34.0-go1.24.6-bullseye.0 /bin/bash -c '/usr/bin/make out/minikube-linux-amd64'
+GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o out/gvisor-addon cmd/gvisor/gvisor.go
+Unable to find image 'registry.k8s.io/build-image/kube-cross:v1.34.0-go1.24.6-bullseye.0' locally
+go: downloading github.com/pkg/errors v0.9.1
+go: downloading github.com/minikube-machine/machine v0.0.0-20240815173309-ffb6b643c381
+v1.34.0-go1.24.6-bullseye.0: Pulling from build-image/kube-cross
+9047683c7b5b: Pulling fs layer
+2be42d221bda: Pulling fs layer
+54d1704eb717: Pulling fs layer
+728e3131ad6f: Pulling fs layer
+9a940d118df6: Pulling fs layer
+756f913ff3a2: Pulling fs layer
+4f4fb700ef54: Pulling fs layer
+9f0be6079725: Pulling fs layer
+6c2282b81dac: Pulling fs layer
+67657b9d9403: Pulling fs layer
+8152e0c13168: Pulling fs layer
+b3fc127fc0ef: Pulling fs layer
+151818516915: Pulling fs layer
+a042171a0f4f: Pulling fs layer
+728e3131ad6f: Waiting
+9a940d118df6: Waiting
+756f913ff3a2: Waiting
+4f4fb700ef54: Waiting
+9f0be6079725: Waiting
+6c2282b81dac: Waiting
+67657b9d9403: Waiting
+8152e0c13168: Waiting
+b3fc127fc0ef: Waiting
+151818516915: Waiting
+a042171a0f4f: Waiting
+2be42d221bda: Verifying Checksum
+2be42d221bda: Download complete
+9047683c7b5b: Download complete
+728e3131ad6f: Verifying Checksum
+728e3131ad6f: Download complete
+756f913ff3a2: Verifying Checksum
+756f913ff3a2: Download complete
+4f4fb700ef54: Download complete
+9f0be6079725: Verifying Checksum
+9f0be6079725: Download complete
+9a940d118df6: Verifying Checksum
+9a940d118df6: Download complete
+6c2282b81dac: Verifying Checksum
+6c2282b81dac: Download complete
+8152e0c13168: Verifying Checksum
+8152e0c13168: Download complete
+b3fc127fc0ef: Verifying Checksum
+b3fc127fc0ef: Download complete
+151818516915: Verifying Checksum
+151818516915: Download complete
+9047683c7b5b: Pull complete
+a042171a0f4f: Verifying Checksum
+a042171a0f4f: Download complete
+2be42d221bda: Pull complete
+67657b9d9403: Verifying Checksum
+67657b9d9403: Download complete
+54d1704eb717: Verifying Checksum
+54d1704eb717: Download complete
+54d1704eb717: Pull complete
+728e3131ad6f: Pull complete
+9a940d118df6: Pull complete
+756f913ff3a2: Pull complete
+4f4fb700ef54: Pull complete
+9f0be6079725: Pull complete
+6c2282b81dac: Pull complete
+67657b9d9403: Pull complete
+8152e0c13168: Pull complete
+b3fc127fc0ef: Pull complete
+151818516915: Pull complete
+a042171a0f4f: Pull complete
+Digest: sha256:2673a717948d5df5a2fad1e7d6734d5e05f7213628cf05fb105ee81d9d9faa59
+Status: Downloaded newer image for registry.k8s.io/build-image/kube-cross:v1.34.0-go1.24.6-bullseye.0
+GOOS="linux" GOARCH="amd64" \
+go build -tags "libvirt_dlopen" -ldflags="-X k8s.io/minikube/pkg/version.version=v1.37.0 -X k8s.io/minikube/pkg/version.isoVersion=v1.37.0-1761658712-21800 -X k8s.io/minikube/pkg/version.gitCommitID="e2222ae36f11d3515cb4a1cbfbc513a974c210e6" -X k8s.io/minikube/pkg/version.storageProvisionerVersion=v5" -a -o out/minikube-linux-amd64 k8s.io/minikube/cmd/minikube
+go: downloading github.com/docker/cli v28.4.0+incompatible
+go: downloading golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c
+go: downloading github.com/google/slowjam v1.1.2
+go: downloading github.com/pkg/profile v1.7.0
+go: downloading github.com/spf13/pflag v1.0.9
+go: downloading k8s.io/klog/v2 v2.130.1
+go: downloading github.com/go-logr/logr v1.4.3
+go: downloading github.com/Delta456/box-cli-maker/v2 v2.3.0
+go: downloading github.com/blang/semver/v4 v4.0.0
+go: downloading github.com/docker/go-connections v0.6.0
+go: downloading github.com/google/go-containerregistry v0.20.6
+go: downloading github.com/juju/fslock v0.0.0-20160525022230-4d5c94c67b4b
+go: downloading github.com/mitchellh/go-ps v1.0.0
+go: downloading github.com/shirou/gopsutil/v3 v3.24.5
+go: downloading github.com/spf13/cobra v1.9.1
+go: downloading github.com/spf13/viper v1.20.1
+go: downloading golang.org/x/text v0.28.0
+go: downloading gopkg.in/yaml.v2 v2.4.0
+go: downloading k8s.io/api v0.33.4
+go: downloading k8s.io/apimachinery v0.33.4
+go: downloading k8s.io/client-go v0.33.4
+go: downloading k8s.io/kubectl v0.33.4
+go: downloading github.com/otiai10/copy v1.14.1
+go: downloading github.com/distribution/reference v0.6.0
+go: downloading github.com/docker/docker v28.3.3+incompatible
+go: downloading github.com/docker/go-units v0.5.0
+go: downloading github.com/juju/mutex/v2 v2.0.0
+go: downloading github.com/olekukonko/tablewriter v1.0.9
+go: downloading golang.org/x/sync v0.16.0
+go: downloading github.com/briandowns/spinner v1.23.2
+go: downloading github.com/mattn/go-isatty v0.0.20
+go: downloading github.com/felixge/fgprof v0.9.3
+go: downloading github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95
+go: downloading github.com/moby/term v0.5.0
+go: downloading golang.org/x/crypto v0.41.0
+go: downloading github.com/mitchellh/go-homedir v1.1.0
+go: downloading github.com/opencontainers/go-digest v1.0.0
+go: downloading github.com/tklauser/go-sysconf v0.3.12
+go: downloading golang.org/x/sys v0.37.0
+go: downloading github.com/gookit/color v1.5.2
+go: downloading github.com/huandu/xstrings v1.3.2
+go: downloading github.com/mattn/go-runewidth v0.0.16
+go: downloading github.com/muesli/reflow v0.3.0
+go: downloading github.com/xo/terminfo
v0.0.0-20220910002029-abceb7e1c41e +go: downloading golang.org/x/term v0.36.0 +go: downloading github.com/fsnotify/fsnotify v1.8.0 +go: downloading github.com/go-viper/mapstructure/v2 v2.4.0 +go: downloading github.com/sagikazarmark/locafero v0.7.0 +go: downloading github.com/spf13/afero v1.12.0 +go: downloading github.com/spf13/cast v1.7.1 +go: downloading github.com/containerd/errdefs v1.0.0 +go: downloading github.com/fvbommel/sortorder v1.1.0 +go: downloading github.com/moby/sys/atomicwriter v0.1.0 +go: downloading golang.org/x/oauth2 v0.30.0 +go: downloading github.com/google/go-github/v74 v74.0.0 +go: downloading golang.org/x/mod v0.27.0 +go: downloading github.com/opencontainers/cgroups v0.0.4 +go: downloading github.com/google/uuid v1.6.0 +go: downloading github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 +go: downloading github.com/cloudevents/sdk-go/v2 v2.16.0 +go: downloading github.com/icza/dyno v0.0.0-20230330125955-09f820a8d9c0 +go: downloading github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 +go: downloading github.com/klauspost/cpuid v1.2.0 +go: downloading cloud.google.com/go/storage v1.56.1 +go: downloading github.com/cheggaaa/pb/v3 v3.1.7 +go: downloading github.com/hashicorp/go-getter v1.8.0 +go: downloading google.golang.org/api v0.248.0 +go: downloading github.com/VividCortex/godaemon v1.0.0 +go: downloading github.com/Xuanwo/go-locale v1.1.3 +go: downloading github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 +go: downloading github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.29.0 +go: downloading go.opentelemetry.io/otel v1.38.0 +go: downloading go.opentelemetry.io/otel/sdk v1.37.0 +go: downloading go.opentelemetry.io/otel/trace v1.38.0 +go: downloading github.com/juju/clock v1.1.1 +go: downloading github.com/cenkalti/backoff/v4 v4.3.0 +go: downloading github.com/moby/patternmatcher v0.6.0 +go: downloading k8s.io/cluster-bootstrap v0.33.4 +go: downloading k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 +go: downloading cloud.google.com/go v0.121.6 +go: downloading github.com/sayboras/dockerclient v1.0.0 +go: downloading github.com/juju/errors v0.0.0-20220203013757-bd733f3c86b9 +go: downloading github.com/olekukonko/errors v1.1.0 +go: downloading github.com/olekukonko/ll v0.0.9 +go: downloading github.com/fatih/color v1.18.0 +go: downloading github.com/minikube-machine/machine-driver-vmware v0.1.6-0.20230701123042-a391c48b14d5 +go: downloading github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db +go: downloading github.com/sirupsen/logrus v1.9.3 +go: downloading github.com/MakeNowJust/heredoc v1.0.0 +go: downloading github.com/mitchellh/go-wordwrap v1.0.1 +go: downloading github.com/russross/blackfriday/v2 v2.1.0 +go: downloading github.com/docker/distribution v2.8.3+incompatible +go: downloading github.com/gogo/protobuf v1.3.2 +go: downloading github.com/tklauser/numcpus v0.6.1 +go: downloading github.com/rivo/uniseg v0.4.7 +go: downloading github.com/subosito/gotenv v1.6.0 +go: downloading github.com/pelletier/go-toml/v2 v2.2.3 +go: downloading gopkg.in/yaml.v3 v3.0.1 +go: downloading github.com/docker/docker-credential-helpers v0.9.3 +go: downloading github.com/sourcegraph/conc v0.3.0 +go: downloading sigs.k8s.io/randfill v1.0.0 +go: downloading github.com/hooklift/iso9660 v1.0.0 +go: downloading github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 +go: downloading golang.org/x/net v0.43.0 +go: downloading github.com/containerd/errdefs/pkg v0.3.0 +go: downloading 
github.com/opencontainers/image-spec v1.1.1 +go: downloading go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 +go: downloading k8s.io/component-base v0.33.4 +go: downloading cloud.google.com/go/compute/metadata v0.8.0 +go: downloading github.com/moby/sys/sequential v0.6.0 +go: downloading github.com/moby/docker-image-spec v1.3.1 +go: downloading github.com/containerd/stargz-snapshotter/estargz v0.16.3 +go: downloading github.com/coreos/go-systemd/v22 v22.5.0 +go: downloading github.com/cyphar/filepath-securejoin v0.4.1 +go: downloading github.com/moby/sys/mountinfo v0.7.2 +go: downloading github.com/moby/sys/userns v0.1.0 +go: downloading sigs.k8s.io/structured-merge-diff/v4 v4.6.0 +go: downloading github.com/VividCortex/ewma v1.2.0 +go: downloading github.com/mattn/go-colorable v0.1.14 +go: downloading github.com/aws/aws-sdk-go-v2 v1.36.3 +go: downloading github.com/aws/aws-sdk-go-v2/config v1.29.15 +go: downloading github.com/aws/aws-sdk-go-v2/credentials v1.17.68 +go: downloading github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 +go: downloading github.com/aws/aws-sdk-go-v2/service/s3 v1.80.1 +go: downloading github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d +go: downloading github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.65 +go: downloading github.com/hashicorp/go-cleanhttp v0.5.2 +go: downloading github.com/hashicorp/go-safetemp v1.0.0 +go: downloading github.com/hashicorp/go-version v1.6.0 +go: downloading github.com/klauspost/compress v1.18.0 +go: downloading github.com/ulikunitz/xz v0.5.15 +go: downloading cloud.google.com/go/trace v1.11.6 +go: downloading github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 +go: downloading google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c +go: downloading google.golang.org/grpc v1.74.2 +go: downloading google.golang.org/genproto v0.0.0-20250603155806-513f23925822 +go: downloading google.golang.org/protobuf v1.36.7 +go: downloading github.com/moby/go-archive v0.1.0 +go: downloading github.com/google/go-querystring v1.1.0 +go: downloading cloud.google.com/go/auth v0.16.5 +go: downloading cloud.google.com/go/iam v1.5.2 +go: downloading github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 +go: downloading github.com/googleapis/gax-go/v2 v2.15.0 +go: downloading go.opentelemetry.io/contrib/detectors/gcp v1.36.0 +go: downloading go.opentelemetry.io/otel/sdk/metric v1.36.0 +go: downloading libvirt.org/go/libvirt v1.11006.0 +go: downloading github.com/aregm/cpuid v0.0.0-20181003105527-1a4a6f06a1c6 +go: downloading go.opentelemetry.io/otel/metric v1.38.0 +go: downloading k8s.io/cli-runtime v0.33.4 +go: downloading gopkg.in/inf.v0 v0.9.1 +go: downloading sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 +go: downloading github.com/google/gnostic-models v0.6.9 +go: downloading golang.org/x/time v0.12.0 +go: downloading github.com/fxamacker/cbor/v2 v2.7.0 +go: downloading github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc +go: downloading github.com/c4milo/gotoolkit v0.0.0-20190525173301-67483a18c17a +go: downloading github.com/felixge/httpsnoop v1.0.4 +go: downloading github.com/vbatts/tar-split v0.12.1 +go: downloading github.com/godbus/dbus/v5 v5.1.0 +go: downloading sigs.k8s.io/yaml v1.4.0 +go: downloading go.uber.org/zap v1.27.0 +go: downloading github.com/json-iterator/go v1.1.12 +go: downloading github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 +go: downloading github.com/aws/aws-sdk-go-v2/service/sso 
v1.25.3 +go: downloading github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 +go: downloading github.com/aws/aws-sdk-go-v2/service/sts v1.33.20 +go: downloading github.com/aws/smithy-go v1.22.3 +go: downloading google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c +go: downloading github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 +go: downloading github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 +go: downloading github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 +go: downloading github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 +go: downloading github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2 +go: downloading github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 +go: downloading github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 +go: downloading github.com/moby/sys/user v0.4.0 +go: downloading github.com/containerd/log v0.1.0 +go: downloading cloud.google.com/go/monitoring v1.24.2 +go: downloading github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 +go: downloading cloud.google.com/go/auth/oauth2adapt v0.2.8 +go: downloading github.com/go-logr/stdr v1.2.2 +go: downloading go.opentelemetry.io/auto/sdk v1.1.0 +go: downloading github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 +go: downloading k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff +go: downloading github.com/google/go-cmp v0.7.0 +go: downloading github.com/x448/float16 v0.8.4 +go: downloading gopkg.in/evanphx/json-patch.v4 v4.12.0 +go: downloading github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd +go: downloading github.com/modern-go/reflect2 v1.0.2 +go: downloading go.uber.org/multierr v1.11.0 +go: downloading github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de +go: downloading go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 +go: downloading github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 +go: downloading github.com/google/s2a-go v0.1.9 +go: downloading github.com/envoyproxy/go-control-plane/envoy v1.32.4 +go: downloading github.com/googleapis/enterprise-certificate-proxy v0.3.6 +go: downloading github.com/prometheus/client_golang v1.22.0 +go: downloading github.com/prometheus/client_model v0.6.1 +go: downloading github.com/prometheus/procfs v0.15.1 +go: downloading github.com/spiffe/go-spiffe/v2 v2.5.0 +go: downloading github.com/cespare/xxhash/v2 v2.3.0 +go: downloading github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 +go: downloading github.com/moby/spdystream v0.5.0 +go: downloading github.com/beorn7/perks v1.0.1 +go: downloading github.com/prometheus/common v0.62.0 +go: downloading github.com/go-jose/go-jose/v4 v4.0.5 +go: downloading github.com/zeebo/errs v1.4.0 +go: downloading github.com/go-openapi/swag v0.23.0 +go: downloading github.com/go-openapi/jsonreference v0.20.2 +go: downloading github.com/envoyproxy/protoc-gen-validate v1.2.1 +go: downloading github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f +go: downloading github.com/emicklei/go-restful/v3 v3.11.0 +go: downloading cel.dev/expr v0.24.0 +go: downloading github.com/mailru/easyjson v0.7.7 +go: downloading github.com/go-openapi/jsonpointer v0.21.0 +go: downloading github.com/josharian/intern v1.0.0 +GOOS="linux" GOARCH="amd64" go test -ldflags="-X k8s.io/minikube/pkg/version.version=v1.37.0 -X k8s.io/minikube/pkg/version.isoVersion=v1.37.0-1761658712-21800 -X 
k8s.io/minikube/pkg/version.gitCommitID="e2222ae36f11d3515cb4a1cbfbc513a974c210e6" -X k8s.io/minikube/pkg/version.storageProvisionerVersion=v5" -c k8s.io/minikube/test/integration --tags="integration libvirt_dlopen" -o out/e2e-linux-amd64
+go: downloading github.com/elazarl/goproxy v1.7.2
+go: downloading github.com/hashicorp/go-retryablehttp v0.7.8
+go: downloading golang.org/x/build v0.0.0-20190927031335-2835ba2e683f
+make[1]: Leaving directory '/home/prow/go/src/k8s.io/minikube'
++ failed=0
++ export MINIKUBE_BIN=out/minikube-linux-amd64
++ MINIKUBE_BIN=out/minikube-linux-amd64
++ export E2E_BIN=out/e2e-linux-amd64
++ E2E_BIN=out/e2e-linux-amd64
++ chmod +x out/minikube-linux-amd64 out/e2e-linux-amd64
++++ go env GOOS
++++ go env GOARCH
+++ out/minikube-linux-amd64 version
++ BUILT_VERSION='minikube version: v1.37.0
+commit: e2222ae36f11d3515cb4a1cbfbc513a974c210e6'
++ echo minikube version: v1.37.0 commit: e2222ae36f11d3515cb4a1cbfbc513a974c210e6
+minikube version: v1.37.0 commit: e2222ae36f11d3515cb4a1cbfbc513a974c210e6
+++ echo minikube version: v1.37.0 commit: e2222ae36f11d3515cb4a1cbfbc513a974c210e6
+++ grep commit:
+++ awk '{print $2}'
++ COMMIT=version:
++ echo version:
++ grep -q dirty
++ [[ 0 -ne 0 ]]
+./hack/prow/util/integration_prow_wrapper.sh ./hack/prow/integration_docker_docker_linux_x86-64.sh
++ TARGET_SCRIPT=./hack/prow/integration_docker_docker_linux_x86-64.sh
++ ./hack/prow/util/run_with_minikube_user.sh ./hack/prow/integration_docker_docker_linux_x86-64.sh
++ NEW_USER=minikube
++ TARGET_SCRIPT=./hack/prow/integration_docker_docker_linux_x86-64.sh
+++ whoami
++ '[' root == root ']'
++ useradd -m -s /bin/bash minikube
++ chown -R minikube:minikube .
++ apt-get update
+Get:1 http://deb.debian.org/debian bookworm InRelease [151 kB]
+Get:2 http://deb.debian.org/debian bookworm-updates InRelease [55.4 kB]
+Get:3 https://download.docker.com/linux/debian bookworm InRelease [46.6 kB]
+Get:4 http://deb.debian.org/debian-security bookworm-security InRelease [48.0 kB]
+Get:5 http://deb.debian.org/debian bookworm/main amd64 Packages [8791 kB]
+Get:6 http://deb.debian.org/debian bookworm-updates/main amd64 Packages [6924 B]
+Get:7 http://deb.debian.org/debian-security bookworm-security/main amd64 Packages [284 kB]
+Get:8 https://download.docker.com/linux/debian bookworm/stable amd64 Packages [49.6 kB]
+Fetched 9433 kB in 1s (10.8 MB/s)
+Reading package lists...
++ apt-get install -y sudo
+Reading package lists...
+Building dependency tree...
+Reading state information...
+The following NEW packages will be installed:
+ sudo
+0 upgraded, 1 newly installed, 0 to remove and 0 not upgraded.
+Need to get 1890 kB of archives.
+After this operation, 6199 kB of additional disk space will be used.
+Get:1 http://deb.debian.org/debian bookworm/main amd64 sudo amd64 1.9.13p3-1+deb12u2 [1890 kB]
+debconf: delaying package configuration, since apt-utils is not installed
+Fetched 1890 kB in 0s (21.9 MB/s)
+Selecting previously unselected package sudo.
+(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ...
100% (Reading database ... 22947 files and directories currently installed.) +Preparing to unpack .../sudo_1.9.13p3-1+deb12u2_amd64.deb ... +Unpacking sudo (1.9.13p3-1+deb12u2) ... +Setting up sudo (1.9.13p3-1+deb12u2) ... +invoke-rc.d: could not determine current runlevel +invoke-rc.d: policy-rc.d denied execution of start. +Processing triggers for libc-bin (2.36-9+deb12u13) ... ++ echo 'minikube ALL=(ALL) NOPASSWD:ALL' ++ chmod 440 /etc/sudoers.d/minikube ++ usermod -aG docker minikube ++ su minikube -c ./hack/prow/integration_docker_docker_linux_x86-64.sh ++ OS=linux ++ ARCH=amd64 ++ DRIVER=docker ++ CONTAINER_RUNTIME=docker ++ EXTRA_START_ARGS= ++ EXTRA_TEST_ARGS= ++ JOB_NAME=Docker_Linux ++ git config --global --add safe.directory '*' +++ git rev-parse HEAD ++ COMMIT=e2222ae36f11d3515cb4a1cbfbc513a974c210e6 ++ MINIKUBE_LOCATION=e2222ae36f11d3515cb4a1cbfbc513a974c210e6 ++ source ./hack/prow/common.sh +++ readonly OS_ARCH=linux-amd64 +++ OS_ARCH=linux-amd64 +++ readonly TEST_ROOT=/home/minikube/minikube-integration +++ TEST_ROOT=/home/minikube/minikube-integration +++ readonly TEST_HOME=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863 +++ TEST_HOME=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863 +++ export GOPATH=/home/minikube/go +++ GOPATH=/home/minikube/go +++ export KUBECONFIG=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig +++ KUBECONFIG=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig +++ export PATH=/home/prow/go/bin:/go/bin:/usr/local/go/bin:/google-cloud-sdk/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin/:/usr/local/go/bin/:/home/minikube/go/bin +++ PATH=/home/prow/go/bin:/go/bin:/usr/local/go/bin:/google-cloud-sdk/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin/:/usr/local/go/bin/:/home/minikube/go/bin +++ export MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true +++ MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true +++ readonly TIMEOUT=120m +++ TIMEOUT=120m +++ cp -r test/integration/testdata . ++++ pwd +++ export PATH=/home/prow/go/src/k8s.io/minikube/out/:/home/prow/go/bin:/go/bin:/usr/local/go/bin:/google-cloud-sdk/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin/:/usr/local/go/bin/:/home/minikube/go/bin +++ PATH=/home/prow/go/src/k8s.io/minikube/out/:/home/prow/go/bin:/go/bin:/usr/local/go/bin:/google-cloud-sdk/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin/:/usr/local/go/bin/:/home/minikube/go/bin +++ mkdir -p /home/minikube/minikube-integration +++ mkdir -p /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863 +++ export MINIKUBE_HOME=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube +++ MINIKUBE_HOME=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube +++ export MINIKUBE_BIN=out/minikube-linux-amd64 +++ MINIKUBE_BIN=out/minikube-linux-amd64 +++ export E2E_BIN=out/e2e-linux-amd64 +++ E2E_BIN=out/e2e-linux-amd64 +++ install_dependencies ++++ uname +++ '[' Linux '!=' Darwin ']' +++ sudo apt-get -y install lsof psmisc dnsutils +Reading package lists... +Building dependency tree... +Reading state information... 
+The following additional packages will be installed: + bind9-dnsutils bind9-host bind9-libs libfstrm0 libjemalloc2 liblmdb0 + libmaxminddb0 libprotobuf-c1 libuv1 +Suggested packages: + mmdb-bin +The following NEW packages will be installed: + bind9-dnsutils bind9-host bind9-libs dnsutils libfstrm0 libjemalloc2 + liblmdb0 libmaxminddb0 libprotobuf-c1 libuv1 lsof psmisc +0 upgraded, 12 newly installed, 0 to remove and 0 not upgraded. +Need to get 2514 kB of archives. +After this operation, 7220 kB of additional disk space will be used. +Get:1 http://deb.debian.org/debian bookworm/main amd64 libuv1 amd64 1.44.2-1+deb12u1 [136 kB] +Get:2 http://deb.debian.org/debian bookworm/main amd64 libfstrm0 amd64 0.6.1-1 [21.6 kB] +Get:3 http://deb.debian.org/debian bookworm/main amd64 libjemalloc2 amd64 5.3.0-1 [275 kB] +Get:4 http://deb.debian.org/debian bookworm/main amd64 liblmdb0 amd64 0.9.24-1 [45.0 kB] +Get:5 http://deb.debian.org/debian bookworm/main amd64 libmaxminddb0 amd64 1.7.1-1 [29.8 kB] +Get:6 http://deb.debian.org/debian bookworm/main amd64 libprotobuf-c1 amd64 1.4.1-1+b1 [27.5 kB] +Get:7 http://deb.debian.org/debian-security bookworm-security/main amd64 bind9-libs amd64 1:9.18.41-1~deb12u1 [1181 kB] +Get:8 http://deb.debian.org/debian-security bookworm-security/main amd64 bind9-host amd64 1:9.18.41-1~deb12u1 [54.7 kB] +Get:9 http://deb.debian.org/debian-security bookworm-security/main amd64 bind9-dnsutils amd64 1:9.18.41-1~deb12u1 [155 kB] +Get:10 http://deb.debian.org/debian bookworm/main amd64 lsof amd64 4.95.0-1 [318 kB] +Get:11 http://deb.debian.org/debian-security bookworm-security/main amd64 dnsutils all 1:9.18.41-1~deb12u1 [11.2 kB] +Get:12 http://deb.debian.org/debian bookworm/main amd64 psmisc amd64 23.6-1 [259 kB] +debconf: delaying package configuration, since apt-utils is not installed +Fetched 2514 kB in 0s (24.2 MB/s) +Selecting previously unselected package libuv1:amd64. +(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 23088 files and directories currently installed.) +Preparing to unpack .../00-libuv1_1.44.2-1+deb12u1_amd64.deb ... +Unpacking libuv1:amd64 (1.44.2-1+deb12u1) ... +Selecting previously unselected package libfstrm0:amd64. +Preparing to unpack .../01-libfstrm0_0.6.1-1_amd64.deb ... +Unpacking libfstrm0:amd64 (0.6.1-1) ... +Selecting previously unselected package libjemalloc2:amd64. +Preparing to unpack .../02-libjemalloc2_5.3.0-1_amd64.deb ... +Unpacking libjemalloc2:amd64 (5.3.0-1) ... +Selecting previously unselected package liblmdb0:amd64. +Preparing to unpack .../03-liblmdb0_0.9.24-1_amd64.deb ... +Unpacking liblmdb0:amd64 (0.9.24-1) ... +Selecting previously unselected package libmaxminddb0:amd64. +Preparing to unpack .../04-libmaxminddb0_1.7.1-1_amd64.deb ... +Unpacking libmaxminddb0:amd64 (1.7.1-1) ... +Selecting previously unselected package libprotobuf-c1:amd64. +Preparing to unpack .../05-libprotobuf-c1_1.4.1-1+b1_amd64.deb ... +Unpacking libprotobuf-c1:amd64 (1.4.1-1+b1) ... +Selecting previously unselected package bind9-libs:amd64. 
+Preparing to unpack .../06-bind9-libs_1%3a9.18.41-1~deb12u1_amd64.deb ... +Unpacking bind9-libs:amd64 (1:9.18.41-1~deb12u1) ... +Selecting previously unselected package bind9-host. +Preparing to unpack .../07-bind9-host_1%3a9.18.41-1~deb12u1_amd64.deb ... +Unpacking bind9-host (1:9.18.41-1~deb12u1) ... +Selecting previously unselected package bind9-dnsutils. +Preparing to unpack .../08-bind9-dnsutils_1%3a9.18.41-1~deb12u1_amd64.deb ... +Unpacking bind9-dnsutils (1:9.18.41-1~deb12u1) ... +Selecting previously unselected package lsof. +Preparing to unpack .../09-lsof_4.95.0-1_amd64.deb ... +Unpacking lsof (4.95.0-1) ... +Selecting previously unselected package dnsutils. +Preparing to unpack .../10-dnsutils_1%3a9.18.41-1~deb12u1_all.deb ... +Unpacking dnsutils (1:9.18.41-1~deb12u1) ... +Selecting previously unselected package psmisc. +Preparing to unpack .../11-psmisc_23.6-1_amd64.deb ... +Unpacking psmisc (23.6-1) ... +Setting up liblmdb0:amd64 (0.9.24-1) ... +Setting up psmisc (23.6-1) ... +Setting up libmaxminddb0:amd64 (1.7.1-1) ... +Setting up libfstrm0:amd64 (0.6.1-1) ... +Setting up libjemalloc2:amd64 (5.3.0-1) ... +Setting up libprotobuf-c1:amd64 (1.4.1-1+b1) ... +Setting up libuv1:amd64 (1.44.2-1+deb12u1) ... +Setting up lsof (4.95.0-1) ... +Setting up bind9-libs:amd64 (1:9.18.41-1~deb12u1) ... +Setting up bind9-host (1:9.18.41-1~deb12u1) ... +Setting up bind9-dnsutils (1:9.18.41-1~deb12u1) ... +Setting up dnsutils (1:9.18.41-1~deb12u1) ... +Processing triggers for libc-bin (2.36-9+deb12u13) ... +++ sudo hack/prow/installer/check_install_golang.sh /usr/local 1.24.5 ++ (( 2 < 2 )) ++ VERSION_TO_INSTALL=1.24.5 ++ INSTALL_PATH=/usr/local +++ current_arch +++ case $(arch) in ++++ arch +++ echo amd64 ++ ARCH=amd64 ++ check_and_install_golang ++ go version ++ echo 'WARNING: No golang installation found in your environment.' +WARNING: No golang installation found in your environment. 
++ install_golang 1.24.5 /usr/local ++ local -r GO_VER=1.24.5 ++ local -r GO_DIR=/usr/local/go ++ echo 'Installing golang version: 1.24.5 in /usr/local/go' +Installing golang version: 1.24.5 in /usr/local/go ++ INSTALLOS=linux ++ [[ linux-gnu == \d\a\r\w\i\n* ]] ++ local -r GO_TGZ=go1.24.5.linux-amd64.tar.gz ++ pushd /tmp +/tmp /home/prow/go/src/k8s.io/minikube ++ sudo rm -rf go1.24.5.linux-amd64.tar.gz ++ curl -qL -O https://storage.googleapis.com/golang/go1.24.5.linux-amd64.tar.gz + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 74.9M 100 74.9M 0 0 89.0M 0 --:--:-- --:--:-- --:--:-- 89.0M ++ sudo rm -rf /usr/local/go ++ sudo mkdir -p /usr/local/go ++ sudo tar -C /usr/local/go --strip-components=1 -xzf go1.24.5.linux-amd64.tar.gz ++ popd +++ /usr/local/go/bin/go version ++ echo 'installed in /usr/local/go: go version go1.24.5 linux/amd64' +installed in /usr/local/go: go version go1.24.5 linux/amd64 ++ return +++ GOROOT=/usr/local/go +++ hack/prow/installer/check_install_gotestsum.sh ++ echo 'Installing gotestsum' +Installing gotestsum ++ which gotestsum ++ install_gotestsum +++ which gotestsum ++ rm -f ++ sudo env GOBIN=/usr/local/go/bin PATH=/home/prow/go/src/k8s.io/minikube/out/:/home/prow/go/bin:/go/bin:/usr/local/go/bin:/google-cloud-sdk/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin/:/usr/local/go/bin/:/home/minikube/go/bin go install gotest.tools/gotestsum@v1.12.3 +go: downloading gotest.tools/gotestsum v1.12.3 +go: downloading github.com/dnephin/pflag v1.0.7 +go: downloading golang.org/x/tools v0.34.0 +go: downloading github.com/fatih/color v1.18.0 +go: downloading github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 +go: downloading github.com/bitfield/gotestdox v0.2.2 +go: downloading golang.org/x/sync v0.15.0 +go: downloading golang.org/x/term v0.32.0 +go: downloading github.com/fsnotify/fsnotify v1.8.0 +go: downloading golang.org/x/sys v0.33.0 +go: downloading github.com/mattn/go-colorable v0.1.13 +go: downloading github.com/mattn/go-isatty v0.0.20 +go: downloading golang.org/x/text v0.17.0 +go: downloading golang.org/x/mod v0.25.0 +++ hack/prow/installer/check_install_gopogh.sh ++ go install github.com/medyagh/gopogh/cmd/gopogh@v0.29.0 +go: downloading go1.24.6 (linux/amd64) +go: downloading github.com/medyagh/gopogh v0.29.0 +go: downloading cloud.google.com/go/cloudsqlconn v1.12.1 +go: downloading github.com/GoogleCloudPlatform/cloudsql-proxy v1.37.0 +go: downloading github.com/jackc/pgx/v4 v4.18.3 +go: downloading github.com/jmoiron/sqlx v1.4.0 +go: downloading github.com/lib/pq v1.10.9 +go: downloading modernc.org/sqlite v1.33.1 +go: downloading github.com/jackc/pgconn v1.14.3 +go: downloading github.com/jackc/pgtype v1.14.0 +go: downloading github.com/jackc/pgio v1.0.0 +go: downloading github.com/jackc/pgproto3/v2 v2.3.3 +go: downloading github.com/google/uuid v1.6.0 +go: downloading golang.org/x/net v0.29.0 +go: downloading golang.org/x/oauth2 v0.23.0 +go: downloading google.golang.org/api v0.197.0 +go: downloading golang.org/x/time v0.6.0 +go: downloading go.opencensus.io v0.24.0 +go: downloading google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 +go: downloading google.golang.org/grpc v1.66.2 +go: downloading github.com/jackc/chunkreader/v2 v2.0.1 +go: downloading github.com/jackc/pgpassfile v1.0.0 +go: downloading github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 +go: downloading 
golang.org/x/crypto v0.27.0 +go: downloading golang.org/x/text v0.18.0 +go: downloading cloud.google.com/go/compute/metadata v0.5.0 +go: downloading github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da +go: downloading google.golang.org/protobuf v1.34.2 +go: downloading go.uber.org/zap v1.27.0 +go: downloading golang.org/x/sys v0.25.0 +go: downloading go.uber.org/multierr v1.10.0 +go: downloading modernc.org/libc v1.55.3 +go: downloading cloud.google.com/go/auth v0.9.3 +go: downloading cloud.google.com/go/auth/oauth2adapt v0.2.4 +go: downloading github.com/google/s2a-go v0.1.8 +go: downloading go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 +go: downloading github.com/googleapis/gax-go/v2 v2.13.0 +go: downloading github.com/googleapis/enterprise-certificate-proxy v0.3.4 +go: downloading github.com/felixge/httpsnoop v1.0.4 +go: downloading go.opentelemetry.io/otel v1.29.0 +go: downloading go.opentelemetry.io/otel/metric v1.29.0 +go: downloading go.opentelemetry.io/otel/trace v1.29.0 +go: downloading github.com/go-logr/logr v1.4.2 +go: downloading github.com/go-logr/stdr v1.2.2 +go: downloading modernc.org/mathutil v1.6.0 +go: downloading modernc.org/memory v1.8.0 +go: downloading github.com/dustin/go-humanize v1.0.1 +go: downloading github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec +++ type jq +++ docker_setup +++ docker system prune -a --volumes -f +Deleted Images: +untagged: registry.k8s.io/build-image/kube-cross:v1.34.0-go1.24.6-bullseye.0 +untagged: registry.k8s.io/build-image/kube-cross@sha256:2673a717948d5df5a2fad1e7d6734d5e05f7213628cf05fb105ee81d9d9faa59 +deleted: sha256:0e69ad4355497a6ca325821588b1289dd2e31a01c554c70e63ea15946399551f +deleted: sha256:1a86dbc56d487e14359c7f83464c476e2a6eef1c46eadd4afbc662b23647d503 +deleted: sha256:538cf953fe0dc3813c7c91a450ae2958b37c831ca393b652c990b28cbaa2b6ce +deleted: sha256:4ed837507b6ba884bf9bd4d2c5372b1749dc3706afc648ed1f232ba26ef87005 +deleted: sha256:c3ba379a32c1d59d9cacfb864f0d2cf021020c9f8aae0f6ce35d1b17f1d92461 +deleted: sha256:086aed04c206915ed93d121a776018b53fc2b82a0518c16076be01c28db9cd97 +deleted: sha256:8ac3d3db506ff29b1034a155814bda3fb5e23ee96796a7af3ec6739633d4261a +deleted: sha256:f822f2b6fcc4c12868bf0ae0cb24cfa5e5fce160f8e52de23d7e17f288d109fa +deleted: sha256:59f5d83f412bafd0a3fa4f452867ddfec34fcdf0fa9bcf8ad1c61ce455404b1b +deleted: sha256:442c9027c2652fcc04de656313298037a9f0c3fd4f70de5a5609c219af673102 +deleted: sha256:ba2a5437035df3f6a7aa594fa20ab418ebd983068e31d4463f84d1ada41570b6 +deleted: sha256:46e7f151f717cf466d448fca0108ccd9c64a99c932c424193f3b7cfe42397904 +deleted: sha256:6f7fab02494a8a4de4d0c44fd9f5af9bb999d671883b43e48d40b28f5d1530ba +deleted: sha256:f82aeb5e4d992a8a3da8c08df9c2ff2221de505d7f5025cf1a095b5e95c1bc09 +deleted: sha256:0a8d7e16f2a8f6622150af93527e6606f4d77cd4c5781619da9813c4595be531 +deleted: sha256:5f8d90562f21b79e0afc8033676a6879be097c40fccf17edd2078735efc459a3 +deleted: sha256:7c070db41ec632bd0d46fc5b930e8927ed4dafbb76150adf0cf1201fc0fbb837 + +Total reclaimed space: 5.738GB +++ docker system df +TYPE TOTAL ACTIVE SIZE RECLAIMABLE +Images 0 0 0B 0B +Containers 0 0 0B 0B +Local Volumes 0 0 0B 0B +Build Cache 0 0 0B 0B ++++ docker ps -aq +++ docker rm -f -v +++ true +++ '[' docker == containerd ']' +++ print_test_info ++++ date +++ echo '>> Starting at Sun Nov 2 22:46:27 UTC 2025' +>> Starting at Sun Nov 2 22:46:27 UTC 2025 +++ echo '' + ++++ whoami +++ echo 'user: minikube' +user: minikube +++ echo 'arch: linux-amd64' +arch: linux-amd64 +++ echo 'pr: 
21807' +pr: 21807 +++ echo 'driver: docker' +driver: docker +++ echo 'runtime: docker' +runtime: docker +++ echo 'job: Docker_Linux' +job: Docker_Linux +++ echo 'test home: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863' +test home: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863 ++++ uname -v +++ echo 'kernel: #1 SMP Fri Aug 22 11:53:37 UTC 2025' +kernel: #1 SMP Fri Aug 22 11:53:37 UTC 2025 ++++ uptime +++ echo 'uptime: 22:46:27 up 11 days, 23:45, 0 user, load average: 12.30, 5.69, 2.32' +uptime: 22:46:27 up 11 days, 23:45, 0 user, load average: 12.30, 5.69, 2.32 ++++ env KUBECONFIG=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863 kubectl version --client +++ echo 'kubectl: Client Version: v1.34.1 +Kustomize Version: v5.7.1' +kubectl: Client Version: v1.34.1 +Kustomize Version: v5.7.1 ++++ docker version --format '{{ .Client.Version }}' +++ echo 'docker: 28.5.1' +docker: 28.5.1 ++++ sudo podman version --format '{{.Version}}' +sudo: podman: command not found ++++ true +++ echo 'podman: ' +podman: ++++ go version +++ echo 'go: go version go1.24.6 linux/amd64' +go: go version go1.24.6 linux/amd64 +++ case "${DRIVER}" in +++ echo '' + +++ readonly TEST_OUT=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/testout.txt +++ TEST_OUT=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/testout.txt +++ readonly JSON_OUT=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/test.json +++ JSON_OUT=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/test.json +++ readonly JUNIT_OUT=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/junit-unit.xml +++ JUNIT_OUT=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/junit-unit.xml +++ readonly HTML_OUT=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/test.html +++ HTML_OUT=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/test.html +++ readonly SUMMARY_OUT=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/test_summary.json +++ SUMMARY_OUT=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/test_summary.json +++ touch /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/testout.txt +++ touch /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/test.json +++ touch /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/junit-unit.xml +++ touch /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/test.html +++ touch /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/test_summary.json ++++ date -u +%s +++ e2e_start_time=1762123587 +++ echo '' + ++++ date +++ echo '>> Starting out/e2e-linux-amd64 at Sun Nov 2 22:46:27 UTC 2025' +>> Starting out/e2e-linux-amd64 at Sun Nov 2 22:46:27 UTC 2025 +++ set -x +++ EXTRA_START_ARGS=' --container-runtime=docker' +++ echo /home/prow/go/src/k8s.io/minikube/out/:/home/prow/go/bin:/go/bin:/usr/local/go/bin:/google-cloud-sdk/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin/:/usr/local/go/bin/:/home/minikube/go/bin 
+/home/prow/go/src/k8s.io/minikube/out/:/home/prow/go/bin:/go/bin:/usr/local/go/bin:/google-cloud-sdk/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/bin/:/usr/local/go/bin/:/home/minikube/go/bin
+++ gotestsum --jsonfile /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/test.json --junitfile=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/junit-unit.xml -f standard-verbose --raw-command -- go tool test2json -t out/e2e-linux-amd64 '-minikube-start-args=--driver=docker --container-runtime=docker' -test.timeout=120m -test.v -binary=out/minikube-linux-amd64
+++ tee /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/testout.txt
+Found 8 cores, limiting parallelism with --test.parallel=4
+=== RUN TestDownloadOnly
+=== RUN TestDownloadOnly/v1.28.0
+=== RUN TestDownloadOnly/v1.28.0/json-events
+ aaa_download_only_test.go:80: (dbg) Run: out/minikube-linux-amd64 start -o=json --download-only -p download-only-012415 --force --alsologtostderr --kubernetes-version=v1.28.0 --container-runtime=docker --driver=docker --container-runtime=docker
+ aaa_download_only_test.go:80: (dbg) Done: out/minikube-linux-amd64 start -o=json --download-only -p download-only-012415 --force --alsologtostderr --kubernetes-version=v1.28.0 --container-runtime=docker --driver=docker --container-runtime=docker: (6.305960608s)
+=== RUN TestDownloadOnly/v1.28.0/preload-exists
+I1102 22:46:33.565235 37869 preload.go:183] Checking if preload exists for k8s version v1.28.0 and runtime docker
+I1102 22:46:33.565308 37869 preload.go:198] Found local preload: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-docker-overlay2-amd64.tar.lz4
+=== RUN TestDownloadOnly/v1.28.0/cached-images
+ aaa_download_only_test.go:128: Preload exists, images won't be cached
+=== RUN TestDownloadOnly/v1.28.0/binaries
+ aaa_download_only_test.go:150: Preload exists, binaries are present within.
+=== RUN TestDownloadOnly/v1.28.0/kubectl + aaa_download_only_test.go:166: Test for darwin and windows +=== RUN TestDownloadOnly/v1.28.0/LogsDuration + aaa_download_only_test.go:183: (dbg) Run: out/minikube-linux-amd64 logs -p download-only-012415 + aaa_download_only_test.go:183: (dbg) Non-zero exit: out/minikube-linux-amd64 logs -p download-only-012415: exit status 85 (33.427665ms) + + -- stdout -- + + ==> Audit <== + ┌─────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────┬──────────┬─────────┬─────────────────────┬──────────┐ + │ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │ + ├─────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────┼──────────┼─────────┼─────────────────────┼──────────┤ + │ start │ -o=json --download-only -p download-only-012415 --force --alsologtostderr --kubernetes-version=v1.28.0 --container-runtime=docker --driver=docker --container-runtime=docker │ download-only-012415 │ minikube │ v1.37.0 │ 02 Nov 25 22:46 UTC │ │ + └─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────┴──────────┴─────────┴─────────────────────┴──────────┘ + + + ==> Last Start <== + Log file created at: 2025/11/02 22:46:27 + Running on machine: ec6b3253-b39b-4dea-b672-e2db97323995 + Binary: Built with gc go1.24.6 for linux/amd64 + Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg + I1102 22:46:27.285750 37881 out.go:360] Setting OutFile to fd 1 ... + I1102 22:46:27.285957 37881 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:46:27.285961 37881 out.go:374] Setting ErrFile to fd 2... + I1102 22:46:27.285964 37881 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:46:27.286100 37881 root.go:338] Updating PATH: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/bin + W1102 22:46:27.286174 37881 root.go:314] Error reading config file at /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/config/config.json: open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/config/config.json: no such file or directory + I1102 22:46:27.286424 37881 out.go:368] Setting JSON to true + I1102 22:46:27.295361 37881 start.go:133] hostinfo: {"hostname":"ec6b3253-b39b-4dea-b672-e2db97323995","uptime":1035922,"bootTime":1761087666,"procs":17,"os":"linux","platform":"debian","platformFamily":"debian","platformVersion":"12.12","kernelVersion":"6.6.97+","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"4a5d182f-ea69-4eda-bb72-26ec6daf619d"} + I1102 22:46:27.295421 37881 start.go:143] virtualization: kvm guest + I1102 22:46:27.357758 37881 out.go:99] [download-only-012415] minikube v1.37.0 on Debian 12.12 (kvm/amd64) + W1102 22:46:27.357882 37881 preload.go:349] Failed to list preload files: open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cache/preloaded-tarball: no such file or directory + I1102 22:46:27.357931 37881 notify.go:221] Checking for updates... 
+ I1102 22:46:27.362167 37881 out.go:171] MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true + I1102 22:46:27.362340 37881 out.go:171] KUBECONFIG=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig + I1102 22:46:27.362439 37881 out.go:171] MINIKUBE_HOME=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube + I1102 22:46:27.362577 37881 out.go:171] MINIKUBE_BIN=out/minikube-linux-amd64 + W1102 22:46:27.362833 37881 out.go:336] minikube skips various validations when --force is supplied; this may lead to unexpected behavior + I1102 22:46:27.363013 37881 driver.go:422] Setting default libvirt URI to qemu:///system + I1102 22:46:27.377595 37881 docker.go:124] docker version: linux-28.5.1:Docker Engine - Community + I1102 22:46:27.377649 37881 cli_runner.go:164] Run: docker system info --format "{{json .}}" + I1102 22:46:27.471618 37881 info.go:266] docker info: {ID:c2b1af59-a1e1-4db0-9aaa-240bf36ebecd Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:0 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:25 OomKillDisable:false NGoroutines:50 SystemTime:2025-11-02 22:46:27.465162141 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.6.97+ OperatingSystem:Debian GNU/Linux 12 (bookworm) (containerized) OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[https://mirror.gcr.io/] Secure:true Official:true}} Mirrors:[https://mirror.gcr.io/]} NCPU:8 MemTotal:65320062976 GenericResources: DockerRootDir:/docker-graph HTTPProxy: HTTPSProxy: NoProxy: Name:ec6b3253-b39b-4dea-b672-e2db97323995 Labels:[] ExperimentalBuild:false ServerVersion:28.5.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:b98a3aace656320842a23f4a392a33f46af97866 Expected:} RuncCommit:{ID:v1.3.0-0-g4ca628d1 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. 
Version:v0.29.1]] Warnings:}} + I1102 22:46:27.471680 37881 docker.go:319] overlay module found + I1102 22:46:27.471935 37881 out.go:99] Using the docker driver based on user configuration + I1102 22:46:27.471945 37881 start.go:309] selected driver: docker + I1102 22:46:27.471952 37881 start.go:930] validating driver "docker" against + I1102 22:46:27.472028 37881 cli_runner.go:164] Run: docker system info --format "{{json .}}" + I1102 22:46:27.502425 37881 info.go:266] docker info: {ID:c2b1af59-a1e1-4db0-9aaa-240bf36ebecd Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:0 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:25 OomKillDisable:false NGoroutines:50 SystemTime:2025-11-02 22:46:27.496846225 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.6.97+ OperatingSystem:Debian GNU/Linux 12 (bookworm) (containerized) OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[https://mirror.gcr.io/] Secure:true Official:true}} Mirrors:[https://mirror.gcr.io/]} NCPU:8 MemTotal:65320062976 GenericResources: DockerRootDir:/docker-graph HTTPProxy: HTTPSProxy: NoProxy: Name:ec6b3253-b39b-4dea-b672-e2db97323995 Labels:[] ExperimentalBuild:false ServerVersion:28.5.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:b98a3aace656320842a23f4a392a33f46af97866 Expected:} RuncCommit:{ID:v1.3.0-0-g4ca628d1 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. 
Version:v0.29.1]] Warnings:}} + I1102 22:46:27.502498 37881 start_flags.go:327] no existing cluster config was found, will generate one from the flags + I1102 22:46:27.502847 37881 start_flags.go:410] Using suggested 15500MB memory alloc based on sys=62294MB, container=62294MB + I1102 22:46:27.503103 37881 start_flags.go:974] Wait components to verify : map[apiserver:true system_pods:true] + I1102 22:46:27.503309 37881 out.go:171] Using Docker driver with root privileges + I1102 22:46:27.503437 37881 cni.go:84] Creating CNI manager for "" + I1102 22:46:27.503478 37881 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge + I1102 22:46:27.503484 37881 start_flags.go:336] Found "bridge CNI" CNI - setting NetworkPlugin=cni + I1102 22:46:27.503513 37881 start.go:353] cluster config: + {Name:download-only-012415 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:15500 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.28.0 ClusterName:download-only-012415 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.28.0 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} + I1102 22:46:27.503665 37881 out.go:99] Starting "download-only-012415" primary control-plane node in "download-only-012415" cluster + I1102 22:46:27.503671 37881 cache.go:124] Beginning downloading kic base image for docker with docker + I1102 22:46:27.503761 37881 out.go:99] Pulling base image v0.0.48-1760939008-21773 ... 
+ I1102 22:46:27.503770 37881 preload.go:183] Checking if preload exists for k8s version v1.28.0 and runtime docker + I1102 22:46:27.503848 37881 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 in local docker daemon + I1102 22:46:27.513405 37881 cache.go:153] Downloading gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 to local cache + I1102 22:46:27.513517 37881 image.go:65] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 in local cache directory + I1102 22:46:27.513592 37881 image.go:150] Writing gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 to local cache + I1102 22:46:27.565236 37881 preload.go:148] Found remote preload: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.28.0/preloaded-images-k8s-v18-v1.28.0-docker-overlay2-amd64.tar.lz4 + I1102 22:46:27.565249 37881 cache.go:59] Caching tarball of preloaded images + I1102 22:46:27.565342 37881 preload.go:183] Checking if preload exists for k8s version v1.28.0 and runtime docker + I1102 22:46:27.565606 37881 out.go:99] Downloading Kubernetes v1.28.0 preload ... + I1102 22:46:27.565613 37881 preload.go:313] getting checksum for preloaded-images-k8s-v18-v1.28.0-docker-overlay2-amd64.tar.lz4 from gcs api... + I1102 22:46:27.597192 37881 preload.go:290] Got checksum from GCS API "8a955be835827bc584bcce0658a7fcc9" + I1102 22:46:27.597259 37881 download.go:108] Downloading: https://storage.googleapis.com/minikube-preloaded-volume-tarballs/v18/v1.28.0/preloaded-images-k8s-v18-v1.28.0-docker-overlay2-amd64.tar.lz4?checksum=md5:8a955be835827bc584bcce0658a7fcc9 -> /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.28.0-docker-overlay2-amd64.tar.lz4 + + + * The control-plane node download-only-012415 host does not exist + To start a cluster, run: "minikube start -p download-only-012415" + + -- /stdout -- + aaa_download_only_test.go:184: minikube logs failed with error: exit status 85 +=== RUN TestDownloadOnly/v1.28.0/DeleteAll + aaa_download_only_test.go:196: (dbg) Run: out/minikube-linux-amd64 delete --all +=== RUN TestDownloadOnly/v1.28.0/DeleteAlwaysSucceeds + aaa_download_only_test.go:207: (dbg) Run: out/minikube-linux-amd64 delete -p download-only-012415 +=== RUN TestDownloadOnly/v1.34.1 +=== RUN TestDownloadOnly/v1.34.1/json-events + aaa_download_only_test.go:80: (dbg) Run: out/minikube-linux-amd64 start -o=json --download-only -p download-only-398096 --force --alsologtostderr --kubernetes-version=v1.34.1 --container-runtime=docker --driver=docker --container-runtime=docker + aaa_download_only_test.go:80: (dbg) Done: out/minikube-linux-amd64 start -o=json --download-only -p download-only-398096 --force --alsologtostderr --kubernetes-version=v1.34.1 --container-runtime=docker --driver=docker --container-runtime=docker: (3.870007642s) +=== RUN TestDownloadOnly/v1.34.1/preload-exists +I1102 22:46:37.663679 37869 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker +I1102 22:46:37.663717 37869 preload.go:198] Found local preload: 
/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4 +=== RUN TestDownloadOnly/v1.34.1/cached-images + aaa_download_only_test.go:128: Preload exists, images won't be cached +=== RUN TestDownloadOnly/v1.34.1/binaries + aaa_download_only_test.go:150: Preload exists, binaries are present within. +=== RUN TestDownloadOnly/v1.34.1/kubectl + aaa_download_only_test.go:166: Test for darwin and windows +=== RUN TestDownloadOnly/v1.34.1/LogsDuration + aaa_download_only_test.go:183: (dbg) Run: out/minikube-linux-amd64 logs -p download-only-398096 + aaa_download_only_test.go:183: (dbg) Non-zero exit: out/minikube-linux-amd64 logs -p download-only-398096: exit status 85 (34.018999ms) + + -- stdout -- + + ==> Audit <== + ┌─────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬──────────────────────┬──────────┬─────────┬─────────────────────┬─────────────────────┐ + │ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │ + ├─────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼──────────────────────┼──────────┼─────────┼─────────────────────┼─────────────────────┤ + │ start │ -o=json --download-only -p download-only-012415 --force --alsologtostderr --kubernetes-version=v1.28.0 --container-runtime=docker --driver=docker --container-runtime=docker │ download-only-012415 │ minikube │ v1.37.0 │ 02 Nov 25 22:46 UTC │ │ + │ delete │ --all │ minikube │ minikube │ v1.37.0 │ 02 Nov 25 22:46 UTC │ 02 Nov 25 22:46 UTC │ + │ delete │ -p download-only-012415 │ download-only-012415 │ minikube │ v1.37.0 │ 02 Nov 25 22:46 UTC │ 02 Nov 25 22:46 UTC │ + │ start │ -o=json --download-only -p download-only-398096 --force --alsologtostderr --kubernetes-version=v1.34.1 --container-runtime=docker --driver=docker --container-runtime=docker │ download-only-398096 │ minikube │ v1.37.0 │ 02 Nov 25 22:46 UTC │ │ + └─────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴──────────────────────┴──────────┴─────────┴─────────────────────┴─────────────────────┘ + + + ==> Last Start <== + Log file created at: 2025/11/02 22:46:33 + Running on machine: ec6b3253-b39b-4dea-b672-e2db97323995 + Binary: Built with gc go1.24.6 for linux/amd64 + Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg + I1102 22:46:33.816597 38196 out.go:360] Setting OutFile to fd 1 ... + I1102 22:46:33.816738 38196 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:46:33.816742 38196 out.go:374] Setting ErrFile to fd 2... 
+ I1102 22:46:33.816745 38196 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:46:33.816855 38196 root.go:338] Updating PATH: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/bin + I1102 22:46:33.817080 38196 out.go:368] Setting JSON to true + I1102 22:46:33.823608 38196 start.go:133] hostinfo: {"hostname":"ec6b3253-b39b-4dea-b672-e2db97323995","uptime":1035928,"bootTime":1761087666,"procs":17,"os":"linux","platform":"debian","platformFamily":"debian","platformVersion":"12.12","kernelVersion":"6.6.97+","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"4a5d182f-ea69-4eda-bb72-26ec6daf619d"} + I1102 22:46:33.823663 38196 start.go:143] virtualization: kvm guest + I1102 22:46:33.824304 38196 out.go:99] [download-only-398096] minikube v1.37.0 on Debian 12.12 (kvm/amd64) + I1102 22:46:33.824360 38196 notify.go:221] Checking for updates... + I1102 22:46:33.824754 38196 out.go:171] MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true + I1102 22:46:33.824911 38196 out.go:171] KUBECONFIG=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig + I1102 22:46:33.825089 38196 out.go:171] MINIKUBE_HOME=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube + I1102 22:46:33.825217 38196 out.go:171] MINIKUBE_BIN=out/minikube-linux-amd64 + W1102 22:46:33.825506 38196 out.go:336] minikube skips various validations when --force is supplied; this may lead to unexpected behavior + I1102 22:46:33.825647 38196 driver.go:422] Setting default libvirt URI to qemu:///system + I1102 22:46:33.839102 38196 docker.go:124] docker version: linux-28.5.1:Docker Engine - Community + I1102 22:46:33.839149 38196 cli_runner.go:164] Run: docker system info --format "{{json .}}" + I1102 22:46:33.869553 38196 info.go:266] docker info: {ID:c2b1af59-a1e1-4db0-9aaa-240bf36ebecd Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:0 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:25 OomKillDisable:false NGoroutines:50 SystemTime:2025-11-02 22:46:33.86376849 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.6.97+ OperatingSystem:Debian GNU/Linux 12 (bookworm) (containerized) OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[https://mirror.gcr.io/] Secure:true Official:true}} Mirrors:[https://mirror.gcr.io/]} NCPU:8 MemTotal:65320062976 GenericResources: DockerRootDir:/docker-graph HTTPProxy: HTTPSProxy: NoProxy: Name:ec6b3253-b39b-4dea-b672-e2db97323995 Labels:[] ExperimentalBuild:false ServerVersion:28.5.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: 
RemoteManagers:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:b98a3aace656320842a23f4a392a33f46af97866 Expected:} RuncCommit:{ID:v1.3.0-0-g4ca628d1 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.29.1]] Warnings:}} + I1102 22:46:33.869611 38196 docker.go:319] overlay module found + I1102 22:46:33.869829 38196 out.go:99] Using the docker driver based on user configuration + I1102 22:46:33.869837 38196 start.go:309] selected driver: docker + I1102 22:46:33.869840 38196 start.go:930] validating driver "docker" against + I1102 22:46:33.869902 38196 cli_runner.go:164] Run: docker system info --format "{{json .}}" + I1102 22:46:33.898229 38196 info.go:266] docker info: {ID:c2b1af59-a1e1-4db0-9aaa-240bf36ebecd Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:0 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:25 OomKillDisable:false NGoroutines:50 SystemTime:2025-11-02 22:46:33.892898621 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.6.97+ OperatingSystem:Debian GNU/Linux 12 (bookworm) (containerized) OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[https://mirror.gcr.io/] Secure:true Official:true}} Mirrors:[https://mirror.gcr.io/]} NCPU:8 MemTotal:65320062976 GenericResources: DockerRootDir:/docker-graph HTTPProxy: HTTPSProxy: NoProxy: Name:ec6b3253-b39b-4dea-b672-e2db97323995 Labels:[] ExperimentalBuild:false ServerVersion:28.5.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:b98a3aace656320842a23f4a392a33f46af97866 Expected:} RuncCommit:{ID:v1.3.0-0-g4ca628d1 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. 
Version:v0.29.1]] Warnings:}} + I1102 22:46:33.898298 38196 start_flags.go:327] no existing cluster config was found, will generate one from the flags + I1102 22:46:33.898621 38196 start_flags.go:410] Using suggested 15500MB memory alloc based on sys=62294MB, container=62294MB + I1102 22:46:33.898705 38196 start_flags.go:974] Wait components to verify : map[apiserver:true system_pods:true] + I1102 22:46:33.898908 38196 out.go:171] Using Docker driver with root privileges + + + * The control-plane node download-only-398096 host does not exist + To start a cluster, run: "minikube start -p download-only-398096" + + -- /stdout -- + aaa_download_only_test.go:184: minikube logs failed with error: exit status 85 +=== RUN TestDownloadOnly/v1.34.1/DeleteAll + aaa_download_only_test.go:196: (dbg) Run: out/minikube-linux-amd64 delete --all +=== RUN TestDownloadOnly/v1.34.1/DeleteAlwaysSucceeds + aaa_download_only_test.go:207: (dbg) Run: out/minikube-linux-amd64 delete -p download-only-398096 + helpers_test.go:175: Cleaning up "download-only-012415" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p download-only-012415 + helpers_test.go:175: Cleaning up "download-only-398096" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p download-only-398096 +--- PASS: TestDownloadOnly (10.77s) + --- PASS: TestDownloadOnly/v1.28.0 (6.53s) + --- PASS: TestDownloadOnly/v1.28.0/json-events (6.31s) + --- PASS: TestDownloadOnly/v1.28.0/preload-exists (0.00s) + --- SKIP: TestDownloadOnly/v1.28.0/cached-images (0.00s) + --- SKIP: TestDownloadOnly/v1.28.0/binaries (0.00s) + --- SKIP: TestDownloadOnly/v1.28.0/kubectl (0.00s) + --- PASS: TestDownloadOnly/v1.28.0/LogsDuration (0.03s) + --- PASS: TestDownloadOnly/v1.28.0/DeleteAll (0.12s) + --- PASS: TestDownloadOnly/v1.28.0/DeleteAlwaysSucceeds (0.07s) + --- PASS: TestDownloadOnly/v1.34.1 (4.10s) + --- PASS: TestDownloadOnly/v1.34.1/json-events (3.87s) + --- PASS: TestDownloadOnly/v1.34.1/preload-exists (0.00s) + --- SKIP: TestDownloadOnly/v1.34.1/cached-images (0.00s) + --- SKIP: TestDownloadOnly/v1.34.1/binaries (0.00s) + --- SKIP: TestDownloadOnly/v1.34.1/kubectl (0.00s) + --- PASS: TestDownloadOnly/v1.34.1/LogsDuration (0.03s) + --- PASS: TestDownloadOnly/v1.34.1/DeleteAll (0.12s) + --- PASS: TestDownloadOnly/v1.34.1/DeleteAlwaysSucceeds (0.07s) +=== RUN TestDownloadOnlyKic + aaa_download_only_test.go:231: (dbg) Run: out/minikube-linux-amd64 start --download-only -p download-docker-839408 --alsologtostderr --driver=docker --container-runtime=docker + helpers_test.go:175: Cleaning up "download-docker-839408" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p download-docker-839408 +--- PASS: TestDownloadOnlyKic (0.19s) +=== RUN TestBinaryMirror +I1102 22:46:38.227899 37869 binary.go:74] Not caching binary, using https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl?checksum=file:https://dl.k8s.io/release/v1.34.1/bin/linux/amd64/kubectl.sha256 + aaa_download_only_test.go:309: (dbg) Run: out/minikube-linux-amd64 start --download-only -p binary-mirror-380715 --alsologtostderr --binary-mirror http://127.0.0.1:33231 --driver=docker --container-runtime=docker + helpers_test.go:175: Cleaning up "binary-mirror-380715" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p binary-mirror-380715 +--- PASS: TestBinaryMirror (0.33s) +=== RUN TestOffline +=== PAUSE TestOffline +=== RUN TestAddons +=== RUN TestAddons/PreSetup +=== RUN TestAddons/PreSetup/EnablingAddonOnNonExistingCluster +=== PAUSE TestAddons/PreSetup/EnablingAddonOnNonExistingCluster +=== RUN TestAddons/PreSetup/DisablingAddonOnNonExistingCluster +=== PAUSE TestAddons/PreSetup/DisablingAddonOnNonExistingCluster +=== CONT TestAddons/PreSetup/EnablingAddonOnNonExistingCluster +=== CONT TestAddons/PreSetup/DisablingAddonOnNonExistingCluster + addons_test.go:1000: (dbg) Run: out/minikube-linux-amd64 addons enable dashboard -p addons-448331 + addons_test.go:1011: (dbg) Run: out/minikube-linux-amd64 addons disable dashboard -p addons-448331 + addons_test.go:1000: (dbg) Non-zero exit: out/minikube-linux-amd64 addons enable dashboard -p addons-448331: exit status 85 (27.792523ms) + + -- stdout -- + * Profile "addons-448331" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p addons-448331" + + -- /stdout -- + addons_test.go:1011: (dbg) Non-zero exit: out/minikube-linux-amd64 addons disable dashboard -p addons-448331: exit status 85 (28.509558ms) + + -- stdout -- + * Profile "addons-448331" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p addons-448331" + + -- /stdout -- +=== RUN TestAddons/Setup + addons_test.go:108: (dbg) Run: out/minikube-linux-amd64 start -p addons-448331 --wait=true --memory=4096 --alsologtostderr --addons=registry --addons=registry-creds --addons=metrics-server --addons=volumesnapshots --addons=csi-hostpath-driver --addons=gcp-auth --addons=cloud-spanner --addons=inspektor-gadget --addons=nvidia-device-plugin --addons=yakd --addons=volcano --addons=amd-gpu-device-plugin --driver=docker --container-runtime=docker --addons=ingress --addons=ingress-dns --addons=storage-provisioner-rancher + addons_test.go:108: (dbg) Done: out/minikube-linux-amd64 start -p addons-448331 --wait=true --memory=4096 --alsologtostderr --addons=registry --addons=registry-creds --addons=metrics-server --addons=volumesnapshots --addons=csi-hostpath-driver --addons=gcp-auth --addons=cloud-spanner --addons=inspektor-gadget --addons=nvidia-device-plugin --addons=yakd --addons=volcano --addons=amd-gpu-device-plugin --driver=docker --container-runtime=docker --addons=ingress --addons=ingress-dns --addons=storage-provisioner-rancher: (3m3.37250879s) +=== RUN TestAddons/serial +=== RUN TestAddons/serial/Volcano + addons_test.go:884: volcano-controller stabilized in 7.407446ms + addons_test.go:868: volcano-scheduler stabilized in 7.472858ms + addons_test.go:876: volcano-admission stabilized in 7.662561ms + addons_test.go:890: (dbg) TestAddons/serial/Volcano: waiting 6m0s for pods matching "app=volcano-scheduler" in namespace "volcano-system" ... + helpers_test.go:352: "volcano-scheduler-76c996c8bf-8pdjm" [1d10156c-c806-48ee-9f7c-7007285d314a] Running + addons_test.go:890: (dbg) TestAddons/serial/Volcano: app=volcano-scheduler healthy within 5.001997777s + addons_test.go:894: (dbg) TestAddons/serial/Volcano: waiting 6m0s for pods matching "app=volcano-admission" in namespace "volcano-system" ... 
+ helpers_test.go:352: "volcano-admission-6c447bd768-dxl47" [fa80b175-7bdd-4c24-a0f2-4e256e57464f] Running + addons_test.go:894: (dbg) TestAddons/serial/Volcano: app=volcano-admission healthy within 5.002216759s + addons_test.go:898: (dbg) TestAddons/serial/Volcano: waiting 6m0s for pods matching "app=volcano-controller" in namespace "volcano-system" ... + helpers_test.go:352: "volcano-controllers-6fd4f85cb8-jb6w8" [d96404e7-09a6-46c0-9e48-92290bf2b989] Running + addons_test.go:898: (dbg) TestAddons/serial/Volcano: app=volcano-controller healthy within 5.002124279s + addons_test.go:903: (dbg) Run: kubectl --context addons-448331 delete -n volcano-system job volcano-admission-init + addons_test.go:909: (dbg) Run: kubectl --context addons-448331 create -f testdata/vcjob.yaml + addons_test.go:917: (dbg) Run: kubectl --context addons-448331 get vcjob -n my-volcano + addons_test.go:935: (dbg) TestAddons/serial/Volcano: waiting 3m0s for pods matching "volcano.sh/job-name=test-job" in namespace "my-volcano" ... + helpers_test.go:352: "test-job-nginx-0" [15bf48b5-a0fc-4820-9c47-0a72efef319c] Pending + helpers_test.go:352: "test-job-nginx-0" [15bf48b5-a0fc-4820-9c47-0a72efef319c] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx]) + helpers_test.go:352: "test-job-nginx-0" [15bf48b5-a0fc-4820-9c47-0a72efef319c] Running + addons_test.go:935: (dbg) TestAddons/serial/Volcano: volcano.sh/job-name=test-job healthy within 18.00219782s + addons_test.go:1053: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 addons disable volcano --alsologtostderr -v=1 + addons_test.go:1053: (dbg) Done: out/minikube-linux-amd64 -p addons-448331 addons disable volcano --alsologtostderr -v=1: (11.542066615s) +=== RUN TestAddons/serial/GCPAuth +=== RUN TestAddons/serial/GCPAuth/Namespaces + addons_test.go:630: (dbg) Run: kubectl --context addons-448331 create ns new-namespace + addons_test.go:644: (dbg) Run: kubectl --context addons-448331 get secret gcp-auth -n new-namespace +=== RUN TestAddons/serial/GCPAuth/FakeCredentials + addons_test.go:675: (dbg) Run: kubectl --context addons-448331 create -f testdata/busybox.yaml + addons_test.go:682: (dbg) Run: kubectl --context addons-448331 create sa gcp-auth-test + addons_test.go:688: (dbg) TestAddons/serial/GCPAuth/FakeCredentials: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ... 
+ helpers_test.go:352: "busybox" [7ab2bbcb-7f61-43d2-a895-45b5c2cf7f23] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox]) + helpers_test.go:352: "busybox" [7ab2bbcb-7f61-43d2-a895-45b5c2cf7f23] Running + addons_test.go:688: (dbg) TestAddons/serial/GCPAuth/FakeCredentials: integration-test=busybox healthy within 8.001580773s + addons_test.go:694: (dbg) Run: kubectl --context addons-448331 exec busybox -- /bin/sh -c "printenv GOOGLE_APPLICATION_CREDENTIALS" + addons_test.go:706: (dbg) Run: kubectl --context addons-448331 describe sa gcp-auth-test + addons_test.go:744: (dbg) Run: kubectl --context addons-448331 exec busybox -- /bin/sh -c "printenv GOOGLE_CLOUD_PROJECT" +=== RUN TestAddons/serial/GCPAuth/RealCredentials + addons_test.go:763: skipping GCPAuth addon test until 'Permission "artifactregistry.repositories.downloadArtifacts" denied on resource "projects/k8s-minikube/locations/us/repositories/test-artifacts" (or it may not exist)' issue is resolved + addons_test.go:1053: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 addons disable gcp-auth --alsologtostderr -v=1 + addons_test.go:1053: (dbg) Done: out/minikube-linux-amd64 -p addons-448331 addons disable gcp-auth --alsologtostderr -v=1: (8.572548051s) +=== RUN TestAddons/parallel +=== RUN TestAddons/parallel/Registry +=== PAUSE TestAddons/parallel/Registry +=== RUN TestAddons/parallel/RegistryCreds +=== PAUSE TestAddons/parallel/RegistryCreds +=== RUN TestAddons/parallel/Ingress +=== PAUSE TestAddons/parallel/Ingress +=== RUN TestAddons/parallel/InspektorGadget +=== PAUSE TestAddons/parallel/InspektorGadget +=== RUN TestAddons/parallel/MetricsServer +=== PAUSE TestAddons/parallel/MetricsServer +=== RUN TestAddons/parallel/Olm +=== PAUSE TestAddons/parallel/Olm +=== RUN TestAddons/parallel/CSI +=== PAUSE TestAddons/parallel/CSI +=== RUN TestAddons/parallel/Headlamp +=== PAUSE TestAddons/parallel/Headlamp +=== RUN TestAddons/parallel/CloudSpanner +=== PAUSE TestAddons/parallel/CloudSpanner +=== RUN TestAddons/parallel/LocalPath +=== PAUSE TestAddons/parallel/LocalPath +=== RUN TestAddons/parallel/NvidiaDevicePlugin +=== PAUSE TestAddons/parallel/NvidiaDevicePlugin +=== RUN TestAddons/parallel/Yakd +=== PAUSE TestAddons/parallel/Yakd +=== RUN TestAddons/parallel/AmdGpuDevicePlugin +=== PAUSE TestAddons/parallel/AmdGpuDevicePlugin +=== CONT TestAddons/parallel/Registry +=== CONT TestAddons/parallel/CSI +=== CONT TestAddons/parallel/Headlamp + addons_test.go:808: (dbg) Run: out/minikube-linux-amd64 addons enable headlamp -p addons-448331 --alsologtostderr -v=1 +=== CONT TestAddons/parallel/LocalPath + addons_test.go:949: (dbg) Run: kubectl --context addons-448331 apply -f testdata/storage-provisioner-rancher/pvc.yaml +I1102 22:50:43.639332 37869 kapi.go:75] Waiting for pod with label "kubernetes.io/minikube-addons=csi-hostpath-driver" in ns "kube-system" ... +I1102 22:50:43.641124 37869 kapi.go:86] Found 3 Pods for label selector kubernetes.io/minikube-addons=csi-hostpath-driver +I1102 22:50:43.641140 37869 kapi.go:107] duration metric: took 1.92104ms to wait for kubernetes.io/minikube-addons=csi-hostpath-driver ... 
+ addons_test.go:549: csi-hostpath-driver pods stabilized in 1.927381ms + addons_test.go:552: (dbg) Run: kubectl --context addons-448331 create -f testdata/csi-hostpath-driver/pvc.yaml + addons_test.go:382: registry stabilized in 44.737858ms + addons_test.go:384: (dbg) TestAddons/parallel/Registry: waiting 6m0s for pods matching "actual-registry=true" in namespace "kube-system" ... + helpers_test.go:352: "registry-6b586f9694-rnngb" [7557e00e-5f0e-48fe-bc3f-d3c78eac7783] Running + addons_test.go:557: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pvc "hpvc" in namespace "default" ... + helpers_test.go:402: (dbg) Run: kubectl --context addons-448331 get pvc hpvc -o jsonpath={.status.phase} -n default + addons_test.go:955: (dbg) Run: kubectl --context addons-448331 apply -f testdata/storage-provisioner-rancher/pod.yaml + addons_test.go:959: (dbg) TestAddons/parallel/LocalPath: waiting 5m0s for pvc "test-pvc" in namespace "default" ... + helpers_test.go:402: (dbg) Run: kubectl --context addons-448331 get pvc test-pvc -o jsonpath={.status.phase} -n default + addons_test.go:813: (dbg) TestAddons/parallel/Headlamp: waiting 8m0s for pods matching "app.kubernetes.io/name=headlamp" in namespace "headlamp" ... + helpers_test.go:352: "headlamp-6945c6f4d-hpklk" [349acc01-529a-4ae9-aca9-c43b518f6972] Pending / Ready:ContainersNotReady (containers with unready status: [headlamp]) / ContainersReady:ContainersNotReady (containers with unready status: [headlamp]) + helpers_test.go:402: (dbg) Run: kubectl --context addons-448331 get pvc hpvc -o jsonpath={.status.phase} -n default + addons_test.go:562: (dbg) Run: kubectl --context addons-448331 create -f testdata/csi-hostpath-driver/pv-pod.yaml + helpers_test.go:402: (dbg) Run: kubectl --context addons-448331 get pvc test-pvc -o jsonpath={.status.phase} -n default + addons_test.go:567: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pods matching "app=task-pv-pod" in namespace "default" ... + helpers_test.go:352: "task-pv-pod" [64f7e8c6-5a1d-4353-aa2f-8dcf318083f4] Pending / Ready:ContainersNotReady (containers with unready status: [task-pv-container]) / ContainersReady:ContainersNotReady (containers with unready status: [task-pv-container]) + helpers_test.go:402: (dbg) Run: kubectl --context addons-448331 get pvc test-pvc -o jsonpath={.status.phase} -n default + helpers_test.go:402: (dbg) Run: kubectl --context addons-448331 get pvc test-pvc -o jsonpath={.status.phase} -n default + helpers_test.go:402: (dbg) Run: kubectl --context addons-448331 get pvc test-pvc -o jsonpath={.status.phase} -n default + helpers_test.go:402: (dbg) Run: kubectl --context addons-448331 get pvc test-pvc -o jsonpath={.status.phase} -n default + addons_test.go:384: (dbg) TestAddons/parallel/Registry: actual-registry=true healthy within 6.001674884s + addons_test.go:387: (dbg) TestAddons/parallel/Registry: waiting 10m0s for pods matching "registry-proxy=true" in namespace "kube-system" ... + helpers_test.go:352: "registry-proxy-wt98m" [eaf6ab17-5e17-4a5d-859b-d8fd01b43ba5] Running + helpers_test.go:402: (dbg) Run: kubectl --context addons-448331 get pvc test-pvc -o jsonpath={.status.phase} -n default + helpers_test.go:402: (dbg) Run: kubectl --context addons-448331 get pvc test-pvc -o jsonpath={.status.phase} -n default + helpers_test.go:402: (dbg) Run: kubectl --context addons-448331 get pvc test-pvc -o jsonpath={.status.phase} -n default + addons_test.go:962: (dbg) TestAddons/parallel/LocalPath: waiting 3m0s for pods matching "run=test-local-path" in namespace "default" ... 
+ helpers_test.go:352: "test-local-path" [f66f4b39-12e3-4c4d-9167-5515b9ea3ebf] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox]) + addons_test.go:387: (dbg) TestAddons/parallel/Registry: registry-proxy=true healthy within 5.002276016s + addons_test.go:392: (dbg) Run: kubectl --context addons-448331 delete po -l run=registry-test --now + addons_test.go:397: (dbg) Run: kubectl --context addons-448331 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local" + helpers_test.go:352: "task-pv-pod" [64f7e8c6-5a1d-4353-aa2f-8dcf318083f4] Running + helpers_test.go:352: "headlamp-6945c6f4d-hpklk" [349acc01-529a-4ae9-aca9-c43b518f6972] Running + helpers_test.go:352: "test-local-path" [f66f4b39-12e3-4c4d-9167-5515b9ea3ebf] Pending / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted + helpers_test.go:352: "test-local-path" [f66f4b39-12e3-4c4d-9167-5515b9ea3ebf] Succeeded / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted + addons_test.go:962: (dbg) TestAddons/parallel/LocalPath: run=test-local-path healthy within 15.001194207s + addons_test.go:967: (dbg) Run: kubectl --context addons-448331 get pvc test-pvc -o=json + addons_test.go:976: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 ssh "cat /opt/local-path-provisioner/pvc-5a57aacc-6e4c-42a2-a6b4-2e1613c76d5d_default_test-pvc/file1" + addons_test.go:988: (dbg) Run: kubectl --context addons-448331 delete pod test-local-path + addons_test.go:992: (dbg) Run: kubectl --context addons-448331 delete pvc test-pvc + addons_test.go:813: (dbg) TestAddons/parallel/Headlamp: app.kubernetes.io/name=headlamp healthy within 23.002002718s + addons_test.go:1053: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 addons disable headlamp --alsologtostderr -v=1 + addons_test.go:1053: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 addons disable storage-provisioner-rancher --alsologtostderr -v=1 + addons_test.go:397: (dbg) Done: kubectl --context addons-448331 run --rm registry-test --restart=Never --image=gcr.io/k8s-minikube/busybox -it -- sh -c "wget --spider -S http://registry.kube-system.svc.cluster.local": (12.981467817s) + addons_test.go:411: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 ip +2025/11/02 22:51:07 [DEBUG] GET http://192.168.49.2:5000 + addons_test.go:1053: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 addons disable registry --alsologtostderr -v=1 + addons_test.go:567: (dbg) TestAddons/parallel/CSI: app=task-pv-pod healthy within 23.002876097s + addons_test.go:572: (dbg) Run: kubectl --context addons-448331 create -f testdata/csi-hostpath-driver/snapshot.yaml + addons_test.go:577: (dbg) TestAddons/parallel/CSI: waiting 6m0s for volume snapshot "new-snapshot-demo" in namespace "default" ... + helpers_test.go:427: (dbg) Run: kubectl --context addons-448331 get volumesnapshot new-snapshot-demo -o jsonpath={.status.readyToUse} -n default +=== CONT TestAddons/parallel/CloudSpanner + addons_test.go:840: (dbg) TestAddons/parallel/CloudSpanner: waiting 6m0s for pods matching "app=cloud-spanner-emulator" in namespace "default" ... 
+ helpers_test.go:352: "cloud-spanner-emulator-86bd5cbb97-8mwcz" [79ee574a-fc26-487d-9a16-95b61a7b1ecb] Running + helpers_test.go:427: (dbg) Run: kubectl --context addons-448331 get volumesnapshot new-snapshot-demo -o jsonpath={.status.readyToUse} -n default + addons_test.go:582: (dbg) Run: kubectl --context addons-448331 delete pod task-pv-pod + addons_test.go:588: (dbg) Run: kubectl --context addons-448331 delete pvc hpvc + addons_test.go:594: (dbg) Run: kubectl --context addons-448331 create -f testdata/csi-hostpath-driver/pvc-restore.yaml + addons_test.go:599: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pvc "hpvc-restore" in namespace "default" ... + helpers_test.go:402: (dbg) Run: kubectl --context addons-448331 get pvc hpvc-restore -o jsonpath={.status.phase} -n default + helpers_test.go:402: (dbg) Run: kubectl --context addons-448331 get pvc hpvc-restore -o jsonpath={.status.phase} -n default + addons_test.go:604: (dbg) Run: kubectl --context addons-448331 create -f testdata/csi-hostpath-driver/pv-pod-restore.yaml + addons_test.go:609: (dbg) TestAddons/parallel/CSI: waiting 6m0s for pods matching "app=task-pv-pod-restore" in namespace "default" ... + helpers_test.go:352: "task-pv-pod-restore" [74457049-87d9-4537-82ca-c78f732ce1f3] Pending + helpers_test.go:352: "task-pv-pod-restore" [74457049-87d9-4537-82ca-c78f732ce1f3] Pending / Ready:ContainersNotReady (containers with unready status: [task-pv-container]) / ContainersReady:ContainersNotReady (containers with unready status: [task-pv-container]) + addons_test.go:1053: (dbg) Done: out/minikube-linux-amd64 -p addons-448331 addons disable headlamp --alsologtostderr -v=1: (5.413506807s) +=== CONT TestAddons/parallel/AmdGpuDevicePlugin + addons_test.go:1038: (dbg) TestAddons/parallel/AmdGpuDevicePlugin: waiting 6m0s for pods matching "name=amd-gpu-device-plugin" in namespace "kube-system" ... + helpers_test.go:352: "amd-gpu-device-plugin-fmvgn" [2e594f80-2c28-4481-a144-b4aa7a2aec22] Running + helpers_test.go:352: "task-pv-pod-restore" [74457049-87d9-4537-82ca-c78f732ce1f3] Running + addons_test.go:840: (dbg) TestAddons/parallel/CloudSpanner: app=cloud-spanner-emulator healthy within 5.002166751s + addons_test.go:1053: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 addons disable cloud-spanner --alsologtostderr -v=1 +=== CONT TestAddons/parallel/InspektorGadget + addons_test.go:823: (dbg) TestAddons/parallel/InspektorGadget: waiting 8m0s for pods matching "k8s-app=gadget" in namespace "gadget" ... + helpers_test.go:352: "gadget-bb4dc" [0cdd97ed-02d6-4b64-ba02-e9fd4606e68a] Running + addons_test.go:1038: (dbg) TestAddons/parallel/AmdGpuDevicePlugin: name=amd-gpu-device-plugin healthy within 5.001531095s + addons_test.go:1053: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 addons disable amd-gpu-device-plugin --alsologtostderr -v=1 +=== CONT TestAddons/parallel/Olm + addons_test.go:483: Skipping OLM addon test until https://github.com/operator-framework/operator-lifecycle-manager/issues/2534 is resolved +=== CONT TestAddons/parallel/MetricsServer + addons_test.go:455: metrics-server stabilized in 2.155974ms + addons_test.go:457: (dbg) TestAddons/parallel/MetricsServer: waiting 6m0s for pods matching "k8s-app=metrics-server" in namespace "kube-system" ... 
+ helpers_test.go:352: "metrics-server-85b7d694d7-6kcb7" [512df57c-dd66-490e-9dad-ac93f8fc13a9] Running + addons_test.go:609: (dbg) TestAddons/parallel/CSI: app=task-pv-pod-restore healthy within 7.002031932s + addons_test.go:614: (dbg) Run: kubectl --context addons-448331 delete pod task-pv-pod-restore + addons_test.go:618: (dbg) Run: kubectl --context addons-448331 delete pvc hpvc-restore + addons_test.go:622: (dbg) Run: kubectl --context addons-448331 delete volumesnapshot new-snapshot-demo + addons_test.go:1053: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 addons disable volumesnapshots --alsologtostderr -v=1 + addons_test.go:823: (dbg) TestAddons/parallel/InspektorGadget: k8s-app=gadget healthy within 6.001745569s + addons_test.go:1053: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 addons disable inspektor-gadget --alsologtostderr -v=1 + addons_test.go:1053: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 addons disable csi-hostpath-driver --alsologtostderr -v=1 +=== CONT TestAddons/parallel/Ingress + addons_test.go:209: (dbg) Run: kubectl --context addons-448331 wait --for=condition=ready --namespace=ingress-nginx pod --selector=app.kubernetes.io/component=controller --timeout=90s + addons_test.go:234: (dbg) Run: kubectl --context addons-448331 replace --force -f testdata/nginx-ingress-v1.yaml + addons_test.go:247: (dbg) Run: kubectl --context addons-448331 replace --force -f testdata/nginx-pod-svc.yaml + addons_test.go:252: (dbg) TestAddons/parallel/Ingress: waiting 8m0s for pods matching "run=nginx" in namespace "default" ... + helpers_test.go:352: "nginx" [ceee36f7-5348-4229-a105-268a8a9e2645] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx]) + addons_test.go:457: (dbg) TestAddons/parallel/MetricsServer: k8s-app=metrics-server healthy within 5.001838492s + addons_test.go:463: (dbg) Run: kubectl --context addons-448331 top pods -n kube-system + addons_test.go:1053: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 addons disable metrics-server --alsologtostderr -v=1 +=== CONT TestAddons/parallel/RegistryCreds + addons_test.go:323: registry-creds stabilized in 1.564893ms + addons_test.go:325: (dbg) Run: out/minikube-linux-amd64 addons configure registry-creds -f ./testdata/addons_testconfig.json -p addons-448331 + addons_test.go:332: (dbg) Run: kubectl --context addons-448331 -n kube-system get secret -o yaml + addons_test.go:1053: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 addons disable registry-creds --alsologtostderr -v=1 +=== CONT TestAddons/parallel/Yakd + addons_test.go:1047: (dbg) TestAddons/parallel/Yakd: waiting 2m0s for pods matching "app.kubernetes.io/name=yakd-dashboard" in namespace "yakd-dashboard" ... + helpers_test.go:352: "yakd-dashboard-5ff678cb9-n2fmj" [177802a4-f7ad-47ce-9a3a-424f85bceb78] Running + addons_test.go:1053: (dbg) Done: out/minikube-linux-amd64 -p addons-448331 addons disable csi-hostpath-driver --alsologtostderr -v=1: (6.28856345s) +=== CONT TestAddons/parallel/NvidiaDevicePlugin + addons_test.go:1025: (dbg) TestAddons/parallel/NvidiaDevicePlugin: waiting 6m0s for pods matching "name=nvidia-device-plugin-ds" in namespace "kube-system" ... 
+ helpers_test.go:352: "nvidia-device-plugin-daemonset-fmt4s" [f2413f76-429f-4553-945f-20ffdab248a5] Running + addons_test.go:1047: (dbg) TestAddons/parallel/Yakd: app.kubernetes.io/name=yakd-dashboard healthy within 5.001822343s + addons_test.go:1053: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 addons disable yakd --alsologtostderr -v=1 + addons_test.go:1025: (dbg) TestAddons/parallel/NvidiaDevicePlugin: name=nvidia-device-plugin-ds healthy within 5.001500386s + addons_test.go:1053: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 addons disable nvidia-device-plugin --alsologtostderr -v=1 + helpers_test.go:352: "nginx" [ceee36f7-5348-4229-a105-268a8a9e2645] Running + addons_test.go:1053: (dbg) Done: out/minikube-linux-amd64 -p addons-448331 addons disable yakd --alsologtostderr -v=1: (5.408554422s) + addons_test.go:252: (dbg) TestAddons/parallel/Ingress: run=nginx healthy within 19.001604875s +I1102 22:51:38.952692 37869 kapi.go:150] Service nginx in namespace default found. + addons_test.go:264: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 ssh "curl -s http://127.0.0.1/ -H 'Host: nginx.example.com'" + addons_test.go:288: (dbg) Run: kubectl --context addons-448331 replace --force -f testdata/ingress-dns-example-v1.yaml + addons_test.go:293: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 ip + addons_test.go:299: (dbg) Run: nslookup hello-john.test 192.168.49.2 + addons_test.go:1053: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 addons disable ingress-dns --alsologtostderr -v=1 + addons_test.go:1053: (dbg) Run: out/minikube-linux-amd64 -p addons-448331 addons disable ingress --alsologtostderr -v=1 + addons_test.go:1053: (dbg) Done: out/minikube-linux-amd64 -p addons-448331 addons disable ingress --alsologtostderr -v=1: (7.41041113s) + addons_test.go:1053: (dbg) Done: out/minikube-linux-amd64 -p addons-448331 addons disable storage-provisioner-rancher --alsologtostderr -v=1: (42.439111695s) +=== RUN TestAddons/StoppedEnableDisable + addons_test.go:172: (dbg) Run: out/minikube-linux-amd64 stop -p addons-448331 + addons_test.go:172: (dbg) Done: out/minikube-linux-amd64 stop -p addons-448331: (10.806445948s) + addons_test.go:176: (dbg) Run: out/minikube-linux-amd64 addons enable dashboard -p addons-448331 + addons_test.go:180: (dbg) Run: out/minikube-linux-amd64 addons disable dashboard -p addons-448331 + addons_test.go:185: (dbg) Run: out/minikube-linux-amd64 addons disable gvisor -p addons-448331 + helpers_test.go:175: Cleaning up "addons-448331" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p addons-448331 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p addons-448331: (2.20085809s) +--- PASS: TestAddons (324.25s) + --- PASS: TestAddons/PreSetup (0.00s) + --- PASS: TestAddons/PreSetup/EnablingAddonOnNonExistingCluster (0.03s) + --- PASS: TestAddons/PreSetup/DisablingAddonOnNonExistingCluster (0.03s) + --- PASS: TestAddons/Setup (183.37s) + --- PASS: TestAddons/serial (61.68s) + --- PASS: TestAddons/serial/Volcano (44.73s) + --- PASS: TestAddons/serial/GCPAuth (16.95s) + --- PASS: TestAddons/serial/GCPAuth/Namespaces (0.07s) + --- PASS: TestAddons/serial/GCPAuth/FakeCredentials (8.30s) + --- SKIP: TestAddons/serial/GCPAuth/RealCredentials (0.00s) + --- PASS: TestAddons/parallel (0.00s) + --- PASS: TestAddons/parallel/Registry (24.52s) + --- PASS: TestAddons/parallel/Headlamp (28.88s) + --- PASS: TestAddons/parallel/CloudSpanner (5.32s) + --- PASS: TestAddons/parallel/AmdGpuDevicePlugin (5.32s) + --- SKIP: TestAddons/parallel/Olm (0.00s) + --- PASS: TestAddons/parallel/InspektorGadget (6.16s) + --- PASS: TestAddons/parallel/MetricsServer (5.42s) + --- PASS: TestAddons/parallel/RegistryCreds (0.39s) + --- PASS: TestAddons/parallel/CSI (42.20s) + --- PASS: TestAddons/parallel/NvidiaDevicePlugin (5.31s) + --- PASS: TestAddons/parallel/Yakd (10.41s) + --- PASS: TestAddons/parallel/Ingress (27.70s) + --- PASS: TestAddons/parallel/LocalPath (66.02s) + --- PASS: TestAddons/StoppedEnableDisable (10.95s) +=== RUN TestCertOptions +=== PAUSE TestCertOptions +=== RUN TestCertExpiration +=== PAUSE TestCertExpiration +=== RUN TestDockerFlags +=== PAUSE TestDockerFlags +=== RUN TestForceSystemdFlag +=== PAUSE TestForceSystemdFlag +=== RUN TestForceSystemdEnv +=== PAUSE TestForceSystemdEnv +=== RUN TestDockerEnvContainerd + docker_test.go:170: running with docker true linux amd64 + docker_test.go:172: skipping: TestDockerEnvContainerd can only be run with the containerd runtime on Docker driver +--- SKIP: TestDockerEnvContainerd (0.00s) +=== RUN TestHyperKitDriverInstallOrUpdate + driver_install_or_update_test.go:37: Skip if not darwin. +--- SKIP: TestHyperKitDriverInstallOrUpdate (0.00s) +=== RUN TestHyperkitDriverSkipUpgrade + driver_install_or_update_test.go:101: Skip if not darwin. +--- SKIP: TestHyperkitDriverSkipUpgrade (0.00s) +=== RUN TestErrorSpam +=== RUN TestErrorSpam/setup + error_spam_test.go:81: (dbg) Run: out/minikube-linux-amd64 start -p nospam-598251 -n=1 --memory=3072 --wait=false --log_dir=/tmp/nospam-598251 --driver=docker --container-runtime=docker + error_spam_test.go:81: (dbg) Done: out/minikube-linux-amd64 start -p nospam-598251 -n=1 --memory=3072 --wait=false --log_dir=/tmp/nospam-598251 --driver=docker --container-runtime=docker: (19.580257962s) +=== RUN TestErrorSpam/start + error_spam_test.go:206: Cleaning up 1 logfile(s) ... + error_spam_test.go:149: (dbg) Run: out/minikube-linux-amd64 -p nospam-598251 --log_dir /tmp/nospam-598251 start --dry-run + error_spam_test.go:149: (dbg) Run: out/minikube-linux-amd64 -p nospam-598251 --log_dir /tmp/nospam-598251 start --dry-run + error_spam_test.go:172: (dbg) Run: out/minikube-linux-amd64 -p nospam-598251 --log_dir /tmp/nospam-598251 start --dry-run +=== RUN TestErrorSpam/status + error_spam_test.go:206: Cleaning up 0 logfile(s) ... 
+ error_spam_test.go:149: (dbg) Run: out/minikube-linux-amd64 -p nospam-598251 --log_dir /tmp/nospam-598251 status + error_spam_test.go:149: (dbg) Run: out/minikube-linux-amd64 -p nospam-598251 --log_dir /tmp/nospam-598251 status + error_spam_test.go:172: (dbg) Run: out/minikube-linux-amd64 -p nospam-598251 --log_dir /tmp/nospam-598251 status +=== RUN TestErrorSpam/pause + error_spam_test.go:206: Cleaning up 0 logfile(s) ... + error_spam_test.go:149: (dbg) Run: out/minikube-linux-amd64 -p nospam-598251 --log_dir /tmp/nospam-598251 pause + error_spam_test.go:149: (dbg) Run: out/minikube-linux-amd64 -p nospam-598251 --log_dir /tmp/nospam-598251 pause + error_spam_test.go:172: (dbg) Run: out/minikube-linux-amd64 -p nospam-598251 --log_dir /tmp/nospam-598251 pause +=== RUN TestErrorSpam/unpause + error_spam_test.go:206: Cleaning up 0 logfile(s) ... + error_spam_test.go:149: (dbg) Run: out/minikube-linux-amd64 -p nospam-598251 --log_dir /tmp/nospam-598251 unpause + error_spam_test.go:149: (dbg) Run: out/minikube-linux-amd64 -p nospam-598251 --log_dir /tmp/nospam-598251 unpause + error_spam_test.go:172: (dbg) Run: out/minikube-linux-amd64 -p nospam-598251 --log_dir /tmp/nospam-598251 unpause +=== RUN TestErrorSpam/stop + error_spam_test.go:206: Cleaning up 0 logfile(s) ... + error_spam_test.go:149: (dbg) Run: out/minikube-linux-amd64 -p nospam-598251 --log_dir /tmp/nospam-598251 stop + error_spam_test.go:149: (dbg) Done: out/minikube-linux-amd64 -p nospam-598251 --log_dir /tmp/nospam-598251 stop: (10.479062753s) + error_spam_test.go:149: (dbg) Run: out/minikube-linux-amd64 -p nospam-598251 --log_dir /tmp/nospam-598251 stop + error_spam_test.go:172: (dbg) Run: out/minikube-linux-amd64 -p nospam-598251 --log_dir /tmp/nospam-598251 stop + helpers_test.go:175: Cleaning up "nospam-598251" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p nospam-598251 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p nospam-598251: (1.364967224s) +--- PASS: TestErrorSpam (34.05s) + --- PASS: TestErrorSpam/setup (19.58s) + --- PASS: TestErrorSpam/start (0.33s) + --- PASS: TestErrorSpam/status (0.59s) + --- PASS: TestErrorSpam/pause (0.79s) + --- PASS: TestErrorSpam/unpause (0.80s) + --- PASS: TestErrorSpam/stop (10.58s) +=== RUN TestFunctional +=== RUN TestFunctional/serial +=== RUN TestFunctional/serial/CopySyncFile + functional_test.go:1860: local sync path: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/files/etc/test/nested/copy/37869/hosts +=== RUN TestFunctional/serial/StartWithProxy + functional_test.go:2239: (dbg) Run: out/minikube-linux-amd64 start -p functional-172481 --memory=4096 --apiserver-port=8441 --wait=all --driver=docker --container-runtime=docker + functional_test.go:2239: (dbg) Done: out/minikube-linux-amd64 start -p functional-172481 --memory=4096 --apiserver-port=8441 --wait=all --driver=docker --container-runtime=docker: (1m4.784009472s) +=== RUN TestFunctional/serial/AuditLog +=== RUN TestFunctional/serial/SoftStart +I1102 22:53:41.640553 37869 config.go:182] Loaded profile config "functional-172481": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + functional_test.go:674: (dbg) Run: out/minikube-linux-amd64 start -p functional-172481 --alsologtostderr -v=8 + functional_test.go:674: (dbg) Done: out/minikube-linux-amd64 start -p functional-172481 --alsologtostderr -v=8: (55.240579954s) + functional_test.go:678: soft start took 55.240991994s for "functional-172481" cluster. 
+I1102 22:54:36.881379 37869 config.go:182] Loaded profile config "functional-172481": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 +=== RUN TestFunctional/serial/KubeContext + functional_test.go:696: (dbg) Run: kubectl config current-context +=== RUN TestFunctional/serial/KubectlGetPods + functional_test.go:711: (dbg) Run: kubectl --context functional-172481 get po -A +=== RUN TestFunctional/serial/CacheCmd +=== RUN TestFunctional/serial/CacheCmd/cache +=== RUN TestFunctional/serial/CacheCmd/cache/add_remote + functional_test.go:1064: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 cache add registry.k8s.io/pause:3.1 + functional_test.go:1064: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 cache add registry.k8s.io/pause:3.3 + functional_test.go:1064: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 cache add registry.k8s.io/pause:latest +=== RUN TestFunctional/serial/CacheCmd/cache/add_local + functional_test.go:1092: (dbg) Run: docker build -t minikube-local-cache-test:functional-172481 /tmp/TestFunctionalserialCacheCmdcacheadd_local2527681725/001 + functional_test.go:1104: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 cache add minikube-local-cache-test:functional-172481 + functional_test.go:1109: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 cache delete minikube-local-cache-test:functional-172481 + functional_test.go:1098: (dbg) Run: docker rmi minikube-local-cache-test:functional-172481 +=== RUN TestFunctional/serial/CacheCmd/cache/CacheDelete + functional_test.go:1117: (dbg) Run: out/minikube-linux-amd64 cache delete registry.k8s.io/pause:3.3 +=== RUN TestFunctional/serial/CacheCmd/cache/list + functional_test.go:1125: (dbg) Run: out/minikube-linux-amd64 cache list +=== RUN TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node + functional_test.go:1139: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh sudo crictl images +=== RUN TestFunctional/serial/CacheCmd/cache/cache_reload + functional_test.go:1162: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh sudo docker rmi registry.k8s.io/pause:latest + functional_test.go:1168: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh sudo crictl inspecti registry.k8s.io/pause:latest + functional_test.go:1168: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-172481 ssh sudo crictl inspecti registry.k8s.io/pause:latest: exit status 1 (178.710344ms) + + -- stdout -- + FATA[0000] no such image "registry.k8s.io/pause:latest" present + + -- /stdout -- + ** stderr ** + ssh: Process exited with status 1 + + ** /stderr ** + functional_test.go:1173: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 cache reload + functional_test.go:1178: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh sudo crictl inspecti registry.k8s.io/pause:latest +=== RUN TestFunctional/serial/CacheCmd/cache/delete + functional_test.go:1187: (dbg) Run: out/minikube-linux-amd64 cache delete registry.k8s.io/pause:3.1 + functional_test.go:1187: (dbg) Run: out/minikube-linux-amd64 cache delete registry.k8s.io/pause:latest +=== RUN TestFunctional/serial/MinikubeKubectlCmd + functional_test.go:731: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 kubectl -- --context functional-172481 get pods +=== RUN TestFunctional/serial/MinikubeKubectlCmdDirectly + functional_test.go:756: (dbg) Run: out/kubectl --context functional-172481 get pods +=== RUN TestFunctional/serial/ExtraConfig + functional_test.go:772: (dbg) Run: out/minikube-linux-amd64 
start -p functional-172481 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all +E1102 22:54:41.962053 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 22:54:41.968613 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 22:54:41.979923 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 22:54:42.001275 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 22:54:42.042548 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 22:54:42.123865 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 22:54:42.285124 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 22:54:42.606675 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 22:54:43.248058 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 22:54:44.529469 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 22:54:47.092251 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 22:54:52.214088 37869 cert_rotation.go:172] "Loading client cert failed" err="open 
/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 22:55:02.455574 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 22:55:22.937088 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + functional_test.go:772: (dbg) Done: out/minikube-linux-amd64 start -p functional-172481 --extra-config=apiserver.enable-admission-plugins=NamespaceAutoProvision --wait=all: (46.179408759s) + functional_test.go:776: restart took 46.179511948s for "functional-172481" cluster. +I1102 22:55:26.956410 37869 config.go:182] Loaded profile config "functional-172481": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 +=== RUN TestFunctional/serial/ComponentHealth + functional_test.go:825: (dbg) Run: kubectl --context functional-172481 get po -l tier=control-plane -n kube-system -o=json + functional_test.go:840: etcd phase: Running + functional_test.go:850: etcd status: Ready + functional_test.go:840: kube-apiserver phase: Running + functional_test.go:850: kube-apiserver status: Ready + functional_test.go:840: kube-controller-manager phase: Running + functional_test.go:850: kube-controller-manager status: Ready + functional_test.go:840: kube-scheduler phase: Running + functional_test.go:850: kube-scheduler status: Ready +=== RUN TestFunctional/serial/LogsCmd + functional_test.go:1251: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 logs +=== RUN TestFunctional/serial/LogsFileCmd + functional_test.go:1265: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 logs --file /tmp/TestFunctionalserialLogsFileCmd3157530293/001/logs.txt +=== RUN TestFunctional/serial/InvalidService + functional_test.go:2326: (dbg) Run: kubectl --context functional-172481 apply -f testdata/invalidsvc.yaml + functional_test.go:2340: (dbg) Run: out/minikube-linux-amd64 service invalid-svc -p functional-172481 + functional_test.go:2340: (dbg) Non-zero exit: out/minikube-linux-amd64 service invalid-svc -p functional-172481: exit status 115 (226.952123ms) + + -- stdout -- + ┌───────────┬─────────────┬─────────────┬───────────────────────────┐ + │ NAMESPACE │ NAME │ TARGET PORT │ URL │ + ├───────────┼─────────────┼─────────────┼───────────────────────────┤ + │ default │ invalid-svc │ 80 │ http://192.168.49.2:31077 │ + └───────────┴─────────────┴─────────────┴───────────────────────────┘ + + + + -- /stdout -- + ** stderr ** + X Exiting due to SVC_UNREACHABLE: service not available: no running pod for service invalid-svc found + * + ╭─────────────────────────────────────────────────────────────────────────────────────────────╮ + │ │ + │ * If the above advice does not help, please let us know: │ + │ https://github.com/kubernetes/minikube/issues/new/choose │ + │ │ + │ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. 
│ + │ * Please also attach the following file to the GitHub issue: │ + │ * - /tmp/minikube_service_55fa930f105421af19e8f86098918369481174a8_0.log │ + │ │ + ╰─────────────────────────────────────────────────────────────────────────────────────────────╯ + + ** /stderr ** + functional_test.go:2332: (dbg) Run: kubectl --context functional-172481 delete -f testdata/invalidsvc.yaml +=== RUN TestFunctional/parallel +=== RUN TestFunctional/parallel/ConfigCmd +=== PAUSE TestFunctional/parallel/ConfigCmd +=== RUN TestFunctional/parallel/DashboardCmd +=== PAUSE TestFunctional/parallel/DashboardCmd +=== RUN TestFunctional/parallel/DryRun +=== PAUSE TestFunctional/parallel/DryRun +=== RUN TestFunctional/parallel/InternationalLanguage +=== PAUSE TestFunctional/parallel/InternationalLanguage +=== RUN TestFunctional/parallel/StatusCmd +=== PAUSE TestFunctional/parallel/StatusCmd +=== RUN TestFunctional/parallel/MountCmd +=== PAUSE TestFunctional/parallel/MountCmd +=== RUN TestFunctional/parallel/ProfileCmd +=== PAUSE TestFunctional/parallel/ProfileCmd +=== RUN TestFunctional/parallel/ServiceCmd +=== PAUSE TestFunctional/parallel/ServiceCmd +=== RUN TestFunctional/parallel/ServiceCmdConnect +=== PAUSE TestFunctional/parallel/ServiceCmdConnect +=== RUN TestFunctional/parallel/AddonsCmd +=== PAUSE TestFunctional/parallel/AddonsCmd +=== RUN TestFunctional/parallel/PersistentVolumeClaim +=== PAUSE TestFunctional/parallel/PersistentVolumeClaim +=== RUN TestFunctional/parallel/TunnelCmd +=== PAUSE TestFunctional/parallel/TunnelCmd +=== RUN TestFunctional/parallel/SSHCmd +=== PAUSE TestFunctional/parallel/SSHCmd +=== RUN TestFunctional/parallel/CpCmd +=== PAUSE TestFunctional/parallel/CpCmd +=== RUN TestFunctional/parallel/MySQL +=== PAUSE TestFunctional/parallel/MySQL +=== RUN TestFunctional/parallel/FileSync +=== PAUSE TestFunctional/parallel/FileSync +=== RUN TestFunctional/parallel/CertSync +=== PAUSE TestFunctional/parallel/CertSync +=== RUN TestFunctional/parallel/UpdateContextCmd +=== PAUSE TestFunctional/parallel/UpdateContextCmd +=== RUN TestFunctional/parallel/DockerEnv +=== PAUSE TestFunctional/parallel/DockerEnv +=== RUN TestFunctional/parallel/PodmanEnv +=== PAUSE TestFunctional/parallel/PodmanEnv +=== RUN TestFunctional/parallel/NodeLabels +=== PAUSE TestFunctional/parallel/NodeLabels +=== RUN TestFunctional/parallel/ImageCommands +=== PAUSE TestFunctional/parallel/ImageCommands +=== RUN TestFunctional/parallel/NonActiveRuntimeDisabled +=== PAUSE TestFunctional/parallel/NonActiveRuntimeDisabled +=== RUN TestFunctional/parallel/Version +=== PAUSE TestFunctional/parallel/Version +=== RUN TestFunctional/parallel/License +=== PAUSE TestFunctional/parallel/License +=== CONT TestFunctional/parallel/ConfigCmd +=== CONT TestFunctional/parallel/CpCmd + functional_test.go:1214: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 config unset cpus + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 cp testdata/cp-test.txt /home/docker/cp-test.txt +=== CONT TestFunctional/parallel/SSHCmd + functional_test.go:1730: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "echo hello" +=== CONT TestFunctional/parallel/ServiceCmd +=== RUN TestFunctional/parallel/ServiceCmd/DeployApp + functional_test.go:1451: (dbg) Run: kubectl --context functional-172481 create deployment hello-node --image kicbase/echo-server + functional_test.go:1214: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 config get cpus + functional_test.go:1455: (dbg) Run: kubectl --context 
functional-172481 expose deployment hello-node --type=NodePort --port=8080 + functional_test.go:1214: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-172481 config get cpus: exit status 14 (36.896199ms) + + ** stderr ** + Error: specified key could not be found in config + + ** /stderr ** + functional_test.go:1214: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 config set cpus 2 + functional_test.go:1460: (dbg) TestFunctional/parallel/ServiceCmd/DeployApp: waiting 10m0s for pods matching "app=hello-node" in namespace "default" ... + helpers_test.go:352: "hello-node-75c85bcc94-gkmkc" [bf33d4c1-e1e8-40fe-80d1-bdfa42dd1a49] Pending / Ready:ContainersNotReady (containers with unready status: [echo-server]) / ContainersReady:ContainersNotReady (containers with unready status: [echo-server]) + functional_test.go:1214: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 config get cpus + functional_test.go:1214: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 config unset cpus + functional_test.go:1214: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 config get cpus + functional_test.go:1214: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-172481 config get cpus: exit status 14 (24.127657ms) + + ** stderr ** + Error: specified key could not be found in config + + ** /stderr ** +=== CONT TestFunctional/parallel/PodmanEnv + functional_test.go:565: only validate podman env with docker container runtime, currently testing docker +=== CONT TestFunctional/parallel/License + functional_test.go:2293: (dbg) Run: out/minikube-linux-amd64 license + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh -n functional-172481 "sudo cat /home/docker/cp-test.txt" + functional_test.go:1747: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "cat /etc/hostname" +=== CONT TestFunctional/parallel/Version +=== RUN TestFunctional/parallel/Version/short +=== PAUSE TestFunctional/parallel/Version/short +=== RUN TestFunctional/parallel/Version/components +=== PAUSE TestFunctional/parallel/Version/components +=== CONT TestFunctional/parallel/NonActiveRuntimeDisabled + functional_test.go:2032: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "sudo systemctl is-active crio" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 cp functional-172481:/home/docker/cp-test.txt /tmp/TestFunctionalparallelCpCmd2780791524/001/cp-test.txt +=== CONT TestFunctional/parallel/ImageCommands +=== RUN TestFunctional/parallel/ImageCommands/ImageListShort +=== PAUSE TestFunctional/parallel/ImageCommands/ImageListShort +=== RUN TestFunctional/parallel/ImageCommands/ImageListTable +=== PAUSE TestFunctional/parallel/ImageCommands/ImageListTable +=== RUN TestFunctional/parallel/ImageCommands/ImageListJson +=== PAUSE TestFunctional/parallel/ImageCommands/ImageListJson +=== RUN TestFunctional/parallel/ImageCommands/ImageListYaml +=== PAUSE TestFunctional/parallel/ImageCommands/ImageListYaml +=== RUN TestFunctional/parallel/ImageCommands/ImageBuild +=== PAUSE TestFunctional/parallel/ImageCommands/ImageBuild +=== RUN TestFunctional/parallel/ImageCommands/Setup + functional_test.go:357: (dbg) Run: docker pull kicbase/echo-server:1.0 + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh -n functional-172481 "sudo cat /home/docker/cp-test.txt" + functional_test.go:2032: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-172481 ssh "sudo systemctl is-active crio": exit status 1 
(194.709886ms) + + -- stdout -- + inactive + + -- /stdout -- + ** stderr ** + ssh: Process exited with status 3 + + ** /stderr ** +=== CONT TestFunctional/parallel/NodeLabels + functional_test.go:234: (dbg) Run: kubectl --context functional-172481 get nodes --output=go-template "--template='{{range $k, $v := (index .items 0).metadata.labels}}{{$k}} {{end}}'" +=== CONT TestFunctional/parallel/PersistentVolumeClaim + functional_test_pvc_test.go:50: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 4m0s for pods matching "integration-test=storage-provisioner" in namespace "kube-system" ... + helpers_test.go:352: "storage-provisioner" [1b4d9572-7f6f-4ac5-b952-d57a5ef297d3] Running + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 cp testdata/cp-test.txt /tmp/does/not/exist/cp-test.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh -n functional-172481 "sudo cat /tmp/does/not/exist/cp-test.txt" +=== CONT TestFunctional/parallel/TunnelCmd +=== RUN TestFunctional/parallel/TunnelCmd/serial +=== RUN TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel + functional_test_tunnel_test.go:154: (dbg) daemon: [out/minikube-linux-amd64 -p functional-172481 tunnel --alsologtostderr] + functional_test_tunnel_test.go:154: (dbg) daemon: [out/minikube-linux-amd64 -p functional-172481 tunnel --alsologtostderr] + functional_test.go:362: (dbg) Run: docker tag kicbase/echo-server:1.0 kicbase/echo-server:functional-172481 +=== RUN TestFunctional/parallel/ImageCommands/ImageLoadDaemon + functional_test.go:370: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image load --daemon kicbase/echo-server:functional-172481 --alsologtostderr + functional_test_tunnel_test.go:194: (dbg) stopping [out/minikube-linux-amd64 -p functional-172481 tunnel --alsologtostderr] ... + helpers_test.go:525: unable to kill pid 81414: os: process already finished + helpers_test.go:525: unable to kill pid 81260: os: process already finished + functional_test_tunnel_test.go:194: (dbg) stopping [out/minikube-linux-amd64 -p functional-172481 tunnel --alsologtostderr] ... + helpers_test.go:507: unable to find parent, assuming dead: process does not exist +=== RUN TestFunctional/parallel/TunnelCmd/serial/StartTunnel + functional_test_tunnel_test.go:129: (dbg) daemon: [out/minikube-linux-amd64 -p functional-172481 tunnel --alsologtostderr] +=== RUN TestFunctional/parallel/TunnelCmd/serial/WaitService +=== RUN TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup + functional_test_tunnel_test.go:212: (dbg) Run: kubectl --context functional-172481 apply -f testdata/testsvc.yaml + functional_test_tunnel_test.go:216: (dbg) TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup: waiting 4m0s for pods matching "run=nginx-svc" in namespace "default" ... 
+ helpers_test.go:352: "nginx-svc" [cd30ebe3-6311-43af-b467-a1a8158bf3f6] Pending / Ready:ContainersNotReady (containers with unready status: [nginx]) / ContainersReady:ContainersNotReady (containers with unready status: [nginx]) + functional_test.go:466: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image ls +=== RUN TestFunctional/parallel/ImageCommands/ImageReloadDaemon + functional_test.go:380: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image load --daemon kicbase/echo-server:functional-172481 --alsologtostderr + functional_test.go:466: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image ls +=== RUN TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon + functional_test.go:250: (dbg) Run: docker pull kicbase/echo-server:latest + functional_test.go:255: (dbg) Run: docker tag kicbase/echo-server:latest kicbase/echo-server:functional-172481 + functional_test.go:260: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image load --daemon kicbase/echo-server:functional-172481 --alsologtostderr + functional_test.go:466: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image ls +=== RUN TestFunctional/parallel/ImageCommands/ImageSaveToFile + functional_test.go:395: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image save kicbase/echo-server:functional-172481 /home/prow/go/src/k8s.io/minikube/echo-server-save.tar --alsologtostderr +=== RUN TestFunctional/parallel/ImageCommands/ImageRemove + functional_test.go:407: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image rm kicbase/echo-server:functional-172481 --alsologtostderr + functional_test.go:466: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image ls +=== RUN TestFunctional/parallel/ImageCommands/ImageLoadFromFile + functional_test.go:424: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image load /home/prow/go/src/k8s.io/minikube/echo-server-save.tar --alsologtostderr + functional_test.go:466: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image ls +=== RUN TestFunctional/parallel/ImageCommands/ImageSaveDaemon + functional_test.go:434: (dbg) Run: docker rmi kicbase/echo-server:functional-172481 + functional_test.go:439: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image save --daemon kicbase/echo-server:functional-172481 --alsologtostderr + functional_test.go:447: (dbg) Run: docker image inspect kicbase/echo-server:functional-172481 +=== CONT TestFunctional/parallel/AddonsCmd + functional_test.go:1695: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 addons list + functional_test.go:1707: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 addons list -o json +=== CONT TestFunctional/parallel/StatusCmd + functional_test.go:869: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 status + functional_test.go:875: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 status -f host:{{.Host}},kublet:{{.Kubelet}},apiserver:{{.APIServer}},kubeconfig:{{.Kubeconfig}} + functional_test.go:887: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 status -o json +=== CONT TestFunctional/parallel/ProfileCmd +=== RUN TestFunctional/parallel/ProfileCmd/profile_not_create + functional_test.go:1285: (dbg) Run: out/minikube-linux-amd64 profile lis + functional_test.go:1290: (dbg) Run: out/minikube-linux-amd64 profile list --output json + helpers_test.go:352: "hello-node-75c85bcc94-gkmkc" [bf33d4c1-e1e8-40fe-80d1-bdfa42dd1a49] Running +=== RUN TestFunctional/parallel/ProfileCmd/profile_list + functional_test.go:1325: 
(dbg) Run: out/minikube-linux-amd64 profile list + functional_test.go:1330: Took "214.973746ms" to run "out/minikube-linux-amd64 profile list" + functional_test.go:1339: (dbg) Run: out/minikube-linux-amd64 profile list -l + functional_test.go:1344: Took "28.743625ms" to run "out/minikube-linux-amd64 profile list -l" +=== RUN TestFunctional/parallel/ProfileCmd/profile_json_output + functional_test.go:1376: (dbg) Run: out/minikube-linux-amd64 profile list -o json + functional_test_pvc_test.go:50: (dbg) TestFunctional/parallel/PersistentVolumeClaim: integration-test=storage-provisioner healthy within 5.003382157s + functional_test_pvc_test.go:55: (dbg) Run: kubectl --context functional-172481 get storageclass -o=json + functional_test.go:1381: Took "214.048392ms" to run "out/minikube-linux-amd64 profile list -o json" + functional_test.go:1389: (dbg) Run: out/minikube-linux-amd64 profile list -o json --light + functional_test.go:1394: Took "31.180667ms" to run "out/minikube-linux-amd64 profile list -o json --light" +=== CONT TestFunctional/parallel/MountCmd +=== RUN TestFunctional/parallel/MountCmd/any-port + functional_test_mount_test.go:73: (dbg) daemon: [out/minikube-linux-amd64 mount -p functional-172481 /tmp/TestFunctionalparallelMountCmdany-port1194281090/001:/mount-9p --alsologtostderr -v=1] + functional_test_mount_test.go:107: wrote "test-1762124138002575476" to /tmp/TestFunctionalparallelMountCmdany-port1194281090/001/created-by-test + functional_test_mount_test.go:107: wrote "test-1762124138002575476" to /tmp/TestFunctionalparallelMountCmdany-port1194281090/001/created-by-test-removed-by-pod + functional_test_mount_test.go:107: wrote "test-1762124138002575476" to /tmp/TestFunctionalparallelMountCmdany-port1194281090/001/test-1762124138002575476 + functional_test_mount_test.go:115: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "findmnt -T /mount-9p | grep 9p" + functional_test_pvc_test.go:75: (dbg) Run: kubectl --context functional-172481 apply -f testdata/storage-provisioner/pvc.yaml + functional_test_pvc_test.go:82: (dbg) Run: kubectl --context functional-172481 get pvc myclaim -o=json + functional_test_pvc_test.go:131: (dbg) Run: kubectl --context functional-172481 apply -f testdata/storage-provisioner/pod.yaml +I1102 22:55:38.190034 37869 detect.go:223] nested VM detected + functional_test_pvc_test.go:140: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 6m0s for pods matching "test=storage-provisioner" in namespace "default" ... 
+ functional_test_mount_test.go:115: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-172481 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (188.786691ms) + + ** stderr ** + ssh: Process exited with status 1 + + ** /stderr ** +I1102 22:55:38.191594 37869 retry.go:31] will retry after 372.860648ms: exit status 1 + helpers_test.go:352: "sp-pod" [b2b4356f-85f2-4eb2-90f4-4d834765339c] Pending / Ready:ContainersNotReady (containers with unready status: [myfrontend]) / ContainersReady:ContainersNotReady (containers with unready status: [myfrontend]) + functional_test_mount_test.go:115: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "findmnt -T /mount-9p | grep 9p" + functional_test_mount_test.go:129: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh -- ls -la /mount-9p + functional_test_mount_test.go:133: guest mount directory contents + total 2 + -rw-r--r-- 1 docker docker 24 Nov 2 22:55 created-by-test + -rw-r--r-- 1 docker docker 24 Nov 2 22:55 created-by-test-removed-by-pod + -rw-r--r-- 1 docker docker 24 Nov 2 22:55 test-1762124138002575476 + functional_test_mount_test.go:137: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh cat /mount-9p/test-1762124138002575476 + functional_test_mount_test.go:148: (dbg) Run: kubectl --context functional-172481 replace --force -f testdata/busybox-mount-test.yaml + functional_test_mount_test.go:153: (dbg) TestFunctional/parallel/MountCmd/any-port: waiting 4m0s for pods matching "integration-test=busybox-mount" in namespace "default" ... + helpers_test.go:352: "busybox-mount" [cf04f292-65b0-4a12-bf22-23f0e40c54b9] Pending + helpers_test.go:352: "busybox-mount" [cf04f292-65b0-4a12-bf22-23f0e40c54b9] Pending / Ready:ContainersNotReady (containers with unready status: [mount-munger]) / ContainersReady:ContainersNotReady (containers with unready status: [mount-munger]) + functional_test.go:1460: (dbg) TestFunctional/parallel/ServiceCmd/DeployApp: app=hello-node healthy within 11.001821301s +=== RUN TestFunctional/parallel/ServiceCmd/List + functional_test.go:1469: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 service list +=== RUN TestFunctional/parallel/ServiceCmd/JSONOutput + functional_test.go:1499: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 service list -o json + functional_test.go:1504: Took "199.343772ms" to run "out/minikube-linux-amd64 -p functional-172481 service list -o json" +=== RUN TestFunctional/parallel/ServiceCmd/HTTPS + functional_test.go:1519: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 service --namespace=default --https --url hello-node + functional_test.go:1532: found endpoint: https://192.168.49.2:32413 +=== RUN TestFunctional/parallel/ServiceCmd/Format + functional_test.go:1550: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 service hello-node --url --format={{.IP}} +=== RUN TestFunctional/parallel/ServiceCmd/URL + functional_test.go:1569: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 service hello-node --url + functional_test.go:1575: found endpoint for hello-node: http://192.168.49.2:32413 +=== CONT TestFunctional/parallel/DryRun + functional_test.go:989: (dbg) Run: out/minikube-linux-amd64 start -p functional-172481 --dry-run --memory 250MB --alsologtostderr --driver=docker --container-runtime=docker + functional_test.go:989: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p functional-172481 --dry-run --memory 250MB --alsologtostderr --driver=docker --container-runtime=docker: exit status 23 (77.88732ms) + + -- stdout -- 
+ * [functional-172481] minikube v1.37.0 on Debian 12.12 (kvm/amd64) + - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true + - KUBECONFIG=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig + - MINIKUBE_HOME=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube + - MINIKUBE_BIN=out/minikube-linux-amd64 + - MINIKUBE_FORCE_SYSTEMD= + * Using the docker driver based on existing profile + + + + -- /stdout -- + ** stderr ** + I1102 22:55:44.518368 84388 out.go:360] Setting OutFile to fd 1 ... + I1102 22:55:44.518532 84388 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:55:44.518536 84388 out.go:374] Setting ErrFile to fd 2... + I1102 22:55:44.518539 84388 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:55:44.518646 84388 root.go:338] Updating PATH: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/bin + I1102 22:55:44.518862 84388 out.go:368] Setting JSON to false + I1102 22:55:44.526625 84388 start.go:133] hostinfo: {"hostname":"ec6b3253-b39b-4dea-b672-e2db97323995","uptime":1036479,"bootTime":1761087666,"procs":83,"os":"linux","platform":"debian","platformFamily":"debian","platformVersion":"12.12","kernelVersion":"6.6.97+","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"4a5d182f-ea69-4eda-bb72-26ec6daf619d"} + I1102 22:55:44.526680 84388 start.go:143] virtualization: kvm guest + I1102 22:55:44.526874 84388 out.go:179] * [functional-172481] minikube v1.37.0 on Debian 12.12 (kvm/amd64) + I1102 22:55:44.527030 84388 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true + I1102 22:55:44.527059 84388 notify.go:221] Checking for updates... 
+ I1102 22:55:44.527226 84388 out.go:179] - KUBECONFIG=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig + I1102 22:55:44.527324 84388 out.go:179] - MINIKUBE_HOME=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube + I1102 22:55:44.527477 84388 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64 + I1102 22:55:44.527604 84388 out.go:179] - MINIKUBE_FORCE_SYSTEMD= + I1102 22:55:44.527993 84388 config.go:182] Loaded profile config "functional-172481": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 22:55:44.528267 84388 driver.go:422] Setting default libvirt URI to qemu:///system + I1102 22:55:44.541199 84388 docker.go:124] docker version: linux-28.5.1:Docker Engine - Community + I1102 22:55:44.541259 84388 cli_runner.go:164] Run: docker system info --format "{{json .}}" + I1102 22:55:44.568506 84388 info.go:266] docker info: {ID:c2b1af59-a1e1-4db0-9aaa-240bf36ebecd Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:38 OomKillDisable:false NGoroutines:62 SystemTime:2025-11-02 22:55:44.562949953 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.6.97+ OperatingSystem:Debian GNU/Linux 12 (bookworm) (containerized) OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[https://mirror.gcr.io/] Secure:true Official:true}} Mirrors:[https://mirror.gcr.io/]} NCPU:8 MemTotal:65320062976 GenericResources: DockerRootDir:/docker-graph HTTPProxy: HTTPSProxy: NoProxy: Name:ec6b3253-b39b-4dea-b672-e2db97323995 Labels:[] ExperimentalBuild:false ServerVersion:28.5.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:b98a3aace656320842a23f4a392a33f46af97866 Expected:} RuncCommit:{ID:v1.3.0-0-g4ca628d1 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. 
Version:v0.29.1]] Warnings:}} + I1102 22:55:44.568562 84388 docker.go:319] overlay module found + I1102 22:55:44.568792 84388 out.go:179] * Using the docker driver based on existing profile + I1102 22:55:44.568947 84388 start.go:309] selected driver: docker + I1102 22:55:44.568952 84388 start.go:930] validating driver "docker" against &{Name:functional-172481 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:functional-172481 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} + I1102 22:55:44.568990 84388 start.go:941] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error: Reason: Fix: Doc: Version:} + I1102 22:55:44.569432 84388 out.go:203] + W1102 22:55:44.569752 84388 out.go:285] X Exiting due to RSRC_INSUFFICIENT_REQ_MEMORY: Requested memory allocation 250MiB is less than the usable minimum of 1800MB + X Exiting due to RSRC_INSUFFICIENT_REQ_MEMORY: Requested memory allocation 250MiB is less than the usable minimum of 1800MB + I1102 22:55:44.569832 84388 out.go:203] + + ** /stderr ** + functional_test.go:1006: (dbg) Run: out/minikube-linux-amd64 start -p functional-172481 --dry-run --alsologtostderr -v=1 --driver=docker --container-runtime=docker +=== CONT TestFunctional/parallel/InternationalLanguage + functional_test.go:1035: (dbg) Run: out/minikube-linux-amd64 start -p functional-172481 --dry-run --memory 250MB --alsologtostderr --driver=docker --container-runtime=docker + functional_test.go:1035: (dbg) Non-zero exit: 
out/minikube-linux-amd64 start -p functional-172481 --dry-run --memory 250MB --alsologtostderr --driver=docker --container-runtime=docker: exit status 23 (77.746479ms) + + -- stdout -- + * [functional-172481] minikube v1.37.0 sur Debian 12.12 (kvm/amd64) + - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true + - KUBECONFIG=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig + - MINIKUBE_HOME=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube + - MINIKUBE_BIN=out/minikube-linux-amd64 + - MINIKUBE_FORCE_SYSTEMD= + * Utilisation du pilote docker basé sur le profil existant + + + + -- /stdout -- + ** stderr ** + I1102 22:55:44.699484 84573 out.go:360] Setting OutFile to fd 1 ... + I1102 22:55:44.699552 84573 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:55:44.699555 84573 out.go:374] Setting ErrFile to fd 2... + I1102 22:55:44.699558 84573 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:55:44.699714 84573 root.go:338] Updating PATH: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/bin + I1102 22:55:44.699904 84573 out.go:368] Setting JSON to false + I1102 22:55:44.706690 84573 start.go:133] hostinfo: {"hostname":"ec6b3253-b39b-4dea-b672-e2db97323995","uptime":1036479,"bootTime":1761087666,"procs":83,"os":"linux","platform":"debian","platformFamily":"debian","platformVersion":"12.12","kernelVersion":"6.6.97+","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"4a5d182f-ea69-4eda-bb72-26ec6daf619d"} + I1102 22:55:44.706745 84573 start.go:143] virtualization: kvm guest + I1102 22:55:44.707029 84573 out.go:179] * [functional-172481] minikube v1.37.0 sur Debian 12.12 (kvm/amd64) + I1102 22:55:44.707225 84573 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true + I1102 22:55:44.707270 84573 notify.go:221] Checking for updates... 
+ I1102 22:55:44.707775 84573 out.go:179] - KUBECONFIG=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig + I1102 22:55:44.707905 84573 out.go:179] - MINIKUBE_HOME=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube + I1102 22:55:44.708062 84573 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64 + I1102 22:55:44.708212 84573 out.go:179] - MINIKUBE_FORCE_SYSTEMD= + I1102 22:55:44.708594 84573 config.go:182] Loaded profile config "functional-172481": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 22:55:44.708870 84573 driver.go:422] Setting default libvirt URI to qemu:///system + I1102 22:55:44.721288 84573 docker.go:124] docker version: linux-28.5.1:Docker Engine - Community + I1102 22:55:44.721357 84573 cli_runner.go:164] Run: docker system info --format "{{json .}}" + I1102 22:55:44.748347 84573 info.go:266] docker info: {ID:c2b1af59-a1e1-4db0-9aaa-240bf36ebecd Containers:1 ContainersRunning:1 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:38 OomKillDisable:false NGoroutines:62 SystemTime:2025-11-02 22:55:44.743150042 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.6.97+ OperatingSystem:Debian GNU/Linux 12 (bookworm) (containerized) OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[https://mirror.gcr.io/] Secure:true Official:true}} Mirrors:[https://mirror.gcr.io/]} NCPU:8 MemTotal:65320062976 GenericResources: DockerRootDir:/docker-graph HTTPProxy: HTTPSProxy: NoProxy: Name:ec6b3253-b39b-4dea-b672-e2db97323995 Labels:[] ExperimentalBuild:false ServerVersion:28.5.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:b98a3aace656320842a23f4a392a33f46af97866 Expected:} RuncCommit:{ID:v1.3.0-0-g4ca628d1 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. 
Version:v0.29.1]] Warnings:}} + I1102 22:55:44.748393 84573 docker.go:319] overlay module found + I1102 22:55:44.749030 84573 out.go:179] * Utilisation du pilote docker basé sur le profil existant + I1102 22:55:44.749728 84573 start.go:309] selected driver: docker + I1102 22:55:44.749733 84573 start.go:930] validating driver "docker" against &{Name:functional-172481 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:4096 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8441 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:functional-172481 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[{Component:apiserver Key:enable-admission-plugins Value:NamespaceAutoProvision}] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.49.2 Port:8441 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[default-storageclass:true storage-provisioner:true] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true apps_running:true default_sa:true extra:true kubelet:true node_ready:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} + I1102 22:55:44.749776 84573 start.go:941] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error: Reason: Fix: Doc: Version:} + I1102 22:55:44.750488 84573 out.go:203] + W1102 22:55:44.750904 84573 out.go:285] X Fermeture en raison de RSRC_INSUFFICIENT_REQ_MEMORY : L'allocation de mémoire demandée 250 Mio est inférieure au minimum utilisable de 1800 Mo + X Fermeture en raison de RSRC_INSUFFICIENT_REQ_MEMORY : L'allocation de mémoire demandée 250 Mio est inférieure au minimum utilisable de 1800 Mo + I1102 22:55:44.751014 84573 out.go:203] + + ** /stderr ** +=== CONT TestFunctional/parallel/DashboardCmd + functional_test.go:920: (dbg) daemon: [out/minikube-linux-amd64 dashboard --url --port 36195 -p functional-172481 --alsologtostderr -v=1] + helpers_test.go:352: "nginx-svc" [cd30ebe3-6311-43af-b467-a1a8158bf3f6] Running + functional_test_tunnel_test.go:216: (dbg) TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup: run=nginx-svc healthy 
within 22.002216365s +I1102 22:55:55.947172 37869 kapi.go:150] Service nginx-svc in namespace default found. +=== RUN TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP + functional_test_tunnel_test.go:234: (dbg) Run: kubectl --context functional-172481 get svc nginx-svc -o jsonpath={.status.loadBalancer.ingress[0].ip} +=== RUN TestFunctional/parallel/TunnelCmd/serial/AccessDirect + functional_test_tunnel_test.go:299: tunnel at http://10.106.65.170 is working! +=== RUN TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig + functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding +=== RUN TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil + functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding +=== RUN TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS + functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding +=== RUN TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel + functional_test_tunnel_test.go:434: (dbg) stopping [out/minikube-linux-amd64 -p functional-172481 tunnel --alsologtostderr] ... + functional_test_tunnel_test.go:437: failed to stop process: signal: terminated +=== CONT TestFunctional/parallel/CertSync + functional_test.go:1977: Checking for existence of /etc/ssl/certs/37869.pem within VM + functional_test.go:1978: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "sudo cat /etc/ssl/certs/37869.pem" + functional_test.go:1977: Checking for existence of /usr/share/ca-certificates/37869.pem within VM + functional_test.go:1978: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "sudo cat /usr/share/ca-certificates/37869.pem" + functional_test.go:1977: Checking for existence of /etc/ssl/certs/51391683.0 within VM + functional_test.go:1978: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "sudo cat /etc/ssl/certs/51391683.0" + functional_test.go:2004: Checking for existence of /etc/ssl/certs/378692.pem within VM + functional_test.go:2005: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "sudo cat /etc/ssl/certs/378692.pem" + functional_test.go:2004: Checking for existence of /usr/share/ca-certificates/378692.pem within VM + functional_test.go:2005: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "sudo cat /usr/share/ca-certificates/378692.pem" + functional_test.go:2004: Checking for existence of /etc/ssl/certs/3ec20f2e.0 within VM + functional_test.go:2005: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "sudo cat /etc/ssl/certs/3ec20f2e.0" +=== CONT TestFunctional/parallel/DockerEnv +=== RUN TestFunctional/parallel/DockerEnv/bash + functional_test.go:514: (dbg) Run: /bin/bash -c "eval $(out/minikube-linux-amd64 -p functional-172481 docker-env) && out/minikube-linux-amd64 status -p functional-172481" + functional_test.go:537: (dbg) Run: /bin/bash -c "eval $(out/minikube-linux-amd64 -p functional-172481 docker-env) && docker images" +=== CONT TestFunctional/parallel/UpdateContextCmd +=== RUN TestFunctional/parallel/UpdateContextCmd/no_changes +=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_changes +=== RUN TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster +=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster +=== RUN TestFunctional/parallel/UpdateContextCmd/no_clusters +=== PAUSE TestFunctional/parallel/UpdateContextCmd/no_clusters +=== 
CONT TestFunctional/parallel/FileSync + functional_test.go:1934: Checking for existence of /etc/test/nested/copy/37869/hosts within VM + functional_test.go:1936: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "sudo cat /etc/test/nested/copy/37869/hosts" + functional_test.go:1941: file sync test content: Test file for checking file sync process +=== CONT TestFunctional/parallel/ServiceCmdConnect + functional_test.go:1636: (dbg) Run: kubectl --context functional-172481 create deployment hello-node-connect --image kicbase/echo-server + functional_test.go:1640: (dbg) Run: kubectl --context functional-172481 expose deployment hello-node-connect --type=NodePort --port=8080 + functional_test.go:1645: (dbg) TestFunctional/parallel/ServiceCmdConnect: waiting 10m0s for pods matching "app=hello-node-connect" in namespace "default" ... + helpers_test.go:352: "hello-node-connect-7d85dfc575-6twzt" [2b078920-f30b-4de9-a615-37ede9c4d720] Pending / Ready:ContainersNotReady (containers with unready status: [echo-server]) / ContainersReady:ContainersNotReady (containers with unready status: [echo-server]) + helpers_test.go:352: "sp-pod" [b2b4356f-85f2-4eb2-90f4-4d834765339c] Running + helpers_test.go:352: "busybox-mount" [cf04f292-65b0-4a12-bf22-23f0e40c54b9] Pending / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted + helpers_test.go:352: "busybox-mount" [cf04f292-65b0-4a12-bf22-23f0e40c54b9] Succeeded / Initialized:PodCompleted / Ready:PodCompleted / ContainersReady:PodCompleted + functional_test_mount_test.go:153: (dbg) TestFunctional/parallel/MountCmd/any-port: integration-test=busybox-mount healthy within 24.002190859s + functional_test_mount_test.go:169: (dbg) Run: kubectl --context functional-172481 logs busybox-mount + functional_test_mount_test.go:181: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh stat /mount-9p/created-by-test + functional_test_mount_test.go:181: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh stat /mount-9p/created-by-pod + functional_test_mount_test.go:90: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "sudo umount -f /mount-9p" + functional_test_mount_test.go:94: (dbg) stopping [out/minikube-linux-amd64 mount -p functional-172481 /tmp/TestFunctionalparallelMountCmdany-port1194281090/001:/mount-9p --alsologtostderr -v=1] ... 
+=== RUN TestFunctional/parallel/MountCmd/specific-port + functional_test_mount_test.go:213: (dbg) daemon: [out/minikube-linux-amd64 mount -p functional-172481 /tmp/TestFunctionalparallelMountCmdspecific-port2700014481/001:/mount-9p --alsologtostderr -v=1 --port 46464] + functional_test_mount_test.go:243: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "findmnt -T /mount-9p | grep 9p" +E1102 22:56:03.899277 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + functional_test_mount_test.go:243: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-172481 ssh "findmnt -T /mount-9p | grep 9p": exit status 1 (175.341949ms) + + ** stderr ** + ssh: Process exited with status 1 + + ** /stderr ** +I1102 22:56:04.021712 37869 retry.go:31] will retry after 548.680288ms: exit status 1 + functional_test_mount_test.go:243: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "findmnt -T /mount-9p | grep 9p" + functional_test_mount_test.go:257: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh -- ls -la /mount-9p + functional_test_mount_test.go:261: guest mount directory contents + total 0 + functional_test_mount_test.go:263: (dbg) stopping [out/minikube-linux-amd64 mount -p functional-172481 /tmp/TestFunctionalparallelMountCmdspecific-port2700014481/001:/mount-9p --alsologtostderr -v=1 --port 46464] ... + functional_test_mount_test.go:264: reading mount text + functional_test_mount_test.go:278: done reading mount text + functional_test_mount_test.go:230: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "sudo umount -f /mount-9p" + functional_test_mount_test.go:230: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-172481 ssh "sudo umount -f /mount-9p": exit status 1 (174.048796ms) + + -- stdout -- + umount: /mount-9p: not mounted. + + -- /stdout -- + ** stderr ** + ssh: Process exited with status 32 + + ** /stderr ** + functional_test_mount_test.go:232: "out/minikube-linux-amd64 -p functional-172481 ssh \"sudo umount -f /mount-9p\"": exit status 1 + functional_test_mount_test.go:234: (dbg) stopping [out/minikube-linux-amd64 mount -p functional-172481 /tmp/TestFunctionalparallelMountCmdspecific-port2700014481/001:/mount-9p --alsologtostderr -v=1 --port 46464] ... 
+=== RUN TestFunctional/parallel/MountCmd/VerifyCleanup + functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-amd64 mount -p functional-172481 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1541670567/001:/mount1 --alsologtostderr -v=1] + functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-amd64 mount -p functional-172481 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1541670567/001:/mount2 --alsologtostderr -v=1] + functional_test_mount_test.go:298: (dbg) daemon: [out/minikube-linux-amd64 mount -p functional-172481 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1541670567/001:/mount3 --alsologtostderr -v=1] + functional_test_mount_test.go:325: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "findmnt -T" /mount1 + functional_test_mount_test.go:325: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-172481 ssh "findmnt -T" /mount1: exit status 1 (199.381846ms) + + ** stderr ** + ssh: Process exited with status 1 + + ** /stderr ** +I1102 22:56:05.509719 37869 retry.go:31] will retry after 459.154155ms: exit status 1 + functional_test_mount_test.go:325: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "findmnt -T" /mount1 + functional_test_mount_test.go:325: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "findmnt -T" /mount2 + functional_test_pvc_test.go:140: (dbg) TestFunctional/parallel/PersistentVolumeClaim: test=storage-provisioner healthy within 28.002687026s + functional_test_pvc_test.go:106: (dbg) Run: kubectl --context functional-172481 exec sp-pod -- touch /tmp/mount/foo + functional_test_pvc_test.go:112: (dbg) Run: kubectl --context functional-172481 delete -f testdata/storage-provisioner/pod.yaml + functional_test_mount_test.go:325: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh "findmnt -T" /mount3 + functional_test_mount_test.go:370: (dbg) Run: out/minikube-linux-amd64 mount -p functional-172481 --kill=true + functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-amd64 mount -p functional-172481 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1541670567/001:/mount1 --alsologtostderr -v=1] ... + helpers_test.go:507: unable to find parent, assuming dead: process does not exist + functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-amd64 mount -p functional-172481 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1541670567/001:/mount2 --alsologtostderr -v=1] ... + helpers_test.go:507: unable to find parent, assuming dead: process does not exist + functional_test_mount_test.go:313: (dbg) stopping [out/minikube-linux-amd64 mount -p functional-172481 /tmp/TestFunctionalparallelMountCmdVerifyCleanup1541670567/001:/mount3 --alsologtostderr -v=1] ... + helpers_test.go:507: unable to find parent, assuming dead: process does not exist +=== CONT TestFunctional/parallel/MySQL + functional_test.go:1798: (dbg) Run: kubectl --context functional-172481 replace --force -f testdata/mysql.yaml + functional_test.go:1804: (dbg) TestFunctional/parallel/MySQL: waiting 10m0s for pods matching "app=mysql" in namespace "default" ... 
+ functional_test_pvc_test.go:112: (dbg) Done: kubectl --context functional-172481 delete -f testdata/storage-provisioner/pod.yaml: (1.368069702s) + functional_test_pvc_test.go:131: (dbg) Run: kubectl --context functional-172481 apply -f testdata/storage-provisioner/pod.yaml +I1102 22:56:07.728726 37869 detect.go:223] nested VM detected + functional_test_pvc_test.go:140: (dbg) TestFunctional/parallel/PersistentVolumeClaim: waiting 6m0s for pods matching "test=storage-provisioner" in namespace "default" ... + helpers_test.go:352: "sp-pod" [de104226-26a9-46b5-977f-2c407eec4f87] Pending / Ready:ContainersNotReady (containers with unready status: [myfrontend]) / ContainersReady:ContainersNotReady (containers with unready status: [myfrontend]) + helpers_test.go:352: "mysql-5bb876957f-fqxfv" [cc6d0c44-a492-4158-bdb8-fc7b577f5c09] Pending / Ready:ContainersNotReady (containers with unready status: [mysql]) / ContainersReady:ContainersNotReady (containers with unready status: [mysql]) + helpers_test.go:352: "hello-node-connect-7d85dfc575-6twzt" [2b078920-f30b-4de9-a615-37ede9c4d720] Running +2025/11/02 22:56:14 [DEBUG] GET http://127.0.0.1:36195/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ + functional_test.go:925: (dbg) stopping [out/minikube-linux-amd64 dashboard --url --port 36195 -p functional-172481 --alsologtostderr -v=1] ... + helpers_test.go:525: unable to kill pid 84818: os: process already finished +=== CONT TestFunctional/parallel/Version/short + functional_test.go:2261: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 version --short +=== CONT TestFunctional/parallel/Version/components + functional_test.go:2275: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 version -o=json --components +=== CONT TestFunctional/parallel/ImageCommands/ImageListShort + functional_test.go:276: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image ls --format short --alsologtostderr + functional_test.go:281: (dbg) Stdout: out/minikube-linux-amd64 -p functional-172481 image ls --format short --alsologtostderr: + registry.k8s.io/pause:latest + registry.k8s.io/pause:3.3 + registry.k8s.io/pause:3.10.1 + registry.k8s.io/pause:3.1 + registry.k8s.io/kube-scheduler:v1.34.1 + registry.k8s.io/kube-proxy:v1.34.1 + registry.k8s.io/kube-controller-manager:v1.34.1 + registry.k8s.io/kube-apiserver:v1.34.1 + registry.k8s.io/etcd:3.6.4-0 + registry.k8s.io/coredns/coredns:v1.12.1 + gcr.io/k8s-minikube/storage-provisioner:v5 + gcr.io/k8s-minikube/busybox:1.28.4-glibc + docker.io/library/nginx:latest + docker.io/library/nginx:alpine + docker.io/library/minikube-local-cache-test:functional-172481 + docker.io/kubernetesui/metrics-scraper: + docker.io/kubernetesui/dashboard: + docker.io/kicbase/echo-server:latest + docker.io/kicbase/echo-server:functional-172481 + functional_test.go:284: (dbg) Stderr: out/minikube-linux-amd64 -p functional-172481 image ls --format short --alsologtostderr: + I1102 22:56:14.951959 88515 out.go:360] Setting OutFile to fd 1 ... + I1102 22:56:14.952132 88515 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:56:14.952136 88515 out.go:374] Setting ErrFile to fd 2... 
+ I1102 22:56:14.952139 88515 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:56:14.952245 88515 root.go:338] Updating PATH: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/bin + I1102 22:56:14.952612 88515 config.go:182] Loaded profile config "functional-172481": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 22:56:14.952672 88515 config.go:182] Loaded profile config "functional-172481": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 22:56:14.952927 88515 cli_runner.go:164] Run: docker container inspect functional-172481 --format={{.State.Status}} + I1102 22:56:14.963344 88515 ssh_runner.go:195] Run: systemctl --version + I1102 22:56:14.963372 88515 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-172481 + I1102 22:56:14.973510 88515 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32778 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/functional-172481/id_rsa Username:docker} + I1102 22:56:15.059752 88515 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}" +=== CONT TestFunctional/parallel/ImageCommands/ImageListYaml + functional_test.go:276: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image ls --format yaml --alsologtostderr + functional_test.go:281: (dbg) Stdout: out/minikube-linux-amd64 -p functional-172481 image ls --format yaml --alsologtostderr: + - id: cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f + repoDigests: [] + repoTags: + - registry.k8s.io/pause:3.10.1 + size: "736000" + - id: 9056ab77afb8e18e04303f11000a9d31b3f16b74c59475b899ae1b342d328d30 + repoDigests: [] + repoTags: + - docker.io/kicbase/echo-server:functional-172481 + - docker.io/kicbase/echo-server:latest + size: "4940000" + - id: 350b164e7ae1dcddeffadd65c76226c9b6dc5553f5179153fb0e36b78f2a5e06 + repoDigests: [] + repoTags: + - registry.k8s.io/pause:latest + size: "240000" + - id: 07655ddf2eebe5d250f7a72c25f638b27126805d61779741b4e62e69ba080558 + repoDigests: [] + repoTags: + - docker.io/kubernetesui/dashboard: + size: "246000000" + - id: da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e + repoDigests: [] + repoTags: + - registry.k8s.io/pause:3.1 + size: "742000" + - id: c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f + repoDigests: [] + repoTags: + - registry.k8s.io/kube-controller-manager:v1.34.1 + size: "74900000" + - id: c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97 + repoDigests: [] + repoTags: + - registry.k8s.io/kube-apiserver:v1.34.1 + size: "88000000" + - id: fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7 + repoDigests: [] + repoTags: + - registry.k8s.io/kube-proxy:v1.34.1 + size: "71900000" + - id: 52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969 + repoDigests: [] + repoTags: + - registry.k8s.io/coredns/coredns:v1.12.1 + size: "75000000" + - id: 0184c1613d92931126feb4c548e5da11015513b9e4c104e7305ee8b53b50a9da + repoDigests: [] + repoTags: + - registry.k8s.io/pause:3.3 + size: "683000" + - id: 56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c + repoDigests: [] + repoTags: + - gcr.io/k8s-minikube/busybox:1.28.4-glibc + size: "4400000" + - id: b977685e2b9c5379794ccb3ea145aace36599ebbd186700bb0a00971b70576af + repoDigests: [] + repoTags: + - 
docker.io/library/minikube-local-cache-test:functional-172481 + size: "30" + - id: 7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813 + repoDigests: [] + repoTags: + - registry.k8s.io/kube-scheduler:v1.34.1 + size: "52800000" + - id: 115053965e86b2df4d78af78d7951b8644839d20a03820c6df59a261103315f7 + repoDigests: [] + repoTags: + - docker.io/kubernetesui/metrics-scraper: + size: "43800000" + - id: 6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562 + repoDigests: [] + repoTags: + - gcr.io/k8s-minikube/storage-provisioner:v5 + size: "31500000" + - id: d4918ca78576a537caa7b0c043051c8efc1796de33fee8724ee0fff4a1cabed9 + repoDigests: [] + repoTags: + - docker.io/library/nginx:alpine + size: "52800000" + - id: 9d0e6f6199dcb6e045dad103064601d730fcfaf8d1bd357d969fb70bd5b90dec + repoDigests: [] + repoTags: + - docker.io/library/nginx:latest + size: "152000000" + - id: 5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115 + repoDigests: [] + repoTags: + - registry.k8s.io/etcd:3.6.4-0 + size: "195000000" + + functional_test.go:284: (dbg) Stderr: out/minikube-linux-amd64 -p functional-172481 image ls --format yaml --alsologtostderr: + I1102 22:56:15.101486 88570 out.go:360] Setting OutFile to fd 1 ... + I1102 22:56:15.101684 88570 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:56:15.101688 88570 out.go:374] Setting ErrFile to fd 2... + I1102 22:56:15.101691 88570 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:56:15.101802 88570 root.go:338] Updating PATH: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/bin + I1102 22:56:15.102183 88570 config.go:182] Loaded profile config "functional-172481": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 22:56:15.102238 88570 config.go:182] Loaded profile config "functional-172481": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 22:56:15.102499 88570 cli_runner.go:164] Run: docker container inspect functional-172481 --format={{.State.Status}} + I1102 22:56:15.112687 88570 ssh_runner.go:195] Run: systemctl --version + I1102 22:56:15.112722 88570 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-172481 + I1102 22:56:15.123052 88570 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32778 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/functional-172481/id_rsa Username:docker} + I1102 22:56:15.208728 88570 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}" +=== CONT TestFunctional/parallel/ImageCommands/ImageBuild + functional_test.go:323: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 ssh pgrep buildkitd + functional_test.go:323: (dbg) Non-zero exit: out/minikube-linux-amd64 -p functional-172481 ssh pgrep buildkitd: exit status 1 (176.262685ms) + + ** stderr ** + ssh: Process exited with status 1 + + ** /stderr ** + functional_test.go:330: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image build -t localhost/my-image:functional-172481 testdata/build --alsologtostderr + functional_test.go:1645: (dbg) TestFunctional/parallel/ServiceCmdConnect: app=hello-node-connect healthy within 18.002208187s + functional_test.go:1654: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 service hello-node-connect --url + functional_test.go:1660: found endpoint for hello-node-connect: 
http://192.168.49.2:32612 + functional_test.go:1680: http://192.168.49.2:32612: success! body: + Request served by hello-node-connect-7d85dfc575-6twzt + + HTTP/1.1 GET / + + Host: 192.168.49.2:32612 + Accept-Encoding: gzip + User-Agent: Go-http-client/1.1 +=== CONT TestFunctional/parallel/ImageCommands/ImageListJson + functional_test.go:276: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image ls --format json --alsologtostderr + functional_test.go:281: (dbg) Stdout: out/minikube-linux-amd64 -p functional-172481 image ls --format json --alsologtostderr: + [{"id":"7dd6aaa1717ab7eaae4578503e4c4d9965fcf5a249e8155fe16379ee9b6cb813","repoDigests":[],"repoTags":["registry.k8s.io/kube-scheduler:v1.34.1"],"size":"52800000"},{"id":"fc25172553d79197ecd840ec8dba1fba68330079355e974b04c1a441e6a4a0b7","repoDigests":[],"repoTags":["registry.k8s.io/kube-proxy:v1.34.1"],"size":"71900000"},{"id":"115053965e86b2df4d78af78d7951b8644839d20a03820c6df59a261103315f7","repoDigests":[],"repoTags":["docker.io/kubernetesui/metrics-scraper:\u003cnone\u003e"],"size":"43800000"},{"id":"0184c1613d92931126feb4c548e5da11015513b9e4c104e7305ee8b53b50a9da","repoDigests":[],"repoTags":["registry.k8s.io/pause:3.3"],"size":"683000"},{"id":"56cc512116c8f894f11ce1995460aef1ee0972d48bc2a3bdb1faaac7c020289c","repoDigests":[],"repoTags":["gcr.io/k8s-minikube/busybox:1.28.4-glibc"],"size":"4400000"},{"id":"d4918ca78576a537caa7b0c043051c8efc1796de33fee8724ee0fff4a1cabed9","repoDigests":[],"repoTags":["docker.io/library/nginx:alpine"],"size":"52800000"},{"id":"c80c8dbafe7dd71fc21527912a6dd20ccd1b71f3e561a5c28337388d0619538f","repoDigests":[],"repoTags":["registry.k8s.io/kube-controller-manager:v1.34.1"],"size":"74900000"},{"id":"cd073f4c5f6a8e9dc6f3125ba00cf60819cae95c1ec84a1f146ee4a9cf9e803f","repoDigests":[],"repoTags":["registry.k8s.io/pause:3.10.1"],"size":"736000"},{"id":"9056ab77afb8e18e04303f11000a9d31b3f16b74c59475b899ae1b342d328d30","repoDigests":[],"repoTags":["docker.io/kicbase/echo-server:functional-172481","docker.io/kicbase/echo-server:latest"],"size":"4940000"},{"id":"6e38f40d628db3002f5617342c8872c935de530d867d0f709a2fbda1a302a562","repoDigests":[],"repoTags":["gcr.io/k8s-minikube/storage-provisioner:v5"],"size":"31500000"},{"id":"da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e","repoDigests":[],"repoTags":["registry.k8s.io/pause:3.1"],"size":"742000"},{"id":"b977685e2b9c5379794ccb3ea145aace36599ebbd186700bb0a00971b70576af","repoDigests":[],"repoTags":["docker.io/library/minikube-local-cache-test:functional-172481"],"size":"30"},{"id":"c3994bc6961024917ec0aeee02e62828108c21a52d87648e30f3080d9cbadc97","repoDigests":[],"repoTags":["registry.k8s.io/kube-apiserver:v1.34.1"],"size":"88000000"},{"id":"5f1f5298c888daa46c4409ff4cefe5ca9d16e479419f94cdb5f5d5563dac0115","repoDigests":[],"repoTags":["registry.k8s.io/etcd:3.6.4-0"],"size":"195000000"},{"id":"07655ddf2eebe5d250f7a72c25f638b27126805d61779741b4e62e69ba080558","repoDigests":[],"repoTags":["docker.io/kubernetesui/dashboard:\u003cnone\u003e"],"size":"246000000"},{"id":"350b164e7ae1dcddeffadd65c76226c9b6dc5553f5179153fb0e36b78f2a5e06","repoDigests":[],"repoTags":["registry.k8s.io/pause:latest"],"size":"240000"},{"id":"9d0e6f6199dcb6e045dad103064601d730fcfaf8d1bd357d969fb70bd5b90dec","repoDigests":[],"repoTags":["docker.io/library/nginx:latest"],"size":"152000000"},{"id":"52546a367cc9e0d924aa3b190596a9167fa6e53245023b5b5baf0f07e5443969","repoDigests":[],"repoTags":["registry.k8s.io/coredns/coredns:v1.12.1"],"size":"75000000"}] + 
functional_test.go:284: (dbg) Stderr: out/minikube-linux-amd64 -p functional-172481 image ls --format json --alsologtostderr: + I1102 22:56:16.440652 88930 out.go:360] Setting OutFile to fd 1 ... + I1102 22:56:16.440754 88930 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:56:16.440759 88930 out.go:374] Setting ErrFile to fd 2... + I1102 22:56:16.440762 88930 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:56:16.440854 88930 root.go:338] Updating PATH: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/bin + I1102 22:56:16.441217 88930 config.go:182] Loaded profile config "functional-172481": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 22:56:16.441273 88930 config.go:182] Loaded profile config "functional-172481": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 22:56:16.441505 88930 cli_runner.go:164] Run: docker container inspect functional-172481 --format={{.State.Status}} + I1102 22:56:16.451453 88930 ssh_runner.go:195] Run: systemctl --version + I1102 22:56:16.451482 88930 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-172481 + I1102 22:56:16.461543 88930 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32778 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/functional-172481/id_rsa Username:docker} + I1102 22:56:16.546734 88930 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}" +=== CONT TestFunctional/parallel/ImageCommands/ImageListTable + functional_test.go:276: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image ls --format table --alsologtostderr + functional_test.go:281: (dbg) Stdout: out/minikube-linux-amd64 -p functional-172481 image ls --format table --alsologtostderr: + ┌─────────────────────────────────────────────┬───────────────────┬───────────────┬────────┐ + │ IMAGE │ TAG │ IMAGE ID │ SIZE │ + ├─────────────────────────────────────────────┼───────────────────┼───────────────┼────────┤ + │ registry.k8s.io/pause │ 3.3 │ 0184c1613d929 │ 683kB │ + │ docker.io/library/nginx │ latest │ 9d0e6f6199dcb │ 152MB │ + │ registry.k8s.io/kube-scheduler │ v1.34.1 │ 7dd6aaa1717ab │ 52.8MB │ + │ registry.k8s.io/etcd │ 3.6.4-0 │ 5f1f5298c888d │ 195MB │ + │ registry.k8s.io/pause │ 3.10.1 │ cd073f4c5f6a8 │ 736kB │ + │ registry.k8s.io/pause │ 3.1 │ da86e6ba6ca19 │ 742kB │ + │ docker.io/library/minikube-local-cache-test │ functional-172481 │ b977685e2b9c5 │ 30B │ + │ docker.io/library/nginx │ alpine │ d4918ca78576a │ 52.8MB │ + │ registry.k8s.io/pause │ latest │ 350b164e7ae1d │ 240kB │ + │ registry.k8s.io/kube-apiserver │ v1.34.1 │ c3994bc696102 │ 88MB │ + │ registry.k8s.io/coredns/coredns │ v1.12.1 │ 52546a367cc9e │ 75MB │ + │ docker.io/kubernetesui/dashboard │ │ 07655ddf2eebe │ 246MB │ + │ docker.io/kubernetesui/metrics-scraper │ │ 115053965e86b │ 43.8MB │ + │ gcr.io/k8s-minikube/busybox │ 1.28.4-glibc │ 56cc512116c8f │ 4.4MB │ + │ registry.k8s.io/kube-controller-manager │ v1.34.1 │ c80c8dbafe7dd │ 74.9MB │ + │ registry.k8s.io/kube-proxy │ v1.34.1 │ fc25172553d79 │ 71.9MB │ + │ docker.io/kicbase/echo-server │ functional-172481 │ 9056ab77afb8e │ 4.94MB │ + │ docker.io/kicbase/echo-server │ latest │ 9056ab77afb8e │ 4.94MB │ + │ gcr.io/k8s-minikube/storage-provisioner │ v5 │ 6e38f40d628db │ 31.5MB │ + 
└─────────────────────────────────────────────┴───────────────────┴───────────────┴────────┘ + functional_test.go:284: (dbg) Stderr: out/minikube-linux-amd64 -p functional-172481 image ls --format table --alsologtostderr: + I1102 22:56:16.587980 88986 out.go:360] Setting OutFile to fd 1 ... + I1102 22:56:16.588131 88986 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:56:16.588136 88986 out.go:374] Setting ErrFile to fd 2... + I1102 22:56:16.588139 88986 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:56:16.588241 88986 root.go:338] Updating PATH: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/bin + I1102 22:56:16.588585 88986 config.go:182] Loaded profile config "functional-172481": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 22:56:16.588641 88986 config.go:182] Loaded profile config "functional-172481": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 22:56:16.588863 88986 cli_runner.go:164] Run: docker container inspect functional-172481 --format={{.State.Status}} + I1102 22:56:16.598888 88986 ssh_runner.go:195] Run: systemctl --version + I1102 22:56:16.598925 88986 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-172481 + I1102 22:56:16.608826 88986 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32778 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/functional-172481/id_rsa Username:docker} + I1102 22:56:16.693805 88986 ssh_runner.go:195] Run: docker images --no-trunc --format "{{json .}}" +=== CONT TestFunctional/parallel/UpdateContextCmd/no_changes + functional_test.go:2124: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 update-context --alsologtostderr -v=2 +=== CONT TestFunctional/parallel/UpdateContextCmd/no_clusters + functional_test.go:2124: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 update-context --alsologtostderr -v=2 +=== CONT TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster + functional_test.go:2124: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 update-context --alsologtostderr -v=2 + functional_test.go:330: (dbg) Done: out/minikube-linux-amd64 -p functional-172481 image build -t localhost/my-image:functional-172481 testdata/build --alsologtostderr: (8.749054841s) + functional_test.go:338: (dbg) Stderr: out/minikube-linux-amd64 -p functional-172481 image build -t localhost/my-image:functional-172481 testdata/build --alsologtostderr: + I1102 22:56:15.430081 88712 out.go:360] Setting OutFile to fd 1 ... + I1102 22:56:15.430277 88712 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:56:15.430282 88712 out.go:374] Setting ErrFile to fd 2... 
+ I1102 22:56:15.430285 88712 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 22:56:15.430408 88712 root.go:338] Updating PATH: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/bin + I1102 22:56:15.430779 88712 config.go:182] Loaded profile config "functional-172481": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 22:56:15.431169 88712 config.go:182] Loaded profile config "functional-172481": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 22:56:15.431419 88712 cli_runner.go:164] Run: docker container inspect functional-172481 --format={{.State.Status}} + I1102 22:56:15.441512 88712 ssh_runner.go:195] Run: systemctl --version + I1102 22:56:15.441567 88712 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" functional-172481 + I1102 22:56:15.451408 88712 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32778 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/functional-172481/id_rsa Username:docker} + I1102 22:56:15.536508 88712 build_images.go:162] Building image from path: /tmp/build.3779219949.tar + I1102 22:56:15.536550 88712 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/build + I1102 22:56:15.541581 88712 ssh_runner.go:195] Run: stat -c "%s %y" /var/lib/minikube/build/build.3779219949.tar + I1102 22:56:15.543726 88712 ssh_runner.go:352] existence check for /var/lib/minikube/build/build.3779219949.tar: stat -c "%s %y" /var/lib/minikube/build/build.3779219949.tar: Process exited with status 1 + stdout: + + stderr: + stat: cannot statx '/var/lib/minikube/build/build.3779219949.tar': No such file or directory + I1102 22:56:15.543745 88712 ssh_runner.go:362] scp /tmp/build.3779219949.tar --> /var/lib/minikube/build/build.3779219949.tar (3072 bytes) + I1102 22:56:15.555491 88712 ssh_runner.go:195] Run: sudo mkdir -p /var/lib/minikube/build/build.3779219949 + I1102 22:56:15.560529 88712 ssh_runner.go:195] Run: sudo tar -C /var/lib/minikube/build/build.3779219949 -xf /var/lib/minikube/build/build.3779219949.tar + I1102 22:56:15.565539 88712 docker.go:361] Building image: /var/lib/minikube/build/build.3779219949 + I1102 22:56:15.565576 88712 ssh_runner.go:195] Run: docker build -t localhost/my-image:functional-172481 /var/lib/minikube/build/build.3779219949 + #0 building with "default" instance using docker driver + + #1 [internal] load build definition from Dockerfile + #1 transferring dockerfile: 97B done + #1 DONE 0.0s + + #2 [internal] load metadata for gcr.io/k8s-minikube/busybox:latest + #2 DONE 0.4s + + #3 [internal] load .dockerignore + #3 transferring context: 2B done + #3 DONE 0.0s + + #4 [internal] load build context + #4 transferring context: 62B done + #4 DONE 0.0s + + #5 [1/3] FROM gcr.io/k8s-minikube/busybox:latest@sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b + #5 resolve gcr.io/k8s-minikube/busybox:latest@sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b done + #5 sha256:ca5ae90100d50772da31f3b5016209e25ad61972404e2ccd83d44f10dee7e79b 770B / 770B done + #5 sha256:62ffc2ed7554e4c6d360bce40bbcf196573dd27c4ce080641a2c59867e732dee 527B / 527B done + #5 sha256:beae173ccac6ad749f76713cf4440fe3d21d1043fe616dfbe30775815d1d0f6a 1.46kB / 1.46kB done + #5 sha256:5cc84ad355aaa64f46ea9c7bbcc319a9d808ab15088a27209c9e70ef86e5a2aa 0B / 772.79kB 6.2s + #5 
sha256:5cc84ad355aaa64f46ea9c7bbcc319a9d808ab15088a27209c9e70ef86e5a2aa 772.79kB / 772.79kB 6.3s done + #5 extracting sha256:5cc84ad355aaa64f46ea9c7bbcc319a9d808ab15088a27209c9e70ef86e5a2aa done + #5 DONE 6.3s + + #6 [2/3] RUN true + #6 DONE 1.4s + + #7 [3/3] ADD content.txt / + #7 DONE 0.4s + + #8 exporting to image + #8 exporting layers done + #8 writing image sha256:12f9ad7acc72b6824fa25ff6389ab2abf127b499314ebd9fd5ec836be8362998 done + #8 naming to localhost/my-image:functional-172481 done + #8 DONE 0.0s + I1102 22:56:24.135858 88712 ssh_runner.go:235] Completed: docker build -t localhost/my-image:functional-172481 /var/lib/minikube/build/build.3779219949: (8.570259425s) + I1102 22:56:24.135963 88712 ssh_runner.go:195] Run: sudo rm -rf /var/lib/minikube/build/build.3779219949 + I1102 22:56:24.142837 88712 ssh_runner.go:195] Run: sudo rm -f /var/lib/minikube/build/build.3779219949.tar + I1102 22:56:24.148407 88712 build_images.go:218] Built localhost/my-image:functional-172481 from /tmp/build.3779219949.tar + I1102 22:56:24.148432 88712 build_images.go:134] succeeded building to: functional-172481 + I1102 22:56:24.148436 88712 build_images.go:135] failed building to: + functional_test.go:466: (dbg) Run: out/minikube-linux-amd64 -p functional-172481 image ls + helpers_test.go:352: "sp-pod" [de104226-26a9-46b5-977f-2c407eec4f87] Running + helpers_test.go:352: "mysql-5bb876957f-fqxfv" [cc6d0c44-a492-4158-bdb8-fc7b577f5c09] Running + functional_test.go:1804: (dbg) TestFunctional/parallel/MySQL: app=mysql healthy within 25.001900495s + functional_test.go:1812: (dbg) Run: kubectl --context functional-172481 exec mysql-5bb876957f-fqxfv -- mysql -ppassword -e "show databases;" + functional_test_pvc_test.go:140: (dbg) TestFunctional/parallel/PersistentVolumeClaim: test=storage-provisioner healthy within 25.001662335s + functional_test_pvc_test.go:120: (dbg) Run: kubectl --context functional-172481 exec sp-pod -- ls /tmp/mount +=== RUN TestFunctional/delete_echo-server_images + functional_test.go:205: (dbg) Run: docker rmi -f kicbase/echo-server:1.0 + functional_test.go:205: (dbg) Run: docker rmi -f kicbase/echo-server:functional-172481 +=== RUN TestFunctional/delete_my-image_image + functional_test.go:213: (dbg) Run: docker rmi -f localhost/my-image:functional-172481 +=== RUN TestFunctional/delete_minikube_cached_images + functional_test.go:221: (dbg) Run: docker rmi -f minikube-local-cache-test:functional-172481 + helpers_test.go:175: Cleaning up "functional-172481" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p functional-172481 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p functional-172481: (2.349797527s) +--- PASS: TestFunctional (238.33s) + --- PASS: TestFunctional/serial (175.49s) + --- PASS: TestFunctional/serial/CopySyncFile (0.00s) + --- PASS: TestFunctional/serial/StartWithProxy (64.78s) + --- PASS: TestFunctional/serial/AuditLog (0.00s) + --- PASS: TestFunctional/serial/SoftStart (55.24s) + --- PASS: TestFunctional/serial/KubeContext (0.03s) + --- PASS: TestFunctional/serial/KubectlGetPods (0.04s) + --- PASS: TestFunctional/serial/CacheCmd (3.71s) + --- PASS: TestFunctional/serial/CacheCmd/cache (3.71s) + --- PASS: TestFunctional/serial/CacheCmd/cache/add_remote (1.86s) + --- PASS: TestFunctional/serial/CacheCmd/cache/add_local (0.69s) + --- PASS: TestFunctional/serial/CacheCmd/cache/CacheDelete (0.03s) + --- PASS: TestFunctional/serial/CacheCmd/cache/list (0.03s) + --- PASS: TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node (0.19s) + --- PASS: TestFunctional/serial/CacheCmd/cache/cache_reload (0.87s) + --- PASS: TestFunctional/serial/CacheCmd/cache/delete (0.05s) + --- PASS: TestFunctional/serial/MinikubeKubectlCmd (0.06s) + --- PASS: TestFunctional/serial/MinikubeKubectlCmdDirectly (0.06s) + --- PASS: TestFunctional/serial/ExtraConfig (46.18s) + --- PASS: TestFunctional/serial/ComponentHealth (0.04s) + --- PASS: TestFunctional/serial/LogsCmd (0.56s) + --- PASS: TestFunctional/serial/LogsFileCmd (0.56s) + --- PASS: TestFunctional/serial/InvalidService (4.23s) + --- PASS: TestFunctional/parallel (0.00s) + --- PASS: TestFunctional/parallel/ConfigCmd (0.18s) + --- SKIP: TestFunctional/parallel/PodmanEnv (0.00s) + --- PASS: TestFunctional/parallel/SSHCmd (0.38s) + --- PASS: TestFunctional/parallel/License (0.30s) + --- PASS: TestFunctional/parallel/NonActiveRuntimeDisabled (0.19s) + --- PASS: TestFunctional/parallel/NodeLabels (0.04s) + --- PASS: TestFunctional/parallel/CpCmd (1.10s) + --- PASS: TestFunctional/parallel/AddonsCmd (0.07s) + --- PASS: TestFunctional/parallel/StatusCmd (0.61s) + --- PASS: TestFunctional/parallel/ProfileCmd (0.74s) + --- PASS: TestFunctional/parallel/ProfileCmd/profile_not_create (0.25s) + --- PASS: TestFunctional/parallel/ProfileCmd/profile_list (0.24s) + --- PASS: TestFunctional/parallel/ProfileCmd/profile_json_output (0.25s) + --- PASS: TestFunctional/parallel/ServiceCmd (12.14s) + --- PASS: TestFunctional/parallel/ServiceCmd/DeployApp (11.10s) + --- PASS: TestFunctional/parallel/ServiceCmd/List (0.21s) + --- PASS: TestFunctional/parallel/ServiceCmd/JSONOutput (0.20s) + --- PASS: TestFunctional/parallel/ServiceCmd/HTTPS (0.22s) + --- PASS: TestFunctional/parallel/ServiceCmd/Format (0.21s) + --- PASS: TestFunctional/parallel/ServiceCmd/URL (0.21s) + --- PASS: TestFunctional/parallel/DryRun (0.18s) + --- PASS: TestFunctional/parallel/InternationalLanguage (0.08s) + --- PASS: TestFunctional/parallel/TunnelCmd (22.64s) + --- PASS: TestFunctional/parallel/TunnelCmd/serial (22.64s) + --- PASS: TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel (0.40s) + --- PASS: TestFunctional/parallel/TunnelCmd/serial/StartTunnel (0.00s) + --- PASS: TestFunctional/parallel/TunnelCmd/serial/WaitService (22.14s) + --- PASS: TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup (22.10s) + --- PASS: TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP (0.03s) + --- PASS: TestFunctional/parallel/TunnelCmd/serial/AccessDirect (0.00s) + --- SKIP: 
TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig (0.00s) + --- SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil (0.00s) + --- SKIP: TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS (0.00s) + --- PASS: TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel (0.11s) + --- PASS: TestFunctional/parallel/CertSync (1.04s) + --- PASS: TestFunctional/parallel/DockerEnv (0.61s) + --- PASS: TestFunctional/parallel/DockerEnv/bash (0.61s) + --- PASS: TestFunctional/parallel/FileSync (0.17s) + --- PASS: TestFunctional/parallel/MountCmd (28.64s) + --- PASS: TestFunctional/parallel/MountCmd/any-port (25.84s) + --- PASS: TestFunctional/parallel/MountCmd/specific-port (1.46s) + --- PASS: TestFunctional/parallel/MountCmd/VerifyCleanup (1.33s) + --- PASS: TestFunctional/parallel/DashboardCmd (29.84s) + --- PASS: TestFunctional/parallel/Version (0.00s) + --- PASS: TestFunctional/parallel/Version/short (0.03s) + --- PASS: TestFunctional/parallel/Version/components (0.30s) + --- PASS: TestFunctional/parallel/ServiceCmdConnect (18.50s) + --- PASS: TestFunctional/parallel/UpdateContextCmd (0.00s) + --- PASS: TestFunctional/parallel/UpdateContextCmd/no_changes (0.07s) + --- PASS: TestFunctional/parallel/UpdateContextCmd/no_clusters (0.07s) + --- PASS: TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster (0.07s) + --- PASS: TestFunctional/parallel/ImageCommands (3.75s) + --- PASS: TestFunctional/parallel/ImageCommands/Setup (0.69s) + --- PASS: TestFunctional/parallel/ImageCommands/ImageLoadDaemon (0.62s) + --- PASS: TestFunctional/parallel/ImageCommands/ImageReloadDaemon (0.52s) + --- PASS: TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon (0.85s) + --- PASS: TestFunctional/parallel/ImageCommands/ImageSaveToFile (0.19s) + --- PASS: TestFunctional/parallel/ImageCommands/ImageRemove (0.29s) + --- PASS: TestFunctional/parallel/ImageCommands/ImageLoadFromFile (0.38s) + --- PASS: TestFunctional/parallel/ImageCommands/ImageSaveDaemon (0.21s) + --- PASS: TestFunctional/parallel/ImageCommands/ImageListShort (0.15s) + --- PASS: TestFunctional/parallel/ImageCommands/ImageListYaml (0.15s) + --- PASS: TestFunctional/parallel/ImageCommands/ImageListJson (0.15s) + --- PASS: TestFunctional/parallel/ImageCommands/ImageListTable (0.15s) + --- PASS: TestFunctional/parallel/ImageCommands/ImageBuild (9.09s) + --- PASS: TestFunctional/parallel/MySQL (25.18s) + --- PASS: TestFunctional/parallel/PersistentVolumeClaim (59.83s) + --- PASS: TestFunctional/delete_echo-server_images (0.02s) + --- PASS: TestFunctional/delete_my-image_image (0.01s) + --- PASS: TestFunctional/delete_minikube_cached_images (0.01s) +=== RUN TestFunctionalNewestKubernetes + functional_test.go:82: +--- SKIP: TestFunctionalNewestKubernetes (0.00s) +=== RUN TestGvisorAddon + gvisor_addon_test.go:34: skipping test because --gvisor=false +--- SKIP: TestGvisorAddon (0.00s) +=== RUN TestMultiControlPlane + ha_test.go:45: (dbg) Run: docker version -f {{.Server.Version}} +=== RUN TestMultiControlPlane/serial +=== RUN TestMultiControlPlane/serial/StartCluster + ha_test.go:101: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 start --ha --memory 3072 --wait true --alsologtostderr -v 5 --driver=docker --container-runtime=docker +E1102 22:57:25.822618 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" 
key="key" + ha_test.go:101: (dbg) Done: out/minikube-linux-amd64 -p ha-232417 start --ha --memory 3072 --wait true --alsologtostderr -v 5 --driver=docker --container-runtime=docker: (2m36.031949928s) + ha_test.go:107: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 status --alsologtostderr -v 5 +=== RUN TestMultiControlPlane/serial/DeployApp + ha_test.go:128: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- apply -f ./testdata/ha/ha-pod-dns-test.yaml + ha_test.go:133: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- rollout status deployment/busybox + ha_test.go:133: (dbg) Done: out/minikube-linux-amd64 -p ha-232417 kubectl -- rollout status deployment/busybox: (1.840615007s) + ha_test.go:140: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- get pods -o jsonpath='{.items[*].status.podIP}' + ha_test.go:163: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- get pods -o jsonpath='{.items[*].metadata.name}' + ha_test.go:171: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- exec busybox-7b57f96db7-65c6w -- nslookup kubernetes.io + ha_test.go:171: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- exec busybox-7b57f96db7-9k62h -- nslookup kubernetes.io + ha_test.go:171: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- exec busybox-7b57f96db7-twvnz -- nslookup kubernetes.io + ha_test.go:181: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- exec busybox-7b57f96db7-65c6w -- nslookup kubernetes.default + ha_test.go:181: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- exec busybox-7b57f96db7-9k62h -- nslookup kubernetes.default + ha_test.go:181: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- exec busybox-7b57f96db7-twvnz -- nslookup kubernetes.default + ha_test.go:189: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- exec busybox-7b57f96db7-65c6w -- nslookup kubernetes.default.svc.cluster.local + ha_test.go:189: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- exec busybox-7b57f96db7-9k62h -- nslookup kubernetes.default.svc.cluster.local + ha_test.go:189: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- exec busybox-7b57f96db7-twvnz -- nslookup kubernetes.default.svc.cluster.local +=== RUN TestMultiControlPlane/serial/PingHostFromPods + ha_test.go:199: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- get pods -o jsonpath='{.items[*].metadata.name}' + ha_test.go:207: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- exec busybox-7b57f96db7-65c6w -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3" + ha_test.go:218: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- exec busybox-7b57f96db7-65c6w -- sh -c "ping -c 1 192.168.49.1" + ha_test.go:207: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- exec busybox-7b57f96db7-9k62h -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3" + ha_test.go:218: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- exec busybox-7b57f96db7-9k62h -- sh -c "ping -c 1 192.168.49.1" + ha_test.go:207: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- exec busybox-7b57f96db7-twvnz -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3" + ha_test.go:218: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 kubectl -- exec busybox-7b57f96db7-twvnz -- sh -c "ping -c 1 192.168.49.1" +=== RUN TestMultiControlPlane/serial/AddWorkerNode + ha_test.go:228: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 node add --alsologtostderr -v 5 
+E1102 22:59:41.962201 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + ha_test.go:228: (dbg) Done: out/minikube-linux-amd64 -p ha-232417 node add --alsologtostderr -v 5: (36.333836541s) + ha_test.go:234: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 status --alsologtostderr -v 5 +=== RUN TestMultiControlPlane/serial/NodeLabels + ha_test.go:255: (dbg) Run: kubectl --context ha-232417 get nodes -o "jsonpath=[{range .items[*]}{.metadata.labels},{end}]" +=== RUN TestMultiControlPlane/serial/HAppyAfterClusterStart + ha_test.go:281: (dbg) Run: out/minikube-linux-amd64 profile list --output json +=== RUN TestMultiControlPlane/serial/CopyFile + ha_test.go:328: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 status --output json --alsologtostderr -v 5 + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp testdata/cp-test.txt ha-232417:/home/docker/cp-test.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp ha-232417:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile3325691845/001/cp-test_ha-232417.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp ha-232417:/home/docker/cp-test.txt ha-232417-m02:/home/docker/cp-test_ha-232417_ha-232417-m02.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m02 "sudo cat /home/docker/cp-test_ha-232417_ha-232417-m02.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp ha-232417:/home/docker/cp-test.txt ha-232417-m03:/home/docker/cp-test_ha-232417_ha-232417-m03.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m03 "sudo cat /home/docker/cp-test_ha-232417_ha-232417-m03.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp ha-232417:/home/docker/cp-test.txt ha-232417-m04:/home/docker/cp-test_ha-232417_ha-232417-m04.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m04 "sudo cat /home/docker/cp-test_ha-232417_ha-232417-m04.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp testdata/cp-test.txt ha-232417-m02:/home/docker/cp-test.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m02 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp ha-232417-m02:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile3325691845/001/cp-test_ha-232417-m02.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m02 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 
-p ha-232417 cp ha-232417-m02:/home/docker/cp-test.txt ha-232417:/home/docker/cp-test_ha-232417-m02_ha-232417.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m02 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417 "sudo cat /home/docker/cp-test_ha-232417-m02_ha-232417.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp ha-232417-m02:/home/docker/cp-test.txt ha-232417-m03:/home/docker/cp-test_ha-232417-m02_ha-232417-m03.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m02 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m03 "sudo cat /home/docker/cp-test_ha-232417-m02_ha-232417-m03.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp ha-232417-m02:/home/docker/cp-test.txt ha-232417-m04:/home/docker/cp-test_ha-232417-m02_ha-232417-m04.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m02 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m04 "sudo cat /home/docker/cp-test_ha-232417-m02_ha-232417-m04.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp testdata/cp-test.txt ha-232417-m03:/home/docker/cp-test.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m03 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp ha-232417-m03:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile3325691845/001/cp-test_ha-232417-m03.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m03 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp ha-232417-m03:/home/docker/cp-test.txt ha-232417:/home/docker/cp-test_ha-232417-m03_ha-232417.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m03 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417 "sudo cat /home/docker/cp-test_ha-232417-m03_ha-232417.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp ha-232417-m03:/home/docker/cp-test.txt ha-232417-m02:/home/docker/cp-test_ha-232417-m03_ha-232417-m02.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m03 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m02 "sudo cat /home/docker/cp-test_ha-232417-m03_ha-232417-m02.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp ha-232417-m03:/home/docker/cp-test.txt ha-232417-m04:/home/docker/cp-test_ha-232417-m03_ha-232417-m04.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m03 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m04 "sudo cat /home/docker/cp-test_ha-232417-m03_ha-232417-m04.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp testdata/cp-test.txt ha-232417-m04:/home/docker/cp-test.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m04 "sudo cat 
/home/docker/cp-test.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp ha-232417-m04:/home/docker/cp-test.txt /tmp/TestMultiControlPlaneserialCopyFile3325691845/001/cp-test_ha-232417-m04.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m04 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp ha-232417-m04:/home/docker/cp-test.txt ha-232417:/home/docker/cp-test_ha-232417-m04_ha-232417.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m04 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417 "sudo cat /home/docker/cp-test_ha-232417-m04_ha-232417.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp ha-232417-m04:/home/docker/cp-test.txt ha-232417-m02:/home/docker/cp-test_ha-232417-m04_ha-232417-m02.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m04 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m02 "sudo cat /home/docker/cp-test_ha-232417-m04_ha-232417-m02.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 cp ha-232417-m04:/home/docker/cp-test.txt ha-232417-m03:/home/docker/cp-test_ha-232417-m04_ha-232417-m03.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m04 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 ssh -n ha-232417-m03 "sudo cat /home/docker/cp-test_ha-232417-m04_ha-232417-m03.txt" +=== RUN TestMultiControlPlane/serial/StopSecondaryNode + ha_test.go:365: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 node stop m02 --alsologtostderr -v 5 +E1102 23:00:09.667053 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + ha_test.go:365: (dbg) Done: out/minikube-linux-amd64 -p ha-232417 node stop m02 --alsologtostderr -v 5: (10.514384316s) + ha_test.go:371: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 status --alsologtostderr -v 5 + ha_test.go:371: (dbg) Non-zero exit: out/minikube-linux-amd64 -p ha-232417 status --alsologtostderr -v 5: exit status 7 (483.951568ms) + + -- stdout -- + ha-232417 + type: Control Plane + host: Running + kubelet: Running + apiserver: Running + kubeconfig: Configured + + ha-232417-m02 + type: Control Plane + host: Stopped + kubelet: Stopped + apiserver: Stopped + kubeconfig: Stopped + + ha-232417-m03 + type: Control Plane + host: Running + kubelet: Running + apiserver: Running + kubeconfig: Configured + + ha-232417-m04 + type: Worker + host: Running + kubelet: Running + + + -- /stdout -- + ** stderr ** + I1102 23:00:14.671215 113407 out.go:360] Setting OutFile to fd 1 ... + I1102 23:00:14.671302 113407 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 23:00:14.671306 113407 out.go:374] Setting ErrFile to fd 2... 
+ I1102 23:00:14.671308 113407 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 23:00:14.671409 113407 root.go:338] Updating PATH: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/bin + I1102 23:00:14.671520 113407 out.go:368] Setting JSON to false + I1102 23:00:14.671532 113407 mustload.go:66] Loading cluster: ha-232417 + I1102 23:00:14.671632 113407 notify.go:221] Checking for updates... + I1102 23:00:14.671844 113407 config.go:182] Loaded profile config "ha-232417": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 23:00:14.671853 113407 status.go:174] checking status of ha-232417 ... + I1102 23:00:14.672246 113407 cli_runner.go:164] Run: docker container inspect ha-232417 --format={{.State.Status}} + I1102 23:00:14.683895 113407 status.go:371] ha-232417 host status = "Running" (err=) + I1102 23:00:14.683908 113407 host.go:66] Checking if "ha-232417" exists ... + I1102 23:00:14.684122 113407 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-232417 + I1102 23:00:14.694694 113407 host.go:66] Checking if "ha-232417" exists ... + I1102 23:00:14.694877 113407 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'" + I1102 23:00:14.694908 113407 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-232417 + I1102 23:00:14.705689 113407 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32783 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/ha-232417/id_rsa Username:docker} + I1102 23:00:14.791007 113407 ssh_runner.go:195] Run: systemctl --version + I1102 23:00:14.794711 113407 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet + I1102 23:00:14.802128 113407 cli_runner.go:164] Run: docker system info --format "{{json .}}" + I1102 23:00:14.832756 113407 info.go:266] docker info: {ID:c2b1af59-a1e1-4db0-9aaa-240bf36ebecd Containers:4 ContainersRunning:3 ContainersPaused:0 ContainersStopped:1 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:66 OomKillDisable:false NGoroutines:81 SystemTime:2025-11-02 23:00:14.826825069 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.6.97+ OperatingSystem:Debian GNU/Linux 12 (bookworm) (containerized) OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[https://mirror.gcr.io/] Secure:true Official:true}} Mirrors:[https://mirror.gcr.io/]} NCPU:8 MemTotal:65320062976 GenericResources: DockerRootDir:/docker-graph HTTPProxy: HTTPSProxy: NoProxy: Name:ec6b3253-b39b-4dea-b672-e2db97323995 Labels:[] ExperimentalBuild:false ServerVersion:28.5.1 ClusterStore: 
ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:b98a3aace656320842a23f4a392a33f46af97866 Expected:} RuncCommit:{ID:v1.3.0-0-g4ca628d1 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.29.1]] Warnings:}} + I1102 23:00:14.833092 113407 kubeconfig.go:125] found "ha-232417" server: "https://192.168.49.254:8443" + I1102 23:00:14.833107 113407 api_server.go:166] Checking apiserver status ... + I1102 23:00:14.833135 113407 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.* + I1102 23:00:14.841513 113407 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/2152/cgroup + W1102 23:00:14.846816 113407 api_server.go:177] unable to find freezer cgroup: sudo egrep ^[0-9]+:freezer: /proc/2152/cgroup: Process exited with status 1 + stdout: + + stderr: + I1102 23:00:14.846858 113407 ssh_runner.go:195] Run: ls + I1102 23:00:14.849042 113407 api_server.go:253] Checking apiserver healthz at https://192.168.49.254:8443/healthz ... + I1102 23:00:14.851320 113407 api_server.go:279] https://192.168.49.254:8443/healthz returned 200: + ok + I1102 23:00:14.851331 113407 status.go:463] ha-232417 apiserver status = Running (err=) + I1102 23:00:14.851336 113407 status.go:176] ha-232417 status: &{Name:ha-232417 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false TimeToStop: DockerEnv: PodManEnv:} + I1102 23:00:14.851344 113407 status.go:174] checking status of ha-232417-m02 ... + I1102 23:00:14.851523 113407 cli_runner.go:164] Run: docker container inspect ha-232417-m02 --format={{.State.Status}} + I1102 23:00:14.861485 113407 status.go:371] ha-232417-m02 host status = "Stopped" (err=) + I1102 23:00:14.861493 113407 status.go:384] host is not running, skipping remaining checks + I1102 23:00:14.861496 113407 status.go:176] ha-232417-m02 status: &{Name:ha-232417-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:} + I1102 23:00:14.861506 113407 status.go:174] checking status of ha-232417-m03 ... + I1102 23:00:14.861704 113407 cli_runner.go:164] Run: docker container inspect ha-232417-m03 --format={{.State.Status}} + I1102 23:00:14.871490 113407 status.go:371] ha-232417-m03 host status = "Running" (err=) + I1102 23:00:14.871500 113407 host.go:66] Checking if "ha-232417-m03" exists ... + I1102 23:00:14.871672 113407 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-232417-m03 + I1102 23:00:14.881705 113407 host.go:66] Checking if "ha-232417-m03" exists ... 
+ I1102 23:00:14.881882 113407 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'" + I1102 23:00:14.881924 113407 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-232417-m03 + I1102 23:00:14.891863 113407 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32793 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/ha-232417-m03/id_rsa Username:docker} + I1102 23:00:14.975937 113407 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet + I1102 23:00:14.983360 113407 kubeconfig.go:125] found "ha-232417" server: "https://192.168.49.254:8443" + I1102 23:00:14.983370 113407 api_server.go:166] Checking apiserver status ... + I1102 23:00:14.983395 113407 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.* + I1102 23:00:14.990823 113407 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/2103/cgroup + W1102 23:00:14.995829 113407 api_server.go:177] unable to find freezer cgroup: sudo egrep ^[0-9]+:freezer: /proc/2103/cgroup: Process exited with status 1 + stdout: + + stderr: + I1102 23:00:14.995869 113407 ssh_runner.go:195] Run: ls + I1102 23:00:14.997931 113407 api_server.go:253] Checking apiserver healthz at https://192.168.49.254:8443/healthz ... + I1102 23:00:15.000036 113407 api_server.go:279] https://192.168.49.254:8443/healthz returned 200: + ok + I1102 23:00:15.000050 113407 status.go:463] ha-232417-m03 apiserver status = Running (err=) + I1102 23:00:15.000054 113407 status.go:176] ha-232417-m03 status: &{Name:ha-232417-m03 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false TimeToStop: DockerEnv: PodManEnv:} + I1102 23:00:15.000078 113407 status.go:174] checking status of ha-232417-m04 ... + I1102 23:00:15.000244 113407 cli_runner.go:164] Run: docker container inspect ha-232417-m04 --format={{.State.Status}} + I1102 23:00:15.010617 113407 status.go:371] ha-232417-m04 host status = "Running" (err=) + I1102 23:00:15.010626 113407 host.go:66] Checking if "ha-232417-m04" exists ... + I1102 23:00:15.010783 113407 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" ha-232417-m04 + I1102 23:00:15.021202 113407 host.go:66] Checking if "ha-232417-m04" exists ... 
+ I1102 23:00:15.021348 113407 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'" + I1102 23:00:15.021373 113407 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" ha-232417-m04 + I1102 23:00:15.031459 113407 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32798 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/ha-232417-m04/id_rsa Username:docker} + I1102 23:00:15.115751 113407 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet + I1102 23:00:15.123486 113407 status.go:176] ha-232417-m04 status: &{Name:ha-232417-m04 Host:Running Kubelet:Running APIServer:Irrelevant Kubeconfig:Irrelevant Worker:true TimeToStop: DockerEnv: PodManEnv:} + + ** /stderr ** +=== RUN TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop + ha_test.go:392: (dbg) Run: out/minikube-linux-amd64 profile list --output json +=== RUN TestMultiControlPlane/serial/RestartSecondaryNode + ha_test.go:422: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 node start m02 --alsologtostderr -v 5 +E1102 23:00:32.448006 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:00:32.454307 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:00:32.465591 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:00:32.486857 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:00:32.528137 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:00:32.609371 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:00:32.770787 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:00:33.092028 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" 
logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:00:33.733946 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:00:35.016095 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:00:37.578032 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:00:42.699960 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + ha_test.go:422: (dbg) Done: out/minikube-linux-amd64 -p ha-232417 node start m02 --alsologtostderr -v 5: (32.983042781s) + ha_test.go:430: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 status --alsologtostderr -v 5 + ha_test.go:450: (dbg) Run: kubectl get nodes +=== RUN TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart + ha_test.go:281: (dbg) Run: out/minikube-linux-amd64 profile list --output json +=== RUN TestMultiControlPlane/serial/RestartClusterKeepsNodes + ha_test.go:458: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 node list --alsologtostderr -v 5 + ha_test.go:464: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 stop --alsologtostderr -v 5 +E1102 23:00:52.941133 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:01:13.423290 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + ha_test.go:464: (dbg) Done: out/minikube-linux-amd64 -p ha-232417 stop --alsologtostderr -v 5: (32.67535357s) + ha_test.go:469: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 start --wait true --alsologtostderr -v 5 +E1102 23:01:54.385083 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:03:16.306903 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + ha_test.go:469: (dbg) Done: out/minikube-linux-amd64 -p ha-232417 start --wait true --alsologtostderr -v 5: (2m14.42220118s) + ha_test.go:474: (dbg) Run: out/minikube-linux-amd64 
-p ha-232417 node list --alsologtostderr -v 5 +=== RUN TestMultiControlPlane/serial/DeleteSecondaryNode + ha_test.go:489: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 node delete m03 --alsologtostderr -v 5 + ha_test.go:489: (dbg) Done: out/minikube-linux-amd64 -p ha-232417 node delete m03 --alsologtostderr -v 5: (7.538522068s) + ha_test.go:495: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 status --alsologtostderr -v 5 + ha_test.go:513: (dbg) Run: kubectl get nodes + ha_test.go:521: (dbg) Run: kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'" +=== RUN TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete + ha_test.go:392: (dbg) Run: out/minikube-linux-amd64 profile list --output json +=== RUN TestMultiControlPlane/serial/StopCluster + ha_test.go:533: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 stop --alsologtostderr -v 5 + ha_test.go:533: (dbg) Done: out/minikube-linux-amd64 -p ha-232417 stop --alsologtostderr -v 5: (31.472105916s) + ha_test.go:539: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 status --alsologtostderr -v 5 + ha_test.go:539: (dbg) Non-zero exit: out/minikube-linux-amd64 -p ha-232417 status --alsologtostderr -v 5: exit status 7 (62.708125ms) + + -- stdout -- + ha-232417 + type: Control Plane + host: Stopped + kubelet: Stopped + apiserver: Stopped + kubeconfig: Stopped + + ha-232417-m02 + type: Control Plane + host: Stopped + kubelet: Stopped + apiserver: Stopped + kubeconfig: Stopped + + ha-232417-m04 + type: Worker + host: Stopped + kubelet: Stopped + + + -- /stdout -- + ** stderr ** + I1102 23:04:17.146532 142205 out.go:360] Setting OutFile to fd 1 ... + I1102 23:04:17.146705 142205 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 23:04:17.146709 142205 out.go:374] Setting ErrFile to fd 2... + I1102 23:04:17.146712 142205 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 23:04:17.146821 142205 root.go:338] Updating PATH: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/bin + I1102 23:04:17.146951 142205 out.go:368] Setting JSON to false + I1102 23:04:17.146962 142205 mustload.go:66] Loading cluster: ha-232417 + I1102 23:04:17.147035 142205 notify.go:221] Checking for updates... + I1102 23:04:17.147258 142205 config.go:182] Loaded profile config "ha-232417": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 23:04:17.147266 142205 status.go:174] checking status of ha-232417 ... + I1102 23:04:17.147627 142205 cli_runner.go:164] Run: docker container inspect ha-232417 --format={{.State.Status}} + I1102 23:04:17.158687 142205 status.go:371] ha-232417 host status = "Stopped" (err=) + I1102 23:04:17.158700 142205 status.go:384] host is not running, skipping remaining checks + I1102 23:04:17.158705 142205 status.go:176] ha-232417 status: &{Name:ha-232417 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:} + I1102 23:04:17.158978 142205 status.go:174] checking status of ha-232417-m02 ... 
+ I1102 23:04:17.159177 142205 cli_runner.go:164] Run: docker container inspect ha-232417-m02 --format={{.State.Status}} + I1102 23:04:17.169862 142205 status.go:371] ha-232417-m02 host status = "Stopped" (err=) + I1102 23:04:17.169871 142205 status.go:384] host is not running, skipping remaining checks + I1102 23:04:17.169876 142205 status.go:176] ha-232417-m02 status: &{Name:ha-232417-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:} + I1102 23:04:17.169886 142205 status.go:174] checking status of ha-232417-m04 ... + I1102 23:04:17.170102 142205 cli_runner.go:164] Run: docker container inspect ha-232417-m04 --format={{.State.Status}} + I1102 23:04:17.179796 142205 status.go:371] ha-232417-m04 host status = "Stopped" (err=) + I1102 23:04:17.179805 142205 status.go:384] host is not running, skipping remaining checks + I1102 23:04:17.179809 142205 status.go:176] ha-232417-m04 status: &{Name:ha-232417-m04 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:} + + ** /stderr ** +=== RUN TestMultiControlPlane/serial/RestartCluster + ha_test.go:562: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 start --wait true --alsologtostderr -v 5 --driver=docker --container-runtime=docker +E1102 23:04:41.970055 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:05:32.448290 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + ha_test.go:562: (dbg) Done: out/minikube-linux-amd64 -p ha-232417 start --wait true --alsologtostderr -v 5 --driver=docker --container-runtime=docker: (1m35.286809734s) + ha_test.go:568: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 status --alsologtostderr -v 5 + ha_test.go:586: (dbg) Run: kubectl get nodes + ha_test.go:594: (dbg) Run: kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'" +=== RUN TestMultiControlPlane/serial/DegradedAfterClusterRestart + ha_test.go:392: (dbg) Run: out/minikube-linux-amd64 profile list --output json +=== RUN TestMultiControlPlane/serial/AddSecondaryNode + ha_test.go:607: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 node add --control-plane --alsologtostderr -v 5 +E1102 23:06:00.148395 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + ha_test.go:607: (dbg) Done: out/minikube-linux-amd64 -p ha-232417 node add --control-plane --alsologtostderr -v 5: (42.839198895s) + ha_test.go:613: (dbg) Run: out/minikube-linux-amd64 -p ha-232417 status --alsologtostderr -v 5 +=== RUN TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd + ha_test.go:281: (dbg) Run: out/minikube-linux-amd64 profile list --output json + helpers_test.go:175: Cleaning up "ha-232417" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p ha-232417 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p ha-232417: (6.468438632s) +--- PASS: TestMultiControlPlane (608.88s) + --- PASS: TestMultiControlPlane/serial (602.39s) + --- PASS: TestMultiControlPlane/serial/StartCluster (156.53s) + --- PASS: TestMultiControlPlane/serial/DeployApp (3.02s) + --- PASS: TestMultiControlPlane/serial/PingHostFromPods (0.62s) + --- PASS: TestMultiControlPlane/serial/AddWorkerNode (36.96s) + --- PASS: TestMultiControlPlane/serial/NodeLabels (0.04s) + --- PASS: TestMultiControlPlane/serial/HAppyAfterClusterStart (0.62s) + --- PASS: TestMultiControlPlane/serial/CopyFile (11.14s) + --- PASS: TestMultiControlPlane/serial/StopSecondaryNode (11.00s) + --- PASS: TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop (0.49s) + --- PASS: TestMultiControlPlane/serial/RestartSecondaryNode (33.68s) + --- PASS: TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart (0.61s) + --- PASS: TestMultiControlPlane/serial/RestartClusterKeepsNodes (167.16s) + --- PASS: TestMultiControlPlane/serial/DeleteSecondaryNode (8.09s) + --- PASS: TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete (0.48s) + --- PASS: TestMultiControlPlane/serial/StopCluster (31.53s) + --- PASS: TestMultiControlPlane/serial/RestartCluster (95.85s) + --- PASS: TestMultiControlPlane/serial/DegradedAfterClusterRestart (0.48s) + --- PASS: TestMultiControlPlane/serial/AddSecondaryNode (43.46s) + --- PASS: TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd (0.62s) +=== RUN TestImageBuild +=== RUN TestImageBuild/serial +=== RUN TestImageBuild/serial/Setup + image_test.go:69: (dbg) Run: out/minikube-linux-amd64 start -p image-645155 --driver=docker --container-runtime=docker + image_test.go:69: (dbg) Done: out/minikube-linux-amd64 start -p image-645155 --driver=docker --container-runtime=docker: (19.352808537s) +=== RUN TestImageBuild/serial/NormalBuild + image_test.go:78: (dbg) Run: out/minikube-linux-amd64 image build -t aaa:latest ./testdata/image-build/test-normal -p image-645155 +=== RUN TestImageBuild/serial/BuildWithBuildArg + image_test.go:99: (dbg) Run: out/minikube-linux-amd64 image build -t aaa:latest --build-opt=build-arg=ENV_A=test_env_str --build-opt=no-cache ./testdata/image-build/test-arg -p image-645155 +=== RUN TestImageBuild/serial/BuildWithDockerIgnore + image_test.go:133: (dbg) Run: out/minikube-linux-amd64 image build -t aaa:latest ./testdata/image-build/test-normal --build-opt=no-cache -p image-645155 +=== RUN TestImageBuild/serial/BuildWithSpecifiedDockerfile + image_test.go:88: (dbg) Run: out/minikube-linux-amd64 image build -t aaa:latest -f inner/Dockerfile ./testdata/image-build/test-f -p image-645155 +=== RUN TestImageBuild/serial/validateImageBuildWithBuildEnv + image_test.go:114: skipping due to https://github.com/kubernetes/minikube/issues/12431 + helpers_test.go:175: Cleaning up "image-645155" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p image-645155 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p image-645155: (1.674273179s) +--- PASS: TestImageBuild (22.92s) + --- PASS: TestImageBuild/serial (21.24s) + --- PASS: TestImageBuild/serial/Setup (19.35s) + --- PASS: TestImageBuild/serial/NormalBuild (0.79s) + --- PASS: TestImageBuild/serial/BuildWithBuildArg (0.51s) + --- PASS: TestImageBuild/serial/BuildWithDockerIgnore (0.31s) + --- PASS: TestImageBuild/serial/BuildWithSpecifiedDockerfile (0.28s) + --- SKIP: TestImageBuild/serial/validateImageBuildWithBuildEnv (0.00s) +=== RUN TestISOImage + iso_test.go:35: This test requires a VM driver +--- SKIP: TestISOImage (0.00s) +=== RUN TestJSONOutput +=== RUN TestJSONOutput/start +=== RUN TestJSONOutput/start/Command + json_output_test.go:63: (dbg) Run: out/minikube-linux-amd64 start -p json-output-174530 --output=json --user=testUser --memory=3072 --wait=true --driver=docker --container-runtime=docker + json_output_test.go:63: (dbg) Done: out/minikube-linux-amd64 start -p json-output-174530 --output=json --user=testUser --memory=3072 --wait=true --driver=docker --container-runtime=docker: (1m7.104035907s) +=== RUN TestJSONOutput/start/Audit +=== RUN TestJSONOutput/start/parallel +=== RUN TestJSONOutput/start/parallel/DistinctCurrentSteps +=== PAUSE TestJSONOutput/start/parallel/DistinctCurrentSteps +=== RUN TestJSONOutput/start/parallel/IncreasingCurrentSteps +=== PAUSE TestJSONOutput/start/parallel/IncreasingCurrentSteps +=== CONT TestJSONOutput/start/parallel/DistinctCurrentSteps +=== CONT TestJSONOutput/start/parallel/IncreasingCurrentSteps +=== RUN TestJSONOutput/pause +=== RUN TestJSONOutput/pause/Command + json_output_test.go:63: (dbg) Run: out/minikube-linux-amd64 pause -p json-output-174530 --output=json --user=testUser +=== RUN TestJSONOutput/pause/Audit +=== RUN TestJSONOutput/pause/parallel +=== RUN TestJSONOutput/pause/parallel/DistinctCurrentSteps +=== PAUSE TestJSONOutput/pause/parallel/DistinctCurrentSteps +=== RUN TestJSONOutput/pause/parallel/IncreasingCurrentSteps +=== PAUSE TestJSONOutput/pause/parallel/IncreasingCurrentSteps +=== CONT TestJSONOutput/pause/parallel/DistinctCurrentSteps +=== CONT TestJSONOutput/pause/parallel/IncreasingCurrentSteps +=== RUN TestJSONOutput/unpause +=== RUN TestJSONOutput/unpause/Command + json_output_test.go:63: (dbg) Run: out/minikube-linux-amd64 unpause -p json-output-174530 --output=json --user=testUser +=== RUN TestJSONOutput/unpause/Audit +=== RUN TestJSONOutput/unpause/parallel +=== RUN TestJSONOutput/unpause/parallel/DistinctCurrentSteps +=== PAUSE TestJSONOutput/unpause/parallel/DistinctCurrentSteps +=== RUN TestJSONOutput/unpause/parallel/IncreasingCurrentSteps +=== PAUSE TestJSONOutput/unpause/parallel/IncreasingCurrentSteps +=== CONT TestJSONOutput/unpause/parallel/DistinctCurrentSteps +=== CONT TestJSONOutput/unpause/parallel/IncreasingCurrentSteps +=== RUN TestJSONOutput/stop +=== RUN TestJSONOutput/stop/Command + json_output_test.go:63: (dbg) Run: out/minikube-linux-amd64 stop -p json-output-174530 --output=json --user=testUser + json_output_test.go:63: (dbg) Done: out/minikube-linux-amd64 stop -p json-output-174530 --output=json --user=testUser: (10.542750154s) +=== RUN TestJSONOutput/stop/Audit +=== RUN TestJSONOutput/stop/parallel +=== RUN TestJSONOutput/stop/parallel/DistinctCurrentSteps +=== PAUSE TestJSONOutput/stop/parallel/DistinctCurrentSteps +=== RUN TestJSONOutput/stop/parallel/IncreasingCurrentSteps +=== PAUSE 
TestJSONOutput/stop/parallel/IncreasingCurrentSteps +=== CONT TestJSONOutput/stop/parallel/DistinctCurrentSteps +=== CONT TestJSONOutput/stop/parallel/IncreasingCurrentSteps + helpers_test.go:175: Cleaning up "json-output-174530" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p json-output-174530 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p json-output-174530: (1.378458626s) +--- PASS: TestJSONOutput (79.67s) + --- PASS: TestJSONOutput/start (67.10s) + --- PASS: TestJSONOutput/start/Command (67.10s) + --- PASS: TestJSONOutput/start/Audit (0.00s) + --- PASS: TestJSONOutput/start/parallel (0.00s) + --- PASS: TestJSONOutput/start/parallel/DistinctCurrentSteps (0.00s) + --- PASS: TestJSONOutput/start/parallel/IncreasingCurrentSteps (0.00s) + --- PASS: TestJSONOutput/pause (0.34s) + --- PASS: TestJSONOutput/pause/Command (0.34s) + --- PASS: TestJSONOutput/pause/Audit (0.00s) + --- PASS: TestJSONOutput/pause/parallel (0.00s) + --- PASS: TestJSONOutput/pause/parallel/DistinctCurrentSteps (0.00s) + --- PASS: TestJSONOutput/pause/parallel/IncreasingCurrentSteps (0.00s) + --- PASS: TestJSONOutput/unpause (0.30s) + --- PASS: TestJSONOutput/unpause/Command (0.30s) + --- PASS: TestJSONOutput/unpause/Audit (0.00s) + --- PASS: TestJSONOutput/unpause/parallel (0.00s) + --- PASS: TestJSONOutput/unpause/parallel/DistinctCurrentSteps (0.00s) + --- PASS: TestJSONOutput/unpause/parallel/IncreasingCurrentSteps (0.00s) + --- PASS: TestJSONOutput/stop (10.54s) + --- PASS: TestJSONOutput/stop/Command (10.54s) + --- PASS: TestJSONOutput/stop/Audit (0.00s) + --- PASS: TestJSONOutput/stop/parallel (0.00s) + --- PASS: TestJSONOutput/stop/parallel/DistinctCurrentSteps (0.00s) + --- PASS: TestJSONOutput/stop/parallel/IncreasingCurrentSteps (0.00s) +=== RUN TestErrorJSONOutput + json_output_test.go:160: (dbg) Run: out/minikube-linux-amd64 start -p json-output-error-479843 --memory=3072 --output=json --wait=true --driver=fail + json_output_test.go:160: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p json-output-error-479843 --memory=3072 --output=json --wait=true --driver=fail: exit status 56 (38.557233ms) + + -- stdout -- + {"specversion":"1.0","id":"d37796a4-5e17-41a6-a013-771daf7171f5","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"0","message":"[json-output-error-479843] minikube v1.37.0 on Debian 12.12 (kvm/amd64)","name":"Initial Minikube Setup","totalsteps":"19"}} + {"specversion":"1.0","id":"27c6df46-76d8-41e4-9fa1-a71adb71d502","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true"}} + {"specversion":"1.0","id":"691b8b78-03dd-4218-8c28-1c6513aad7b9","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"KUBECONFIG=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig"}} + {"specversion":"1.0","id":"c16b8f52-bfb0-4bd5-bf9e-6184e6b04581","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_HOME=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube"}} + 
{"specversion":"1.0","id":"53983f42-7d33-4507-b551-703fe0c089d0","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_BIN=out/minikube-linux-amd64"}} + {"specversion":"1.0","id":"7fcf41fc-f644-44be-bb16-bb81942fdc42","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_FORCE_SYSTEMD="}} + {"specversion":"1.0","id":"8ae316d4-4fdd-47eb-a274-f5a5f1d2ca12","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.error","datacontenttype":"application/json","data":{"advice":"","exitcode":"56","issues":"","message":"The driver 'fail' is not supported on linux/amd64","name":"DRV_UNSUPPORTED_OS","url":""}} + + -- /stdout -- + helpers_test.go:175: Cleaning up "json-output-error-479843" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p json-output-error-479843 +--- PASS: TestErrorJSONOutput (0.11s) +=== RUN TestKicCustomNetwork +=== RUN TestKicCustomNetwork/create_custom_network + kic_custom_network_test.go:57: (dbg) Run: out/minikube-linux-amd64 start -p docker-network-238099 --network= + kic_custom_network_test.go:57: (dbg) Done: out/minikube-linux-amd64 start -p docker-network-238099 --network=: (16.192428706s) + kic_custom_network_test.go:150: (dbg) Run: docker network ls --format {{.Name}} + helpers_test.go:175: Cleaning up "docker-network-238099" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p docker-network-238099 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p docker-network-238099: (1.642457617s) +=== RUN TestKicCustomNetwork/use_default_bridge_network + kic_custom_network_test.go:57: (dbg) Run: out/minikube-linux-amd64 start -p docker-network-237746 --network=bridge + kic_custom_network_test.go:57: (dbg) Done: out/minikube-linux-amd64 start -p docker-network-237746 --network=bridge: (19.499231327s) + kic_custom_network_test.go:150: (dbg) Run: docker network ls --format {{.Name}} + helpers_test.go:175: Cleaning up "docker-network-237746" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p docker-network-237746 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p docker-network-237746: (1.594920641s) +--- PASS: TestKicCustomNetwork (38.95s) + --- PASS: TestKicCustomNetwork/create_custom_network (17.85s) + --- PASS: TestKicCustomNetwork/use_default_bridge_network (21.11s) +=== RUN TestKicExistingNetwork +I1102 23:09:05.717409 37869 cli_runner.go:164] Run: docker network inspect existing-network --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" +W1102 23:09:05.726772 37869 cli_runner.go:211] docker network inspect existing-network --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1 +I1102 23:09:05.726829 37869 network_create.go:284] running [docker network inspect existing-network] to gather additional debugging logs... +I1102 23:09:05.726842 37869 cli_runner.go:164] Run: docker network inspect existing-network +W1102 23:09:05.736108 37869 cli_runner.go:211] docker network inspect existing-network returned with exit code 1 +I1102 23:09:05.736120 37869 network_create.go:287] error running [docker network inspect existing-network]: docker network inspect existing-network: exit status 1 +stdout: +[] + +stderr: +Error response from daemon: network existing-network not found +I1102 23:09:05.736130 37869 network_create.go:289] output of [docker network inspect existing-network]: -- stdout -- +[] + +-- /stdout -- +** stderr ** +Error response from daemon: network existing-network not found + +** /stderr ** +I1102 23:09:05.736222 37869 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" +I1102 23:09:05.745621 37869 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-c8e5074369ec IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:4e:e7:c6:2d:b8:92} reservation:} +I1102 23:09:05.745765 37869 network.go:206] using free private subnet 192.168.58.0/24: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc00234ee40} +I1102 23:09:05.745775 37869 network_create.go:124] attempt to create docker network existing-network 192.168.58.0/24 with gateway 192.168.58.1 and MTU of 1500 ... 
+I1102 23:09:05.745815 37869 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.58.0/24 --gateway=192.168.58.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=existing-network existing-network +I1102 23:09:05.775511 37869 network_create.go:108] docker network existing-network 192.168.58.0/24 created + kic_custom_network_test.go:150: (dbg) Run: docker network ls --format {{.Name}} + kic_custom_network_test.go:93: (dbg) Run: out/minikube-linux-amd64 start -p existing-network-521277 --network=existing-network + kic_custom_network_test.go:93: (dbg) Done: out/minikube-linux-amd64 start -p existing-network-521277 --network=existing-network: (18.720215006s) + helpers_test.go:175: Cleaning up "existing-network-521277" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p existing-network-521277 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p existing-network-521277: (1.62797265s) +I1102 23:09:26.133803 37869 cli_runner.go:164] Run: docker network ls --filter=label=existing-network --format {{.Name}} +--- PASS: TestKicExistingNetwork (20.43s) +=== RUN TestKicCustomSubnet + kic_custom_network_test.go:112: (dbg) Run: out/minikube-linux-amd64 start -p custom-subnet-470006 --subnet=192.168.60.0/24 +E1102 23:09:41.970027 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + kic_custom_network_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p custom-subnet-470006 --subnet=192.168.60.0/24: (18.476757589s) + kic_custom_network_test.go:161: (dbg) Run: docker network inspect custom-subnet-470006 --format "{{(index .IPAM.Config 0).Subnet}}" + helpers_test.go:175: Cleaning up "custom-subnet-470006" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p custom-subnet-470006 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p custom-subnet-470006: (1.640603229s) +--- PASS: TestKicCustomSubnet (20.13s) +=== RUN TestKicStaticIP + kic_custom_network_test.go:132: (dbg) Run: out/minikube-linux-amd64 start -p static-ip-413484 --static-ip=192.168.200.200 + kic_custom_network_test.go:132: (dbg) Done: out/minikube-linux-amd64 start -p static-ip-413484 --static-ip=192.168.200.200: (19.425705961s) + kic_custom_network_test.go:138: (dbg) Run: out/minikube-linux-amd64 -p static-ip-413484 ip + helpers_test.go:175: Cleaning up "static-ip-413484" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p static-ip-413484 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p static-ip-413484: (1.617950463s) +--- PASS: TestKicStaticIP (21.12s) +=== RUN TestMainNoArgs + main_test.go:70: (dbg) Run: out/minikube-linux-amd64 +--- PASS: TestMainNoArgs (0.03s) +=== RUN TestMinikubeProfile + minikube_profile_test.go:44: (dbg) Run: out/minikube-linux-amd64 start -p first-903203 --driver=docker --container-runtime=docker + minikube_profile_test.go:44: (dbg) Done: out/minikube-linux-amd64 start -p first-903203 --driver=docker --container-runtime=docker: (17.84780335s) + minikube_profile_test.go:44: (dbg) Run: out/minikube-linux-amd64 start -p second-904607 --driver=docker --container-runtime=docker +E1102 23:10:32.448708 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + minikube_profile_test.go:44: (dbg) Done: out/minikube-linux-amd64 start -p second-904607 --driver=docker --container-runtime=docker: (17.127439606s) + minikube_profile_test.go:51: (dbg) Run: out/minikube-linux-amd64 profile first-903203 + minikube_profile_test.go:55: (dbg) Run: out/minikube-linux-amd64 profile list -ojson + minikube_profile_test.go:51: (dbg) Run: out/minikube-linux-amd64 profile second-904607 + minikube_profile_test.go:55: (dbg) Run: out/minikube-linux-amd64 profile list -ojson + helpers_test.go:175: Cleaning up "second-904607" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p second-904607 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p second-904607: (1.651960767s) + helpers_test.go:175: Cleaning up "first-903203" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p first-903203 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p first-903203: (1.673442069s) +--- PASS: TestMinikubeProfile (39.10s) +=== RUN TestMountStart +=== RUN TestMountStart/serial +=== RUN TestMountStart/serial/StartWithMountFirst + mount_start_test.go:118: (dbg) Run: out/minikube-linux-amd64 start -p mount-start-1-069795 --memory=3072 --mount-string /tmp/TestMountStartserial2298826510/001:/minikube-host --mount-gid 0 --mount-msize 6543 --mount-port 46464 --mount-uid 0 --no-kubernetes --driver=docker --container-runtime=docker + mount_start_test.go:118: (dbg) Done: out/minikube-linux-amd64 start -p mount-start-1-069795 --memory=3072 --mount-string /tmp/TestMountStartserial2298826510/001:/minikube-host --mount-gid 0 --mount-msize 6543 --mount-port 46464 --mount-uid 0 --no-kubernetes --driver=docker --container-runtime=docker: (4.518800036s) +=== RUN TestMountStart/serial/VerifyMountFirst + mount_start_test.go:134: (dbg) Run: out/minikube-linux-amd64 -p mount-start-1-069795 ssh -- ls /minikube-host +=== RUN TestMountStart/serial/StartWithMountSecond + mount_start_test.go:118: (dbg) Run: out/minikube-linux-amd64 start -p mount-start-2-081479 --memory=3072 --mount-string /tmp/TestMountStartserial2298826510/001:/minikube-host --mount-gid 0 --mount-msize 6543 --mount-port 46465 --mount-uid 0 --no-kubernetes --driver=docker --container-runtime=docker + mount_start_test.go:118: (dbg) Done: out/minikube-linux-amd64 start -p mount-start-2-081479 --memory=3072 --mount-string /tmp/TestMountStartserial2298826510/001:/minikube-host --mount-gid 0 --mount-msize 6543 --mount-port 46465 --mount-uid 0 --no-kubernetes --driver=docker --container-runtime=docker: (3.933841266s) +=== RUN TestMountStart/serial/VerifyMountSecond + mount_start_test.go:134: (dbg) Run: out/minikube-linux-amd64 -p mount-start-2-081479 ssh -- ls /minikube-host +=== RUN TestMountStart/serial/DeleteFirst + pause_test.go:132: (dbg) Run: out/minikube-linux-amd64 delete -p mount-start-1-069795 --alsologtostderr -v=5 + pause_test.go:132: (dbg) Done: out/minikube-linux-amd64 delete -p mount-start-1-069795 --alsologtostderr -v=5: (1.301541697s) +=== RUN TestMountStart/serial/VerifyMountPostDelete + mount_start_test.go:134: (dbg) Run: out/minikube-linux-amd64 -p mount-start-2-081479 ssh -- ls /minikube-host +=== RUN TestMountStart/serial/Stop + mount_start_test.go:196: (dbg) Run: out/minikube-linux-amd64 stop -p mount-start-2-081479 + mount_start_test.go:196: (dbg) Done: out/minikube-linux-amd64 stop -p mount-start-2-081479: (1.146755789s) +=== RUN TestMountStart/serial/RestartStopped + mount_start_test.go:207: (dbg) Run: out/minikube-linux-amd64 start -p mount-start-2-081479 +E1102 23:11:05.030130 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + mount_start_test.go:207: (dbg) Done: out/minikube-linux-amd64 start -p mount-start-2-081479: (6.079668921s) +=== RUN TestMountStart/serial/VerifyMountPostStop + mount_start_test.go:134: (dbg) Run: out/minikube-linux-amd64 -p mount-start-2-081479 ssh -- ls /minikube-host + helpers_test.go:175: Cleaning up "mount-start-2-081479" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p mount-start-2-081479 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p mount-start-2-081479: (1.28844615s) + helpers_test.go:175: Cleaning up "mount-start-1-069795" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p mount-start-1-069795 +--- PASS: TestMountStart (22.08s) + --- PASS: TestMountStart/serial (20.71s) + --- PASS: TestMountStart/serial/StartWithMountFirst (5.52s) + --- PASS: TestMountStart/serial/VerifyMountFirst (0.18s) + --- PASS: TestMountStart/serial/StartWithMountSecond (4.93s) + --- PASS: TestMountStart/serial/VerifyMountSecond (0.18s) + --- PASS: TestMountStart/serial/DeleteFirst (1.30s) + --- PASS: TestMountStart/serial/VerifyMountPostDelete (0.18s) + --- PASS: TestMountStart/serial/Stop (1.15s) + --- PASS: TestMountStart/serial/RestartStopped (7.08s) + --- PASS: TestMountStart/serial/VerifyMountPostStop (0.18s) +=== RUN TestMultiNode + multinode_test.go:45: (dbg) Run: docker version -f {{.Server.Version}} +=== RUN TestMultiNode/serial +=== RUN TestMultiNode/serial/FreshStart2Nodes + multinode_test.go:96: (dbg) Run: out/minikube-linux-amd64 start -p multinode-809166 --wait=true --memory=3072 --nodes=2 -v=5 --alsologtostderr --driver=docker --container-runtime=docker + multinode_test.go:96: (dbg) Done: out/minikube-linux-amd64 start -p multinode-809166 --wait=true --memory=3072 --nodes=2 -v=5 --alsologtostderr --driver=docker --container-runtime=docker: (1m13.053089069s) + multinode_test.go:102: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 status --alsologtostderr +=== RUN TestMultiNode/serial/DeployApp2Nodes + multinode_test.go:493: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-809166 -- apply -f ./testdata/multinodes/multinode-pod-dns-test.yaml + multinode_test.go:498: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-809166 -- rollout status deployment/busybox + multinode_test.go:498: (dbg) Done: out/minikube-linux-amd64 kubectl -p multinode-809166 -- rollout status deployment/busybox: (1.77817772s) + multinode_test.go:505: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-809166 -- get pods -o jsonpath='{.items[*].status.podIP}' + multinode_test.go:528: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-809166 -- get pods -o jsonpath='{.items[*].metadata.name}' + multinode_test.go:536: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-809166 -- exec busybox-7b57f96db7-dx9j2 -- nslookup kubernetes.io + multinode_test.go:536: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-809166 -- exec busybox-7b57f96db7-mzzt2 -- nslookup kubernetes.io + multinode_test.go:546: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-809166 -- exec busybox-7b57f96db7-dx9j2 -- nslookup kubernetes.default + multinode_test.go:546: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-809166 -- exec busybox-7b57f96db7-mzzt2 -- nslookup kubernetes.default + multinode_test.go:554: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-809166 -- exec busybox-7b57f96db7-dx9j2 -- nslookup kubernetes.default.svc.cluster.local + multinode_test.go:554: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-809166 -- exec busybox-7b57f96db7-mzzt2 -- nslookup kubernetes.default.svc.cluster.local +=== RUN TestMultiNode/serial/PingHostFrom2Pods + multinode_test.go:564: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-809166 -- get pods -o jsonpath='{.items[*].metadata.name}' + multinode_test.go:572: (dbg) Run: 
out/minikube-linux-amd64 kubectl -p multinode-809166 -- exec busybox-7b57f96db7-dx9j2 -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3" + multinode_test.go:583: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-809166 -- exec busybox-7b57f96db7-dx9j2 -- sh -c "ping -c 1 192.168.67.1" + multinode_test.go:572: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-809166 -- exec busybox-7b57f96db7-mzzt2 -- sh -c "nslookup host.minikube.internal | awk 'NR==5' | cut -d' ' -f3" + multinode_test.go:583: (dbg) Run: out/minikube-linux-amd64 kubectl -p multinode-809166 -- exec busybox-7b57f96db7-mzzt2 -- sh -c "ping -c 1 192.168.67.1" +=== RUN TestMultiNode/serial/AddNode + multinode_test.go:121: (dbg) Run: out/minikube-linux-amd64 node add -p multinode-809166 -v=5 --alsologtostderr + multinode_test.go:121: (dbg) Done: out/minikube-linux-amd64 node add -p multinode-809166 -v=5 --alsologtostderr: (27.698546519s) + multinode_test.go:127: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 status --alsologtostderr +=== RUN TestMultiNode/serial/MultiNodeLabels + multinode_test.go:221: (dbg) Run: kubectl --context multinode-809166 get nodes -o "jsonpath=[{range .items[*]}{.metadata.labels},{end}]" +=== RUN TestMultiNode/serial/ProfileList + multinode_test.go:143: (dbg) Run: out/minikube-linux-amd64 profile list --output json +=== RUN TestMultiNode/serial/CopyFile + multinode_test.go:184: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 status --output json --alsologtostderr + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 cp testdata/cp-test.txt multinode-809166:/home/docker/cp-test.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 cp multinode-809166:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile2497004503/001/cp-test_multinode-809166.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 cp multinode-809166:/home/docker/cp-test.txt multinode-809166-m02:/home/docker/cp-test_multinode-809166_multinode-809166-m02.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166-m02 "sudo cat /home/docker/cp-test_multinode-809166_multinode-809166-m02.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 cp multinode-809166:/home/docker/cp-test.txt multinode-809166-m03:/home/docker/cp-test_multinode-809166_multinode-809166-m03.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166-m03 "sudo cat /home/docker/cp-test_multinode-809166_multinode-809166-m03.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 cp testdata/cp-test.txt multinode-809166-m02:/home/docker/cp-test.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166-m02 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:573: (dbg) Run: 
out/minikube-linux-amd64 -p multinode-809166 cp multinode-809166-m02:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile2497004503/001/cp-test_multinode-809166-m02.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166-m02 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 cp multinode-809166-m02:/home/docker/cp-test.txt multinode-809166:/home/docker/cp-test_multinode-809166-m02_multinode-809166.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166-m02 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166 "sudo cat /home/docker/cp-test_multinode-809166-m02_multinode-809166.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 cp multinode-809166-m02:/home/docker/cp-test.txt multinode-809166-m03:/home/docker/cp-test_multinode-809166-m02_multinode-809166-m03.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166-m02 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166-m03 "sudo cat /home/docker/cp-test_multinode-809166-m02_multinode-809166-m03.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 cp testdata/cp-test.txt multinode-809166-m03:/home/docker/cp-test.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166-m03 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 cp multinode-809166-m03:/home/docker/cp-test.txt /tmp/TestMultiNodeserialCopyFile2497004503/001/cp-test_multinode-809166-m03.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166-m03 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 cp multinode-809166-m03:/home/docker/cp-test.txt multinode-809166:/home/docker/cp-test_multinode-809166-m03_multinode-809166.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166-m03 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166 "sudo cat /home/docker/cp-test_multinode-809166-m03_multinode-809166.txt" + helpers_test.go:573: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 cp multinode-809166-m03:/home/docker/cp-test.txt multinode-809166-m02:/home/docker/cp-test_multinode-809166-m03_multinode-809166-m02.txt + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166-m03 "sudo cat /home/docker/cp-test.txt" + helpers_test.go:551: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 ssh -n multinode-809166-m02 "sudo cat /home/docker/cp-test_multinode-809166-m03_multinode-809166-m02.txt" +=== RUN TestMultiNode/serial/StopNode + multinode_test.go:248: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 node stop m03 + multinode_test.go:248: (dbg) Done: out/minikube-linux-amd64 -p multinode-809166 node stop m03: (1.145934046s) + multinode_test.go:254: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 status + multinode_test.go:254: (dbg) Non-zero exit: out/minikube-linux-amd64 -p multinode-809166 status: exit status 7 
(339.037047ms) + + -- stdout -- + multinode-809166 + type: Control Plane + host: Running + kubelet: Running + apiserver: Running + kubeconfig: Configured + + multinode-809166-m02 + type: Worker + host: Running + kubelet: Running + + multinode-809166-m03 + type: Worker + host: Stopped + kubelet: Stopped + + + -- /stdout -- + multinode_test.go:261: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 status --alsologtostderr + multinode_test.go:261: (dbg) Non-zero exit: out/minikube-linux-amd64 -p multinode-809166 status --alsologtostderr: exit status 7 (329.144856ms) + + -- stdout -- + multinode-809166 + type: Control Plane + host: Running + kubelet: Running + apiserver: Running + kubeconfig: Configured + + multinode-809166-m02 + type: Worker + host: Running + kubelet: Running + + multinode-809166-m03 + type: Worker + host: Stopped + kubelet: Stopped + + + -- /stdout -- + ** stderr ** + I1102 23:13:01.630168 219331 out.go:360] Setting OutFile to fd 1 ... + I1102 23:13:01.630343 219331 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 23:13:01.630347 219331 out.go:374] Setting ErrFile to fd 2... + I1102 23:13:01.630350 219331 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 23:13:01.630446 219331 root.go:338] Updating PATH: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/bin + I1102 23:13:01.630548 219331 out.go:368] Setting JSON to false + I1102 23:13:01.630562 219331 mustload.go:66] Loading cluster: multinode-809166 + I1102 23:13:01.630650 219331 notify.go:221] Checking for updates... + I1102 23:13:01.630797 219331 config.go:182] Loaded profile config "multinode-809166": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 23:13:01.630803 219331 status.go:174] checking status of multinode-809166 ... + I1102 23:13:01.631144 219331 cli_runner.go:164] Run: docker container inspect multinode-809166 --format={{.State.Status}} + I1102 23:13:01.641158 219331 status.go:371] multinode-809166 host status = "Running" (err=) + I1102 23:13:01.641171 219331 host.go:66] Checking if "multinode-809166" exists ... + I1102 23:13:01.641335 219331 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-809166 + I1102 23:13:01.651179 219331 host.go:66] Checking if "multinode-809166" exists ... 
+ I1102 23:13:01.651353 219331 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'" + I1102 23:13:01.651384 219331 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-809166 + I1102 23:13:01.661077 219331 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32909 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/multinode-809166/id_rsa Username:docker} + I1102 23:13:01.745949 219331 ssh_runner.go:195] Run: systemctl --version + I1102 23:13:01.749471 219331 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet + I1102 23:13:01.756538 219331 cli_runner.go:164] Run: docker system info --format "{{json .}}" + I1102 23:13:01.785655 219331 info.go:266] docker info: {ID:c2b1af59-a1e1-4db0-9aaa-240bf36ebecd Containers:3 ContainersRunning:2 ContainersPaused:0 ContainersStopped:1 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:51 OomKillDisable:false NGoroutines:71 SystemTime:2025-11-02 23:13:01.779842276 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.6.97+ OperatingSystem:Debian GNU/Linux 12 (bookworm) (containerized) OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[https://mirror.gcr.io/] Secure:true Official:true}} Mirrors:[https://mirror.gcr.io/]} NCPU:8 MemTotal:65320062976 GenericResources: DockerRootDir:/docker-graph HTTPProxy: HTTPSProxy: NoProxy: Name:ec6b3253-b39b-4dea-b672-e2db97323995 Labels:[] ExperimentalBuild:false ServerVersion:28.5.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:b98a3aace656320842a23f4a392a33f46af97866 Expected:} RuncCommit:{ID:v1.3.0-0-g4ca628d1 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.29.1]] Warnings:}} + I1102 23:13:01.785993 219331 kubeconfig.go:125] found "multinode-809166" server: "https://192.168.67.2:8443" + I1102 23:13:01.786004 219331 api_server.go:166] Checking apiserver status ... 
+ I1102 23:13:01.786031 219331 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.* + I1102 23:13:01.793747 219331 ssh_runner.go:195] Run: sudo egrep ^[0-9]+:freezer: /proc/2085/cgroup + W1102 23:13:01.798664 219331 api_server.go:177] unable to find freezer cgroup: sudo egrep ^[0-9]+:freezer: /proc/2085/cgroup: Process exited with status 1 + stdout: + + stderr: + I1102 23:13:01.798703 219331 ssh_runner.go:195] Run: ls + I1102 23:13:01.800756 219331 api_server.go:253] Checking apiserver healthz at https://192.168.67.2:8443/healthz ... + I1102 23:13:01.802895 219331 api_server.go:279] https://192.168.67.2:8443/healthz returned 200: + ok + I1102 23:13:01.802905 219331 status.go:463] multinode-809166 apiserver status = Running (err=) + I1102 23:13:01.802910 219331 status.go:176] multinode-809166 status: &{Name:multinode-809166 Host:Running Kubelet:Running APIServer:Running Kubeconfig:Configured Worker:false TimeToStop: DockerEnv: PodManEnv:} + I1102 23:13:01.802928 219331 status.go:174] checking status of multinode-809166-m02 ... + I1102 23:13:01.803093 219331 cli_runner.go:164] Run: docker container inspect multinode-809166-m02 --format={{.State.Status}} + I1102 23:13:01.812302 219331 status.go:371] multinode-809166-m02 host status = "Running" (err=) + I1102 23:13:01.812311 219331 host.go:66] Checking if "multinode-809166-m02" exists ... + I1102 23:13:01.812458 219331 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" multinode-809166-m02 + I1102 23:13:01.821849 219331 host.go:66] Checking if "multinode-809166-m02" exists ... + I1102 23:13:01.822014 219331 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'" + I1102 23:13:01.822046 219331 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" multinode-809166-m02 + I1102 23:13:01.831603 219331 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32914 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/multinode-809166-m02/id_rsa Username:docker} + I1102 23:13:01.914840 219331 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet + I1102 23:13:01.921722 219331 status.go:176] multinode-809166-m02 status: &{Name:multinode-809166-m02 Host:Running Kubelet:Running APIServer:Irrelevant Kubeconfig:Irrelevant Worker:true TimeToStop: DockerEnv: PodManEnv:} + I1102 23:13:01.921733 219331 status.go:174] checking status of multinode-809166-m03 ... 
+ I1102 23:13:01.921903 219331 cli_runner.go:164] Run: docker container inspect multinode-809166-m03 --format={{.State.Status}} + I1102 23:13:01.931027 219331 status.go:371] multinode-809166-m03 host status = "Stopped" (err=) + I1102 23:13:01.931035 219331 status.go:384] host is not running, skipping remaining checks + I1102 23:13:01.931043 219331 status.go:176] multinode-809166-m03 status: &{Name:multinode-809166-m03 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:} + + ** /stderr ** +=== RUN TestMultiNode/serial/StartAfterStop + multinode_test.go:282: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 node start m03 -v=5 --alsologtostderr + multinode_test.go:282: (dbg) Done: out/minikube-linux-amd64 -p multinode-809166 node start m03 -v=5 --alsologtostderr: (6.09812348s) + multinode_test.go:290: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 status -v=5 --alsologtostderr + multinode_test.go:306: (dbg) Run: kubectl get nodes +=== RUN TestMultiNode/serial/RestartKeepsNodes + multinode_test.go:314: (dbg) Run: out/minikube-linux-amd64 node list -p multinode-809166 + multinode_test.go:321: (dbg) Run: out/minikube-linux-amd64 stop -p multinode-809166 + multinode_test.go:321: (dbg) Done: out/minikube-linux-amd64 stop -p multinode-809166: (22.130524993s) + multinode_test.go:326: (dbg) Run: out/minikube-linux-amd64 start -p multinode-809166 --wait=true -v=5 --alsologtostderr + multinode_test.go:326: (dbg) Done: out/minikube-linux-amd64 start -p multinode-809166 --wait=true -v=5 --alsologtostderr: (45.742450499s) + multinode_test.go:331: (dbg) Run: out/minikube-linux-amd64 node list -p multinode-809166 +=== RUN TestMultiNode/serial/DeleteNode + multinode_test.go:416: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 node delete m03 + multinode_test.go:416: (dbg) Done: out/minikube-linux-amd64 -p multinode-809166 node delete m03: (4.087761237s) + multinode_test.go:422: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 status --alsologtostderr + multinode_test.go:436: (dbg) Run: kubectl get nodes + multinode_test.go:444: (dbg) Run: kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'" +=== RUN TestMultiNode/serial/StopMultiNode + multinode_test.go:345: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 stop + multinode_test.go:345: (dbg) Done: out/minikube-linux-amd64 -p multinode-809166 stop: (20.992305338s) + multinode_test.go:351: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 status +E1102 23:14:41.962026 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + multinode_test.go:351: (dbg) Non-zero exit: out/minikube-linux-amd64 -p multinode-809166 status: exit status 7 (51.667294ms) + + -- stdout -- + multinode-809166 + type: Control Plane + host: Stopped + kubelet: Stopped + apiserver: Stopped + kubeconfig: Stopped + + multinode-809166-m02 + type: Worker + host: Stopped + kubelet: Stopped + + + -- /stdout -- + multinode_test.go:358: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 status --alsologtostderr + multinode_test.go:358: (dbg) Non-zero exit: out/minikube-linux-amd64 -p multinode-809166 status --alsologtostderr: exit status 7 (48.816706ms) + + -- stdout -- + 
multinode-809166 + type: Control Plane + host: Stopped + kubelet: Stopped + apiserver: Stopped + kubeconfig: Stopped + + multinode-809166-m02 + type: Worker + host: Stopped + kubelet: Stopped + + + -- /stdout -- + ** stderr ** + I1102 23:14:41.996966 233298 out.go:360] Setting OutFile to fd 1 ... + I1102 23:14:41.997132 233298 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 23:14:41.997136 233298 out.go:374] Setting ErrFile to fd 2... + I1102 23:14:41.997139 233298 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 23:14:41.997259 233298 root.go:338] Updating PATH: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/bin + I1102 23:14:41.997359 233298 out.go:368] Setting JSON to false + I1102 23:14:41.997369 233298 mustload.go:66] Loading cluster: multinode-809166 + I1102 23:14:41.997390 233298 notify.go:221] Checking for updates... + I1102 23:14:41.997591 233298 config.go:182] Loaded profile config "multinode-809166": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 23:14:41.997597 233298 status.go:174] checking status of multinode-809166 ... + I1102 23:14:41.997906 233298 cli_runner.go:164] Run: docker container inspect multinode-809166 --format={{.State.Status}} + I1102 23:14:42.008265 233298 status.go:371] multinode-809166 host status = "Stopped" (err=) + I1102 23:14:42.008276 233298 status.go:384] host is not running, skipping remaining checks + I1102 23:14:42.008279 233298 status.go:176] multinode-809166 status: &{Name:multinode-809166 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:false TimeToStop: DockerEnv: PodManEnv:} + I1102 23:14:42.008294 233298 status.go:174] checking status of multinode-809166-m02 ... 
+ I1102 23:14:42.008507 233298 cli_runner.go:164] Run: docker container inspect multinode-809166-m02 --format={{.State.Status}} + I1102 23:14:42.018909 233298 status.go:371] multinode-809166-m02 host status = "Stopped" (err=) + I1102 23:14:42.018939 233298 status.go:384] host is not running, skipping remaining checks + I1102 23:14:42.018943 233298 status.go:176] multinode-809166-m02 status: &{Name:multinode-809166-m02 Host:Stopped Kubelet:Stopped APIServer:Stopped Kubeconfig:Stopped Worker:true TimeToStop: DockerEnv: PodManEnv:} + + ** /stderr ** +=== RUN TestMultiNode/serial/RestartMultiNode + multinode_test.go:376: (dbg) Run: out/minikube-linux-amd64 start -p multinode-809166 --wait=true -v=5 --alsologtostderr --driver=docker --container-runtime=docker + multinode_test.go:376: (dbg) Done: out/minikube-linux-amd64 start -p multinode-809166 --wait=true -v=5 --alsologtostderr --driver=docker --container-runtime=docker: (43.781181762s) + multinode_test.go:382: (dbg) Run: out/minikube-linux-amd64 -p multinode-809166 status --alsologtostderr + multinode_test.go:396: (dbg) Run: kubectl get nodes + multinode_test.go:404: (dbg) Run: kubectl get nodes -o "go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'" +=== RUN TestMultiNode/serial/ValidateNameConflict + multinode_test.go:455: (dbg) Run: out/minikube-linux-amd64 node list -p multinode-809166 + multinode_test.go:464: (dbg) Run: out/minikube-linux-amd64 start -p multinode-809166-m02 --driver=docker --container-runtime=docker + multinode_test.go:464: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p multinode-809166-m02 --driver=docker --container-runtime=docker: exit status 14 (39.645657ms) + + -- stdout -- + * [multinode-809166-m02] minikube v1.37.0 on Debian 12.12 (kvm/amd64) + - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true + - KUBECONFIG=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig + - MINIKUBE_HOME=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube + - MINIKUBE_BIN=out/minikube-linux-amd64 + - MINIKUBE_FORCE_SYSTEMD= + + + + -- /stdout -- + ** stderr ** + ! 
Profile name 'multinode-809166-m02' is duplicated with machine name 'multinode-809166-m02' in profile 'multinode-809166' + X Exiting due to MK_USAGE: Profile name should be unique + + ** /stderr ** + multinode_test.go:472: (dbg) Run: out/minikube-linux-amd64 start -p multinode-809166-m03 --driver=docker --container-runtime=docker +E1102 23:15:32.448657 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + multinode_test.go:472: (dbg) Done: out/minikube-linux-amd64 start -p multinode-809166-m03 --driver=docker --container-runtime=docker: (19.017089317s) + multinode_test.go:479: (dbg) Run: out/minikube-linux-amd64 node add -p multinode-809166 + multinode_test.go:479: (dbg) Non-zero exit: out/minikube-linux-amd64 node add -p multinode-809166: exit status 80 (188.462511ms) + + -- stdout -- + * Adding node m03 to cluster multinode-809166 as [worker] + + + + -- /stdout -- + ** stderr ** + X Exiting due to GUEST_NODE_ADD: failed to add node: Node multinode-809166-m03 already exists in multinode-809166-m03 profile + * + ╭─────────────────────────────────────────────────────────────────────────────────────────────╮ + │ │ + │ * If the above advice does not help, please let us know: │ + │ https://github.com/kubernetes/minikube/issues/new/choose │ + │ │ + │ * Please run `minikube logs --file=logs.txt` and attach logs.txt to the GitHub issue. │ + │ * Please also attach the following file to the GitHub issue: │ + │ * - /tmp/minikube_node_6199dd17e6dc619217c30d204ca9467a1523d733_0.log │ + │ │ + ╰─────────────────────────────────────────────────────────────────────────────────────────────╯ + + ** /stderr ** + multinode_test.go:484: (dbg) Run: out/minikube-linux-amd64 delete -p multinode-809166-m03 + multinode_test.go:484: (dbg) Done: out/minikube-linux-amd64 delete -p multinode-809166-m03: (1.644908583s) + helpers_test.go:175: Cleaning up "multinode-809166" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p multinode-809166 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p multinode-809166: (3.252288956s) +--- PASS: TestMultiNode (281.78s) + --- PASS: TestMultiNode/serial (278.51s) + --- PASS: TestMultiNode/serial/FreshStart2Nodes (73.41s) + --- PASS: TestMultiNode/serial/DeployApp2Nodes (2.75s) + --- PASS: TestMultiNode/serial/PingHostFrom2Pods (0.43s) + --- PASS: TestMultiNode/serial/AddNode (28.17s) + --- PASS: TestMultiNode/serial/MultiNodeLabels (0.04s) + --- PASS: TestMultiNode/serial/ProfileList (0.46s) + --- PASS: TestMultiNode/serial/CopyFile (6.25s) + --- PASS: TestMultiNode/serial/StopNode (1.81s) + --- PASS: TestMultiNode/serial/StartAfterStop (6.58s) + --- PASS: TestMultiNode/serial/RestartKeepsNodes (67.93s) + --- PASS: TestMultiNode/serial/DeleteNode (4.49s) + --- PASS: TestMultiNode/serial/StopMultiNode (21.09s) + --- PASS: TestMultiNode/serial/RestartMultiNode (44.18s) + --- PASS: TestMultiNode/serial/ValidateNameConflict (20.92s) +=== RUN TestNetworkPlugins +=== PAUSE TestNetworkPlugins +=== RUN TestNoKubernetes +=== PAUSE TestNoKubernetes +=== RUN TestChangeNoneUser + none_test.go:38: Test requires none driver and SUDO_USER env to not be empty +--- SKIP: TestChangeNoneUser (0.00s) +=== RUN TestPause +=== PAUSE TestPause +=== RUN TestPreload + preload_test.go:43: (dbg) Run: out/minikube-linux-amd64 start -p test-preload-092353 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=docker --kubernetes-version=v1.32.0 + preload_test.go:43: (dbg) Done: out/minikube-linux-amd64 start -p test-preload-092353 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=docker --kubernetes-version=v1.32.0: (1m1.684290294s) + preload_test.go:51: (dbg) Run: out/minikube-linux-amd64 -p test-preload-092353 image pull gcr.io/k8s-minikube/busybox + preload_test.go:57: (dbg) Run: out/minikube-linux-amd64 stop -p test-preload-092353 +E1102 23:16:55.511359 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + preload_test.go:57: (dbg) Done: out/minikube-linux-amd64 stop -p test-preload-092353: (5.533150628s) + preload_test.go:65: (dbg) Run: out/minikube-linux-amd64 start -p test-preload-092353 --memory=3072 --alsologtostderr -v=1 --wait=true --driver=docker --container-runtime=docker + preload_test.go:65: (dbg) Done: out/minikube-linux-amd64 start -p test-preload-092353 --memory=3072 --alsologtostderr -v=1 --wait=true --driver=docker --container-runtime=docker: (53.319389971s) + preload_test.go:70: (dbg) Run: out/minikube-linux-amd64 -p test-preload-092353 image list + helpers_test.go:175: Cleaning up "test-preload-092353" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p test-preload-092353 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p test-preload-092353: (1.717158419s) +--- PASS: TestPreload (123.21s) +=== RUN TestScheduledStopWindows + scheduled_stop_test.go:42: test only runs on windows +--- SKIP: TestScheduledStopWindows (0.00s) +=== RUN TestScheduledStopUnix + scheduled_stop_test.go:128: (dbg) Run: out/minikube-linux-amd64 start -p scheduled-stop-206205 --memory=3072 --driver=docker --container-runtime=docker + scheduled_stop_test.go:128: (dbg) Done: out/minikube-linux-amd64 start -p scheduled-stop-206205 --memory=3072 --driver=docker --container-runtime=docker: (18.556015798s) + scheduled_stop_test.go:137: (dbg) Run: out/minikube-linux-amd64 stop -p scheduled-stop-206205 --schedule 5m + scheduled_stop_test.go:191: (dbg) Run: out/minikube-linux-amd64 status --format={{.TimeToStop}} -p scheduled-stop-206205 -n scheduled-stop-206205 + scheduled_stop_test.go:169: signal error was: + scheduled_stop_test.go:137: (dbg) Run: out/minikube-linux-amd64 stop -p scheduled-stop-206205 --schedule 15s + scheduled_stop_test.go:169: signal error was: + scheduled_stop_test.go:98: process 256904 running but should have been killed on reschedule of stop + panic.go:636: *** TestScheduledStopUnix FAILED at 2025-11-02 23:18:12.413480736 +0000 UTC m=+1905.177894699 + helpers_test.go:222: -----------------------post-mortem-------------------------------- + helpers_test.go:223: ======> post-mortem[TestScheduledStopUnix]: network settings <====== + helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="" HTTPS_PROXY="" NO_PROXY="" + helpers_test.go:238: ======> post-mortem[TestScheduledStopUnix]: docker inspect <====== + helpers_test.go:239: (dbg) Run: docker inspect scheduled-stop-206205 + helpers_test.go:243: (dbg) docker inspect scheduled-stop-206205: + + -- stdout -- + [ + { + "Id": "31f6056f95e08bf2b805609824e1ed416bc76493b4814d6f006fbb3ba6ec30ff", + "Created": "2025-11-02T23:17:57.681999844Z", + "Path": "/usr/local/bin/entrypoint", + "Args": [ + "/sbin/init" + ], + "State": { + "Status": "running", + "Running": true, + "Paused": false, + "Restarting": false, + "OOMKilled": false, + "Dead": false, + "Pid": 254030, + "ExitCode": 0, + "Error": "", + "StartedAt": "2025-11-02T23:17:57.692769206Z", + "FinishedAt": "0001-01-01T00:00:00Z" + }, + "Image": "sha256:a1caeebaf98ed0136731e905a1e086f77985a42c2ebb5a7e0b3d0bd7fcbe10cc", + "ResolvConfPath": "/docker-graph/containers/31f6056f95e08bf2b805609824e1ed416bc76493b4814d6f006fbb3ba6ec30ff/resolv.conf", + "HostnamePath": "/docker-graph/containers/31f6056f95e08bf2b805609824e1ed416bc76493b4814d6f006fbb3ba6ec30ff/hostname", + "HostsPath": "/docker-graph/containers/31f6056f95e08bf2b805609824e1ed416bc76493b4814d6f006fbb3ba6ec30ff/hosts", + "LogPath": "/docker-graph/containers/31f6056f95e08bf2b805609824e1ed416bc76493b4814d6f006fbb3ba6ec30ff/31f6056f95e08bf2b805609824e1ed416bc76493b4814d6f006fbb3ba6ec30ff-json.log", + "Name": "/scheduled-stop-206205", + "RestartCount": 0, + "Driver": "overlay2", + "Platform": "linux", + "MountLabel": "", + "ProcessLabel": "", + "AppArmorProfile": "", + "ExecIDs": null, + "HostConfig": { + "Binds": [ + "scheduled-stop-206205:/var", + "/lib/modules:/lib/modules:ro" + ], + "ContainerIDFile": "", + "LogConfig": { + "Type": "json-file", + "Config": {} + }, + "NetworkMode": "scheduled-stop-206205", + "PortBindings": { + "22/tcp": [ + { + "HostIp": "127.0.0.1", + "HostPort": "" + } + ], + "2376/tcp": [ + { + 
"HostIp": "127.0.0.1", + "HostPort": "" + } + ], + "32443/tcp": [ + { + "HostIp": "127.0.0.1", + "HostPort": "" + } + ], + "5000/tcp": [ + { + "HostIp": "127.0.0.1", + "HostPort": "" + } + ], + "8443/tcp": [ + { + "HostIp": "127.0.0.1", + "HostPort": "" + } + ] + }, + "RestartPolicy": { + "Name": "no", + "MaximumRetryCount": 0 + }, + "AutoRemove": false, + "VolumeDriver": "", + "VolumesFrom": null, + "ConsoleSize": [ + 0, + 0 + ], + "CapAdd": null, + "CapDrop": null, + "CgroupnsMode": "private", + "Dns": [], + "DnsOptions": [], + "DnsSearch": [], + "ExtraHosts": null, + "GroupAdd": null, + "IpcMode": "private", + "Cgroup": "", + "Links": null, + "OomScoreAdj": 0, + "PidMode": "", + "Privileged": true, + "PublishAllPorts": false, + "ReadonlyRootfs": false, + "SecurityOpt": [ + "seccomp=unconfined", + "apparmor=unconfined", + "label=disable" + ], + "Tmpfs": { + "/run": "", + "/tmp": "" + }, + "UTSMode": "", + "UsernsMode": "", + "ShmSize": 67108864, + "Runtime": "runc", + "Isolation": "", + "CpuShares": 0, + "Memory": 3221225472, + "NanoCpus": 0, + "CgroupParent": "", + "BlkioWeight": 0, + "BlkioWeightDevice": [], + "BlkioDeviceReadBps": [], + "BlkioDeviceWriteBps": [], + "BlkioDeviceReadIOps": [], + "BlkioDeviceWriteIOps": [], + "CpuPeriod": 0, + "CpuQuota": 0, + "CpuRealtimePeriod": 0, + "CpuRealtimeRuntime": 0, + "CpusetCpus": "", + "CpusetMems": "", + "Devices": [], + "DeviceCgroupRules": null, + "DeviceRequests": null, + "MemoryReservation": 0, + "MemorySwap": 6442450944, + "MemorySwappiness": null, + "OomKillDisable": null, + "PidsLimit": null, + "Ulimits": [], + "CpuCount": 0, + "CpuPercent": 0, + "IOMaximumIOps": 0, + "IOMaximumBandwidth": 0, + "MaskedPaths": null, + "ReadonlyPaths": null + }, + "GraphDriver": { + "Data": { + "ID": "31f6056f95e08bf2b805609824e1ed416bc76493b4814d6f006fbb3ba6ec30ff", + "LowerDir": "/docker-graph/overlay2/6e66def04d0fd78d730127ced6fbc4cfd399be8dd154964ecd4b00fffc2a71cc-init/diff:/docker-graph/overlay2/2c0bdfc211c693b1d687c1518f6a16150f580942f571d34b0e676e2dce7580a1/diff", + "MergedDir": "/docker-graph/overlay2/6e66def04d0fd78d730127ced6fbc4cfd399be8dd154964ecd4b00fffc2a71cc/merged", + "UpperDir": "/docker-graph/overlay2/6e66def04d0fd78d730127ced6fbc4cfd399be8dd154964ecd4b00fffc2a71cc/diff", + "WorkDir": "/docker-graph/overlay2/6e66def04d0fd78d730127ced6fbc4cfd399be8dd154964ecd4b00fffc2a71cc/work" + }, + "Name": "overlay2" + }, + "Mounts": [ + { + "Type": "volume", + "Name": "scheduled-stop-206205", + "Source": "/docker-graph/volumes/scheduled-stop-206205/_data", + "Destination": "/var", + "Driver": "local", + "Mode": "z", + "RW": true, + "Propagation": "" + }, + { + "Type": "bind", + "Source": "/lib/modules", + "Destination": "/lib/modules", + "Mode": "ro", + "RW": false, + "Propagation": "rprivate" + } + ], + "Config": { + "Hostname": "scheduled-stop-206205", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "ExposedPorts": { + "22/tcp": {}, + "2376/tcp": {}, + "32443/tcp": {}, + "5000/tcp": {}, + "8443/tcp": {} + }, + "Tty": true, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "container=docker", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": null, + "Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8", + "Volumes": null, + "WorkingDir": "/", + "Entrypoint": [ + "/usr/local/bin/entrypoint", + "/sbin/init" + ], + "OnBuild": null, + "Labels": { + 
"created_by.minikube.sigs.k8s.io": "true", + "mode.minikube.sigs.k8s.io": "scheduled-stop-206205", + "name.minikube.sigs.k8s.io": "scheduled-stop-206205", + "role.minikube.sigs.k8s.io": "" + }, + "StopSignal": "SIGRTMIN+3" + }, + "NetworkSettings": { + "Bridge": "", + "SandboxID": "cbbe92502d7f53f3014e4c7321c79a89e2dd2d7ee88c7f5d7c41d34cea7c13a1", + "SandboxKey": "/var/run/docker/netns/cbbe92502d7f", + "Ports": { + "22/tcp": [ + { + "HostIp": "127.0.0.1", + "HostPort": "32969" + } + ], + "2376/tcp": [ + { + "HostIp": "127.0.0.1", + "HostPort": "32970" + } + ], + "32443/tcp": [ + { + "HostIp": "127.0.0.1", + "HostPort": "32973" + } + ], + "5000/tcp": [ + { + "HostIp": "127.0.0.1", + "HostPort": "32971" + } + ], + "8443/tcp": [ + { + "HostIp": "127.0.0.1", + "HostPort": "32972" + } + ] + }, + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "scheduled-stop-206205": { + "IPAMConfig": { + "IPv4Address": "192.168.76.2" + }, + "Links": null, + "Aliases": null, + "MacAddress": "0a:da:fc:b0:02:65", + "DriverOpts": null, + "GwPriority": 0, + "NetworkID": "66cb68aed641a7c6a3dc7284672d02d3b05dc7509842df0738264f86786d9811", + "EndpointID": "b4003dcb7de9462fe48e835483ccc98d5d1a769f26bf6c438f9cbefa4d27035c", + "Gateway": "192.168.76.1", + "IPAddress": "192.168.76.2", + "IPPrefixLen": 24, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "DNSNames": [ + "scheduled-stop-206205", + "31f6056f95e0" + ] + } + } + } + } + ] + + -- /stdout -- + helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p scheduled-stop-206205 -n scheduled-stop-206205 + helpers_test.go:252: <<< TestScheduledStopUnix FAILED: start of post-mortem logs <<< + helpers_test.go:253: ======> post-mortem[TestScheduledStopUnix]: minikube logs <====== + helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p scheduled-stop-206205 logs -n 25 + helpers_test.go:260: TestScheduledStopUnix logs: + -- stdout -- + + ==> Audit <== + ┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────────┬──────────┬─────────┬─────────────────────┬─────────────────────┐ + │ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │ + ├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────────┼──────────┼─────────┼─────────────────────┼─────────────────────┤ + │ stop │ -p multinode-809166 │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 25 23:13 UTC │ 02 Nov 25 23:13 UTC │ + │ start │ -p multinode-809166 --wait=true -v=5 --alsologtostderr │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 25 23:13 UTC │ 02 Nov 25 23:14 UTC │ + │ node │ list -p multinode-809166 │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 25 23:14 UTC │ │ + │ node │ multinode-809166 node delete m03 │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 25 23:14 UTC │ 02 Nov 25 23:14 UTC │ + │ stop │ multinode-809166 stop │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 25 23:14 UTC │ 02 Nov 25 23:14 UTC │ + │ start │ -p multinode-809166 --wait=true -v=5 --alsologtostderr --driver=docker 
--container-runtime=docker │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 25 23:14 UTC │ 02 Nov 25 23:15 UTC │ + │ node │ list -p multinode-809166 │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 25 23:15 UTC │ │ + │ start │ -p multinode-809166-m02 --driver=docker --container-runtime=docker │ multinode-809166-m02 │ minikube │ v1.37.0 │ 02 Nov 25 23:15 UTC │ │ + │ start │ -p multinode-809166-m03 --driver=docker --container-runtime=docker │ multinode-809166-m03 │ minikube │ v1.37.0 │ 02 Nov 25 23:15 UTC │ 02 Nov 25 23:15 UTC │ + │ node │ add -p multinode-809166 │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 25 23:15 UTC │ │ + │ delete │ -p multinode-809166-m03 │ multinode-809166-m03 │ minikube │ v1.37.0 │ 02 Nov 25 23:15 UTC │ 02 Nov 25 23:15 UTC │ + │ delete │ -p multinode-809166 │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 25 23:15 UTC │ 02 Nov 25 23:15 UTC │ + │ start │ -p test-preload-092353 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=docker --kubernetes-version=v1.32.0 │ test-preload-092353 │ minikube │ v1.37.0 │ 02 Nov 25 23:15 UTC │ 02 Nov 25 23:16 UTC │ + │ image │ test-preload-092353 image pull gcr.io/k8s-minikube/busybox │ test-preload-092353 │ minikube │ v1.37.0 │ 02 Nov 25 23:16 UTC │ 02 Nov 25 23:16 UTC │ + │ stop │ -p test-preload-092353 │ test-preload-092353 │ minikube │ v1.37.0 │ 02 Nov 25 23:16 UTC │ 02 Nov 25 23:16 UTC │ + │ start │ -p test-preload-092353 --memory=3072 --alsologtostderr -v=1 --wait=true --driver=docker --container-runtime=docker │ test-preload-092353 │ minikube │ v1.37.0 │ 02 Nov 25 23:16 UTC │ 02 Nov 25 23:17 UTC │ + │ image │ test-preload-092353 image list │ test-preload-092353 │ minikube │ v1.37.0 │ 02 Nov 25 23:17 UTC │ 02 Nov 25 23:17 UTC │ + │ delete │ -p test-preload-092353 │ test-preload-092353 │ minikube │ v1.37.0 │ 02 Nov 25 23:17 UTC │ 02 Nov 25 23:17 UTC │ + │ start │ -p scheduled-stop-206205 --memory=3072 --driver=docker --container-runtime=docker │ scheduled-stop-206205 │ minikube │ v1.37.0 │ 02 Nov 25 23:17 UTC │ 02 Nov 25 23:18 UTC │ + │ stop │ -p scheduled-stop-206205 --schedule 5m │ scheduled-stop-206205 │ minikube │ v1.37.0 │ 02 Nov 25 23:18 UTC │ │ + │ stop │ -p scheduled-stop-206205 --schedule 5m │ scheduled-stop-206205 │ minikube │ v1.37.0 │ 02 Nov 25 23:18 UTC │ │ + │ stop │ -p scheduled-stop-206205 --schedule 5m │ scheduled-stop-206205 │ minikube │ v1.37.0 │ 02 Nov 25 23:18 UTC │ │ + │ stop │ -p scheduled-stop-206205 --schedule 15s │ scheduled-stop-206205 │ minikube │ v1.37.0 │ 02 Nov 25 23:18 UTC │ │ + │ stop │ -p scheduled-stop-206205 --schedule 15s │ scheduled-stop-206205 │ minikube │ v1.37.0 │ 02 Nov 25 23:18 UTC │ │ + │ stop │ -p scheduled-stop-206205 --schedule 15s │ scheduled-stop-206205 │ minikube │ v1.37.0 │ 02 Nov 25 23:18 UTC │ │ + └─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────────┴──────────┴─────────┴─────────────────────┴─────────────────────┘ + + + ==> Last Start <== + Log file created at: 2025/11/02 23:17:53 + Running on machine: ec6b3253-b39b-4dea-b672-e2db97323995 + Binary: Built with gc go1.24.6 for linux/amd64 + Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg + I1102 23:17:53.619434 253591 out.go:360] Setting OutFile to fd 1 ... 
+ I1102 23:17:53.619588 253591 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 23:17:53.619590 253591 out.go:374] Setting ErrFile to fd 2... + I1102 23:17:53.619592 253591 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 23:17:53.619724 253591 root.go:338] Updating PATH: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/bin + I1102 23:17:53.619971 253591 out.go:368] Setting JSON to false + I1102 23:17:53.626493 253591 start.go:133] hostinfo: {"hostname":"ec6b3253-b39b-4dea-b672-e2db97323995","uptime":1037808,"bootTime":1761087666,"procs":105,"os":"linux","platform":"debian","platformFamily":"debian","platformVersion":"12.12","kernelVersion":"6.6.97+","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"4a5d182f-ea69-4eda-bb72-26ec6daf619d"} + I1102 23:17:53.626551 253591 start.go:143] virtualization: kvm guest + I1102 23:17:53.626867 253591 out.go:179] * [scheduled-stop-206205] minikube v1.37.0 on Debian 12.12 (kvm/amd64) + I1102 23:17:53.627003 253591 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true + I1102 23:17:53.627069 253591 notify.go:221] Checking for updates... + I1102 23:17:53.627225 253591 out.go:179] - KUBECONFIG=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig + I1102 23:17:53.627327 253591 out.go:179] - MINIKUBE_HOME=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube + I1102 23:17:53.627437 253591 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64 + I1102 23:17:53.627553 253591 out.go:179] - MINIKUBE_FORCE_SYSTEMD= + I1102 23:17:53.628096 253591 driver.go:422] Setting default libvirt URI to qemu:///system + I1102 23:17:53.641440 253591 docker.go:124] docker version: linux-28.5.1:Docker Engine - Community + I1102 23:17:53.641487 253591 cli_runner.go:164] Run: docker system info --format "{{json .}}" + I1102 23:17:53.672246 253591 info.go:266] docker info: {ID:c2b1af59-a1e1-4db0-9aaa-240bf36ebecd Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:25 OomKillDisable:false NGoroutines:49 SystemTime:2025-11-02 23:17:53.666641765 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.6.97+ OperatingSystem:Debian GNU/Linux 12 (bookworm) (containerized) OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[https://mirror.gcr.io/] Secure:true Official:true}} Mirrors:[https://mirror.gcr.io/]} NCPU:8 MemTotal:65320062976 GenericResources: DockerRootDir:/docker-graph HTTPProxy: HTTPSProxy: NoProxy: Name:ec6b3253-b39b-4dea-b672-e2db97323995 Labels:[] ExperimentalBuild:false ServerVersion:28.5.1 ClusterStore: ClusterAdvertise: 
Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:b98a3aace656320842a23f4a392a33f46af97866 Expected:} RuncCommit:{ID:v1.3.0-0-g4ca628d1 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.29.1]] Warnings:}} + I1102 23:17:53.672296 253591 docker.go:319] overlay module found + I1102 23:17:53.672501 253591 out.go:179] * Using the docker driver based on user configuration + I1102 23:17:53.672610 253591 start.go:309] selected driver: docker + I1102 23:17:53.672616 253591 start.go:930] validating driver "docker" against + I1102 23:17:53.672622 253591 start.go:941] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error: Reason: Fix: Doc: Version:} + I1102 23:17:53.673028 253591 cli_runner.go:164] Run: docker system info --format "{{json .}}" + I1102 23:17:53.700480 253591 info.go:266] docker info: {ID:c2b1af59-a1e1-4db0-9aaa-240bf36ebecd Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:25 OomKillDisable:false NGoroutines:49 SystemTime:2025-11-02 23:17:53.695117802 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.6.97+ OperatingSystem:Debian GNU/Linux 12 (bookworm) (containerized) OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[https://mirror.gcr.io/] Secure:true Official:true}} Mirrors:[https://mirror.gcr.io/]} NCPU:8 MemTotal:65320062976 GenericResources: DockerRootDir:/docker-graph HTTPProxy: HTTPSProxy: NoProxy: Name:ec6b3253-b39b-4dea-b672-e2db97323995 Labels:[] ExperimentalBuild:false ServerVersion:28.5.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:b98a3aace656320842a23f4a392a33f46af97866 Expected:} RuncCommit:{ID:v1.3.0-0-g4ca628d1 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. 
Version:v0.29.1]] Warnings:}} + I1102 23:17:53.700553 253591 start_flags.go:327] no existing cluster config was found, will generate one from the flags + I1102 23:17:53.700665 253591 start_flags.go:974] Wait components to verify : map[apiserver:true system_pods:true] + I1102 23:17:53.700875 253591 out.go:179] * Using Docker driver with root privileges + I1102 23:17:53.701059 253591 cni.go:84] Creating CNI manager for "" + I1102 23:17:53.701094 253591 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge + I1102 23:17:53.701098 253591 start_flags.go:336] Found "bridge CNI" CNI - setting NetworkPlugin=cni + I1102 23:17:53.701125 253591 start.go:353] cluster config: + {Name:scheduled-stop-206205 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-206205 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} + I1102 23:17:53.701307 253591 out.go:179] * Starting "scheduled-stop-206205" primary control-plane node in "scheduled-stop-206205" cluster + I1102 23:17:53.701399 253591 cache.go:124] Beginning downloading kic base image for docker with docker + I1102 23:17:53.701524 253591 out.go:179] * Pulling base image v0.0.48-1760939008-21773 ... 
+ I1102 23:17:53.701631 253591 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker + I1102 23:17:53.701658 253591 preload.go:198] Found local preload: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4 + I1102 23:17:53.701660 253591 cache.go:59] Caching tarball of preloaded images + I1102 23:17:53.701693 253591 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 in local docker daemon + I1102 23:17:53.701721 253591 preload.go:233] Found /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download + I1102 23:17:53.701726 253591 cache.go:62] Finished verifying existence of preloaded tar for v1.34.1 on docker + I1102 23:17:53.701978 253591 profile.go:143] Saving config to /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/config.json ... + I1102 23:17:53.701990 253591 lock.go:35] WriteFile acquiring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/config.json: {Name:mk241f7c6d5362604a224f88c0eaf0c33a5d7694 Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:17:53.712504 253591 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 in local docker daemon, skipping pull + I1102 23:17:53.712509 253591 cache.go:148] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 exists in daemon, skipping load + I1102 23:17:53.712516 253591 cache.go:233] Successfully downloaded all kic artifacts + I1102 23:17:53.712531 253591 start.go:360] acquireMachinesLock for scheduled-stop-206205: {Name:mk5eda29c86e1f811472fcc8ec5414843874c055 Clock:{} Delay:500ms Timeout:10m0s Cancel:} + I1102 23:17:53.712606 253591 start.go:364] duration metric: took 67.976µs to acquireMachinesLock for "scheduled-stop-206205" + I1102 23:17:53.712620 253591 start.go:93] Provisioning new machine with config: &{Name:scheduled-stop-206205 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-206205 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 
ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true} + I1102 23:17:53.712654 253591 start.go:125] createHost starting for "" (driver="docker") + I1102 23:17:53.712941 253591 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ... + I1102 23:17:53.713050 253591 start.go:159] libmachine.API.Create for "scheduled-stop-206205" (driver="docker") + I1102 23:17:53.713062 253591 client.go:173] LocalClient.Create starting + I1102 23:17:53.713101 253591 main.go:143] libmachine: Reading certificate data from /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca.pem + I1102 23:17:53.713123 253591 main.go:143] libmachine: Decoding PEM data... + I1102 23:17:53.713130 253591 main.go:143] libmachine: Parsing certificate... + I1102 23:17:53.713163 253591 main.go:143] libmachine: Reading certificate data from /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/cert.pem + I1102 23:17:53.713175 253591 main.go:143] libmachine: Decoding PEM data... + I1102 23:17:53.713182 253591 main.go:143] libmachine: Parsing certificate... + I1102 23:17:53.713370 253591 cli_runner.go:164] Run: docker network inspect scheduled-stop-206205 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" + W1102 23:17:53.722515 253591 cli_runner.go:211] docker network inspect scheduled-stop-206205 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1 + I1102 23:17:53.722553 253591 network_create.go:284] running [docker network inspect scheduled-stop-206205] to gather additional debugging logs... 
+ I1102 23:17:53.722582 253591 cli_runner.go:164] Run: docker network inspect scheduled-stop-206205
+ W1102 23:17:53.731296 253591 cli_runner.go:211] docker network inspect scheduled-stop-206205 returned with exit code 1
+ I1102 23:17:53.731304 253591 network_create.go:287] error running [docker network inspect scheduled-stop-206205]: docker network inspect scheduled-stop-206205: exit status 1
+ stdout:
+ []
+
+ stderr:
+ Error response from daemon: network scheduled-stop-206205 not found
+ I1102 23:17:53.731309 253591 network_create.go:289] output of [docker network inspect scheduled-stop-206205]: -- stdout --
+ []
+
+ -- /stdout --
+ ** stderr **
+ Error response from daemon: network scheduled-stop-206205 not found
+
+ ** /stderr **
+ I1102 23:17:53.731374 253591 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}"
+ I1102 23:17:53.741002 253591 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-c8e5074369ec IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:4e:e7:c6:2d:b8:92} reservation:}
+ I1102 23:17:53.741112 253591 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-1155e43987f0 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:56:c9:4f:10:02:e5} reservation:}
+ I1102 23:17:53.741219 253591 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-1a04657d00b3 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:76:0f:75:c5:83:0d} reservation:}
+ I1102 23:17:53.741377 253591 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001b83cf0}
+ I1102 23:17:53.741385 253591 network_create.go:124] attempt to create docker network scheduled-stop-206205 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ...
+ I1102 23:17:53.741420 253591 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=scheduled-stop-206205 scheduled-stop-206205
+ I1102 23:17:53.768719 253591 network_create.go:108] docker network scheduled-stop-206205 192.168.76.0/24 created
+ I1102 23:17:53.768728 253591 kic.go:121] calculated static IP "192.168.76.2" for the "scheduled-stop-206205" container
+ I1102 23:17:53.768775 253591 cli_runner.go:164] Run: docker ps -a --format {{.Names}}
+ I1102 23:17:53.777450 253591 cli_runner.go:164] Run: docker volume create scheduled-stop-206205 --label name.minikube.sigs.k8s.io=scheduled-stop-206205 --label created_by.minikube.sigs.k8s.io=true
+ I1102 23:17:53.786567 253591 oci.go:103] Successfully created a docker volume scheduled-stop-206205
+ I1102 23:17:53.786609 253591 cli_runner.go:164] Run: docker run --rm --name scheduled-stop-206205-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=scheduled-stop-206205 --entrypoint /usr/bin/test -v scheduled-stop-206205:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 -d /var/lib
+ I1102 23:17:54.115618 253591 oci.go:107] Successfully prepared a docker volume scheduled-stop-206205
+ I1102 23:17:54.115834 253591 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker
+ I1102 23:17:54.115846 253591 kic.go:194] Starting extracting preloaded images to volume ...
+ I1102 23:17:54.115898 253591 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v scheduled-stop-206205:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 -I lz4 -xf /preloaded.tar -C /extractDir
+ I1102 23:17:57.636208 253591 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v scheduled-stop-206205:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 -I lz4 -xf /preloaded.tar -C /extractDir: (3.520280245s)
+ I1102 23:17:57.636226 253591 kic.go:203] duration metric: took 3.520377301s to extract preloaded images to volume ...
+ W1102 23:17:57.636302 253591 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted.
+ W1102 23:17:57.636321 253591 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.
+ I1102 23:17:57.636368 253591 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'" + I1102 23:17:57.672186 253591 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname scheduled-stop-206205 --name scheduled-stop-206205 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=scheduled-stop-206205 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=scheduled-stop-206205 --network scheduled-stop-206205 --ip 192.168.76.2 --volume scheduled-stop-206205:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 + I1102 23:17:57.898542 253591 cli_runner.go:164] Run: docker container inspect scheduled-stop-206205 --format={{.State.Running}} + I1102 23:17:57.909434 253591 cli_runner.go:164] Run: docker container inspect scheduled-stop-206205 --format={{.State.Status}} + I1102 23:17:57.918896 253591 cli_runner.go:164] Run: docker exec scheduled-stop-206205 stat /var/lib/dpkg/alternatives/iptables + I1102 23:17:57.949230 253591 oci.go:144] the created container "scheduled-stop-206205" has a running status. + I1102 23:17:57.949250 253591 kic.go:225] Creating ssh key for kic: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa... + I1102 23:17:57.994322 253591 kic_runner.go:191] docker (temp): /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes) + I1102 23:17:58.005867 253591 cli_runner.go:164] Run: docker container inspect scheduled-stop-206205 --format={{.State.Status}} + I1102 23:17:58.015315 253591 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys + I1102 23:17:58.015321 253591 kic_runner.go:114] Args: [docker exec --privileged scheduled-stop-206205 chown docker:docker /home/docker/.ssh/authorized_keys] + I1102 23:17:58.040815 253591 cli_runner.go:164] Run: docker container inspect scheduled-stop-206205 --format={{.State.Status}} + I1102 23:17:58.051004 253591 machine.go:94] provisionDockerMachine start ... 
+ I1102 23:17:58.051069 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:17:58.062794 253591 main.go:143] libmachine: Using SSH client type: native + I1102 23:17:58.062986 253591 main.go:143] libmachine: &{{{ 0 [] [] []} docker [0x841760] 0x844460 [] 0s} 127.0.0.1 32969 } + I1102 23:17:58.062993 253591 main.go:143] libmachine: About to run SSH command: + hostname + I1102 23:17:58.063450 253591 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:59844->127.0.0.1:32969: read: connection reset by peer + I1102 23:18:01.183913 253591 main.go:143] libmachine: SSH cmd err, output: : scheduled-stop-206205 + + I1102 23:18:01.183945 253591 ubuntu.go:182] provisioning hostname "scheduled-stop-206205" + I1102 23:18:01.184016 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:01.195923 253591 main.go:143] libmachine: Using SSH client type: native + I1102 23:18:01.196086 253591 main.go:143] libmachine: &{{{ 0 [] [] []} docker [0x841760] 0x844460 [] 0s} 127.0.0.1 32969 } + I1102 23:18:01.196091 253591 main.go:143] libmachine: About to run SSH command: + sudo hostname scheduled-stop-206205 && echo "scheduled-stop-206205" | sudo tee /etc/hostname + I1102 23:18:01.320196 253591 main.go:143] libmachine: SSH cmd err, output: : scheduled-stop-206205 + + I1102 23:18:01.320254 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:01.331285 253591 main.go:143] libmachine: Using SSH client type: native + I1102 23:18:01.331421 253591 main.go:143] libmachine: &{{{ 0 [] [] []} docker [0x841760] 0x844460 [] 0s} 127.0.0.1 32969 } + I1102 23:18:01.331428 253591 main.go:143] libmachine: About to run SSH command: + + if ! 
grep -xq '.*\sscheduled-stop-206205' /etc/hosts; then + if grep -xq '127.0.1.1\s.*' /etc/hosts; then + sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 scheduled-stop-206205/g' /etc/hosts; + else + echo '127.0.1.1 scheduled-stop-206205' | sudo tee -a /etc/hosts; + fi + fi + I1102 23:18:01.448573 253591 main.go:143] libmachine: SSH cmd err, output: : + I1102 23:18:01.448587 253591 ubuntu.go:188] set auth options {CertDir:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube CaCertPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca.pem CaPrivateKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/server.pem ServerKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/server-key.pem ClientKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube} + I1102 23:18:01.448596 253591 ubuntu.go:190] setting up certificates + I1102 23:18:01.448602 253591 provision.go:84] configureAuth start + I1102 23:18:01.448643 253591 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-206205 + I1102 23:18:01.459070 253591 provision.go:143] copyHostCerts + I1102 23:18:01.459118 253591 exec_runner.go:144] found /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cert.pem, removing ... + I1102 23:18:01.459122 253591 exec_runner.go:203] rm: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cert.pem + I1102 23:18:01.459174 253591 exec_runner.go:151] cp: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/cert.pem --> /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cert.pem (1127 bytes) + I1102 23:18:01.459247 253591 exec_runner.go:144] found /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/key.pem, removing ... + I1102 23:18:01.459249 253591 exec_runner.go:203] rm: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/key.pem + I1102 23:18:01.459269 253591 exec_runner.go:151] cp: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/key.pem --> /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/key.pem (1675 bytes) + I1102 23:18:01.459314 253591 exec_runner.go:144] found /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.pem, removing ... 
+ I1102 23:18:01.459316 253591 exec_runner.go:203] rm: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.pem + I1102 23:18:01.459339 253591 exec_runner.go:151] cp: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca.pem --> /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.pem (1082 bytes) + I1102 23:18:01.459376 253591 provision.go:117] generating server cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/server.pem ca-key=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca.pem private-key=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca-key.pem org=minikube.scheduled-stop-206205 san=[127.0.0.1 192.168.76.2 localhost minikube scheduled-stop-206205] + I1102 23:18:01.577357 253591 provision.go:177] copyRemoteCerts + I1102 23:18:01.577390 253591 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker + I1102 23:18:01.577415 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:01.588288 253591 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa Username:docker} + I1102 23:18:01.673715 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes) + I1102 23:18:01.684733 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes) + I1102 23:18:01.695653 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes) + I1102 23:18:01.706714 253591 provision.go:87] duration metric: took 258.106022ms to configureAuth + I1102 23:18:01.706725 253591 ubuntu.go:206] setting minikube options for container-runtime + I1102 23:18:01.706839 253591 config.go:182] Loaded profile config "scheduled-stop-206205": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 23:18:01.706872 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:01.716737 253591 main.go:143] libmachine: Using SSH client type: native + I1102 23:18:01.716859 253591 main.go:143] libmachine: &{{{ 0 [] [] []} docker [0x841760] 0x844460 [] 0s} 127.0.0.1 32969 } + I1102 23:18:01.716862 253591 main.go:143] libmachine: About to run SSH command: + df --output=fstype / | tail -n 1 + I1102 23:18:01.834233 253591 main.go:143] libmachine: SSH cmd err, output: : overlay + + I1102 23:18:01.834239 253591 ubuntu.go:71] root file system type: overlay + I1102 23:18:01.834305 253591 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ... 
+ I1102 23:18:01.834344 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:01.844396 253591 main.go:143] libmachine: Using SSH client type: native + I1102 23:18:01.844526 253591 main.go:143] libmachine: &{{{ 0 [] [] []} docker [0x841760] 0x844460 [] 0s} 127.0.0.1 32969 } + I1102 23:18:01.844561 253591 main.go:143] libmachine: About to run SSH command: + sudo mkdir -p /lib/systemd/system && printf %s "[Unit] + Description=Docker Application Container Engine + Documentation=https://docs.docker.com + After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target + Wants=network-online.target containerd.service + Requires=docker.socket + StartLimitBurst=3 + StartLimitIntervalSec=60 + + [Service] + Type=notify + Restart=always + + + + # This file is a systemd drop-in unit that inherits from the base dockerd configuration. + # The base configuration already specifies an 'ExecStart=...' command. The first directive + # here is to clear out that command inherited from the base configuration. Without this, + # the command from the base configuration and the command specified here are treated as + # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd + # will catch this invalid input and refuse to start the service with an error like: + # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. + + # NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other + # container runtimes. If left unlimited, it may result in OOM issues with MySQL. + ExecStart= + ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \ + -H fd:// --containerd=/run/containerd/containerd.sock \ + -H unix:///var/run/docker.sock \ + --default-ulimit=nofile=1048576:1048576 \ + --tlsverify \ + --tlscacert /etc/docker/ca.pem \ + --tlscert /etc/docker/server.pem \ + --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + ExecReload=/bin/kill -s HUP \$MAINPID + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Uncomment TasksMax if your systemd version supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + TimeoutStartSec=0 + + # set delegate yes so that systemd does not reset the cgroups of docker containers + Delegate=yes + + # kill only the docker process, not all processes in the cgroup + KillMode=process + OOMScoreAdjust=-500 + + [Install] + WantedBy=multi-user.target + " | sudo tee /lib/systemd/system/docker.service.new + I1102 23:18:01.969042 253591 main.go:143] libmachine: SSH cmd err, output: : [Unit] + Description=Docker Application Container Engine + Documentation=https://docs.docker.com + After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target + Wants=network-online.target containerd.service + Requires=docker.socket + StartLimitBurst=3 + StartLimitIntervalSec=60 + + [Service] + Type=notify + Restart=always + + + + # This file is a systemd drop-in unit that inherits from the base dockerd configuration. + # The base configuration already specifies an 'ExecStart=...' command. 
The first directive + # here is to clear out that command inherited from the base configuration. Without this, + # the command from the base configuration and the command specified here are treated as + # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd + # will catch this invalid input and refuse to start the service with an error like: + # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. + + # NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other + # container runtimes. If left unlimited, it may result in OOM issues with MySQL. + ExecStart= + ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + ExecReload=/bin/kill -s HUP $MAINPID + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Uncomment TasksMax if your systemd version supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + TimeoutStartSec=0 + + # set delegate yes so that systemd does not reset the cgroups of docker containers + Delegate=yes + + # kill only the docker process, not all processes in the cgroup + KillMode=process + OOMScoreAdjust=-500 + + [Install] + WantedBy=multi-user.target + + I1102 23:18:01.969085 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:01.978588 253591 main.go:143] libmachine: Using SSH client type: native + I1102 23:18:01.978711 253591 main.go:143] libmachine: &{{{ 0 [] [] []} docker [0x841760] 0x844460 [] 0s} 127.0.0.1 32969 } + I1102 23:18:01.978718 253591 main.go:143] libmachine: About to run SSH command: + sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; } + I1102 23:18:02.484307 253591 main.go:143] libmachine: SSH cmd err, output: : --- /lib/systemd/system/docker.service 2025-10-08 12:15:50.000000000 +0000 + +++ /lib/systemd/system/docker.service.new 2025-11-02 23:18:01.967025868 +0000 + @@ -9,23 +9,34 @@ + + [Service] + Type=notify + -# the default is not to use systemd for cgroups because the delegate issues still + -# exists and systemd currently does not support the cgroup feature set required + -# for containers run by docker + -ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock + -ExecReload=/bin/kill -s HUP $MAINPID + -TimeoutStartSec=0 + -RestartSec=2 + Restart=always + + + + + + +# This file is a systemd drop-in unit that inherits from the base dockerd configuration. + +# The base configuration already specifies an 'ExecStart=...' command. The first directive + +# here is to clear out that command inherited from the base configuration. 
Without this, + +# the command from the base configuration and the command specified here are treated as + +# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd + +# will catch this invalid input and refuse to start the service with an error like: + +# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. + + + +# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other + +# container runtimes. If left unlimited, it may result in OOM issues with MySQL. + +ExecStart= + +ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + +ExecReload=/bin/kill -s HUP $MAINPID + + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + +LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + -# Comment TasksMax if your systemd version does not support it. + -# Only systemd 226 and above support this option. + +# Uncomment TasksMax if your systemd version supports it. + +# Only systemd 226 and above support this version. + TasksMax=infinity + +TimeoutStartSec=0 + + # set delegate yes so that systemd does not reset the cgroups of docker containers + Delegate=yes + Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install. + Executing: /lib/systemd/systemd-sysv-install enable docker + + I1102 23:18:02.484318 253591 machine.go:97] duration metric: took 4.433308035s to provisionDockerMachine + I1102 23:18:02.484325 253591 client.go:176] duration metric: took 8.771259893s to LocalClient.Create + I1102 23:18:02.484334 253591 start.go:167] duration metric: took 8.771282789s to libmachine.API.Create "scheduled-stop-206205" + I1102 23:18:02.484338 253591 start.go:293] postStartSetup for "scheduled-stop-206205" (driver="docker") + I1102 23:18:02.484344 253591 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs] + I1102 23:18:02.484397 253591 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs + I1102 23:18:02.484429 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:02.495074 253591 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa Username:docker} + I1102 23:18:02.579870 253591 ssh_runner.go:195] Run: cat /etc/os-release + I1102 23:18:02.581857 253591 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found + I1102 23:18:02.581866 253591 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm) + I1102 23:18:02.581872 253591 filesync.go:126] Scanning 
/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/addons for local assets ... + I1102 23:18:02.581930 253591 filesync.go:126] Scanning /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/files for local assets ... + I1102 23:18:02.582001 253591 filesync.go:149] local asset: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/files/etc/ssl/certs/378692.pem -> 378692.pem in /etc/ssl/certs + I1102 23:18:02.582092 253591 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs + I1102 23:18:02.586576 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/files/etc/ssl/certs/378692.pem --> /etc/ssl/certs/378692.pem (1708 bytes) + I1102 23:18:02.597463 253591 start.go:296] duration metric: took 113.117075ms for postStartSetup + I1102 23:18:02.597741 253591 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-206205 + I1102 23:18:02.607698 253591 profile.go:143] Saving config to /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/config.json ... + I1102 23:18:02.607906 253591 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'" + I1102 23:18:02.607947 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:02.617590 253591 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa Username:docker} + I1102 23:18:02.700643 253591 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'" + I1102 23:18:02.703460 253591 start.go:128] duration metric: took 8.990801049s to createHost + I1102 23:18:02.703467 253591 start.go:83] releasing machines lock for "scheduled-stop-206205", held for 8.990856965s + I1102 23:18:02.703518 253591 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-206205 + I1102 23:18:02.713584 253591 ssh_runner.go:195] Run: cat /version.json + I1102 23:18:02.713608 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:02.713674 253591 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/ + I1102 23:18:02.713716 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:02.724014 253591 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa Username:docker} + I1102 23:18:02.724375 253591 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa Username:docker} + I1102 23:18:02.807050 253591 ssh_runner.go:195] Run: systemctl --version + I1102 23:18:02.875009 253591 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*" + W1102 23:18:02.877650 253591 cni.go:209] 
loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found + I1102 23:18:02.877697 253591 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ; + I1102 23:18:02.890820 253591 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s) + I1102 23:18:02.890826 253591 start.go:496] detecting cgroup driver to use... + I1102 23:18:02.890842 253591 detect.go:190] detected "systemd" cgroup driver on host os + I1102 23:18:02.890905 253591 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock + " | sudo tee /etc/crictl.yaml" + I1102 23:18:02.899415 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml" + I1102 23:18:02.904831 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml" + I1102 23:18:02.910257 253591 containerd.go:146] configuring containerd to use "systemd" as cgroup driver... + I1102 23:18:02.910293 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml" + I1102 23:18:02.915564 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml" + I1102 23:18:02.920719 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml" + I1102 23:18:02.925871 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml" + I1102 23:18:02.931165 253591 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk" + I1102 23:18:02.936119 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml" + I1102 23:18:02.941336 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml" + I1102 23:18:02.946588 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml" + I1102 23:18:02.951864 253591 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables + I1102 23:18:02.956385 253591 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward" + I1102 23:18:02.960861 253591 ssh_runner.go:195] Run: sudo systemctl daemon-reload + I1102 23:18:03.011321 253591 ssh_runner.go:195] Run: sudo systemctl restart containerd + I1102 23:18:03.065100 253591 start.go:496] detecting cgroup driver to use... 
+ I1102 23:18:03.065116 253591 detect.go:190] detected "systemd" cgroup driver on host os + I1102 23:18:03.065157 253591 ssh_runner.go:195] Run: sudo systemctl cat docker.service + I1102 23:18:03.072378 253591 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd + I1102 23:18:03.079138 253591 ssh_runner.go:195] Run: sudo systemctl stop -f containerd + I1102 23:18:03.088438 253591 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd + I1102 23:18:03.095302 253591 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio + I1102 23:18:03.102214 253591 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock + " | sudo tee /etc/crictl.yaml" + I1102 23:18:03.110662 253591 ssh_runner.go:195] Run: which cri-dockerd + I1102 23:18:03.112679 253591 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d + I1102 23:18:03.117226 253591 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes) + I1102 23:18:03.124875 253591 ssh_runner.go:195] Run: sudo systemctl unmask docker.service + I1102 23:18:03.174772 253591 ssh_runner.go:195] Run: sudo systemctl enable docker.socket + I1102 23:18:03.224815 253591 docker.go:575] configuring docker to use "systemd" as cgroup driver... + I1102 23:18:03.224868 253591 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (129 bytes) + I1102 23:18:03.232349 253591 ssh_runner.go:195] Run: sudo systemctl reset-failed docker + I1102 23:18:03.239110 253591 ssh_runner.go:195] Run: sudo systemctl daemon-reload + I1102 23:18:03.287733 253591 ssh_runner.go:195] Run: sudo systemctl restart docker + I1102 23:18:03.514202 253591 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker + I1102 23:18:03.521123 253591 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket + I1102 23:18:03.528294 253591 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service + I1102 23:18:03.535614 253591 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket + I1102 23:18:03.587693 253591 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket + I1102 23:18:03.633586 253591 ssh_runner.go:195] Run: sudo systemctl daemon-reload + I1102 23:18:03.680333 253591 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket + I1102 23:18:03.695969 253591 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service + I1102 23:18:03.702678 253591 ssh_runner.go:195] Run: sudo systemctl daemon-reload + I1102 23:18:03.750398 253591 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service + I1102 23:18:03.798855 253591 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service + I1102 23:18:03.805577 253591 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock + I1102 23:18:03.805613 253591 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock + I1102 23:18:03.807636 253591 start.go:564] Will wait 60s for crictl version + I1102 23:18:03.807673 253591 ssh_runner.go:195] Run: which crictl + I1102 23:18:03.809563 253591 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version + I1102 23:18:03.823581 253591 start.go:580] Version: 0.1.0 + RuntimeName: docker + RuntimeVersion: 28.5.1 + RuntimeApiVersion: v1 + I1102 23:18:03.823630 253591 ssh_runner.go:195] Run: docker version --format {{.Server.Version}} + I1102 23:18:03.837461 253591 ssh_runner.go:195] Run: docker version 
--format {{.Server.Version}} + I1102 23:18:03.851467 253591 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ... + I1102 23:18:03.851513 253591 cli_runner.go:164] Run: docker network inspect scheduled-stop-206205 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" + I1102 23:18:03.862865 253591 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts + I1102 23:18:03.864895 253591 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts"" + I1102 23:18:03.870983 253591 kubeadm.go:884] updating cluster {Name:scheduled-stop-206205 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-206205 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ... 
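The host.minikube.internal record is added with a small rewrite of /etc/hosts, exactly as the ssh_runner invocation above shows: strip any stale entry, append the docker network gateway, and copy the temporary file back. A sketch of the same idiom (192.168.76.1 is the gateway of this run's network and will differ elsewhere):

# remove any previous host.minikube.internal line and append the current gateway
{ grep -v $'\thost.minikube.internal$' /etc/hosts; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$
# replace /etc/hosts with the patched copy
sudo cp /tmp/h.$$ /etc/hosts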
+ I1102 23:18:03.871038 253591 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker + I1102 23:18:03.871073 253591 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}} + I1102 23:18:03.882220 253591 docker.go:691] Got preloaded images: -- stdout -- + registry.k8s.io/kube-scheduler:v1.34.1 + registry.k8s.io/kube-apiserver:v1.34.1 + registry.k8s.io/kube-controller-manager:v1.34.1 + registry.k8s.io/kube-proxy:v1.34.1 + registry.k8s.io/etcd:3.6.4-0 + registry.k8s.io/pause:3.10.1 + registry.k8s.io/coredns/coredns:v1.12.1 + gcr.io/k8s-minikube/storage-provisioner:v5 + + -- /stdout -- + I1102 23:18:03.882226 253591 docker.go:621] Images already preloaded, skipping extraction + I1102 23:18:03.882267 253591 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}} + I1102 23:18:03.893738 253591 docker.go:691] Got preloaded images: -- stdout -- + registry.k8s.io/kube-scheduler:v1.34.1 + registry.k8s.io/kube-apiserver:v1.34.1 + registry.k8s.io/kube-controller-manager:v1.34.1 + registry.k8s.io/kube-proxy:v1.34.1 + registry.k8s.io/etcd:3.6.4-0 + registry.k8s.io/pause:3.10.1 + registry.k8s.io/coredns/coredns:v1.12.1 + gcr.io/k8s-minikube/storage-provisioner:v5 + + -- /stdout -- + I1102 23:18:03.893746 253591 cache_images.go:86] Images are preloaded, skipping loading + I1102 23:18:03.893752 253591 kubeadm.go:935] updating node { 192.168.76.2 8443 v1.34.1 docker true true} ... + I1102 23:18:03.893809 253591 kubeadm.go:947] kubelet [Unit] + Wants=docker.socket + + [Service] + ExecStart= + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=scheduled-stop-206205 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2 + + [Install] + config: + {KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-206205 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} + I1102 23:18:03.893851 253591 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}} + I1102 23:18:03.921973 253591 cni.go:84] Creating CNI manager for "" + I1102 23:18:03.921989 253591 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge + I1102 23:18:03.922001 253591 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16 + I1102 23:18:03.922012 253591 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:scheduled-stop-206205 NodeName:scheduled-stop-206205 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] 
Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true} + I1102 23:18:03.922091 253591 kubeadm.go:196] kubeadm config: + apiVersion: kubeadm.k8s.io/v1beta4 + kind: InitConfiguration + localAPIEndpoint: + advertiseAddress: 192.168.76.2 + bindPort: 8443 + bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication + nodeRegistration: + criSocket: unix:///var/run/cri-dockerd.sock + name: "scheduled-stop-206205" + kubeletExtraArgs: + - name: "node-ip" + value: "192.168.76.2" + taints: [] + --- + apiVersion: kubeadm.k8s.io/v1beta4 + kind: ClusterConfiguration + apiServer: + certSANs: ["127.0.0.1", "localhost", "192.168.76.2"] + extraArgs: + - name: "enable-admission-plugins" + value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + controllerManager: + extraArgs: + - name: "allocate-node-cidrs" + value: "true" + - name: "leader-elect" + value: "false" + scheduler: + extraArgs: + - name: "leader-elect" + value: "false" + certificatesDir: /var/lib/minikube/certs + clusterName: mk + controlPlaneEndpoint: control-plane.minikube.internal:8443 + etcd: + local: + dataDir: /var/lib/minikube/etcd + kubernetesVersion: v1.34.1 + networking: + dnsDomain: cluster.local + podSubnet: "10.244.0.0/16" + serviceSubnet: 10.96.0.0/12 + --- + apiVersion: kubelet.config.k8s.io/v1beta1 + kind: KubeletConfiguration + authentication: + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + cgroupDriver: systemd + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + hairpinMode: hairpin-veth + runtimeRequestTimeout: 15m + clusterDomain: "cluster.local" + # disable disk resource management by default + imageGCHighThresholdPercent: 100 + evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" + failSwapOn: false + staticPodPath: /etc/kubernetes/manifests + --- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + kind: KubeProxyConfiguration + clusterCIDR: "10.244.0.0/16" + metricsBindAddress: 0.0.0.0:10249 + conntrack: + maxPerCore: 0 + # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established" + tcpEstablishedTimeout: 0s + # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close" + tcpCloseWaitTimeout: 0s + + I1102 23:18:03.922133 253591 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1 + I1102 23:18:03.926603 253591 binaries.go:44] Found k8s binaries, skipping transfer + I1102 23:18:03.926638 253591 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube + I1102 23:18:03.931622 253591 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (320 bytes) + I1102 23:18:03.939311 253591 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes) + I1102 23:18:03.947048 253591 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2221 bytes) + I1102 23:18:03.954600 253591 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts + I1102 
23:18:03.956721 253591 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts"" + I1102 23:18:03.963188 253591 ssh_runner.go:195] Run: sudo systemctl daemon-reload + I1102 23:18:04.023952 253591 ssh_runner.go:195] Run: sudo systemctl start kubelet + I1102 23:18:04.044114 253591 certs.go:69] Setting up /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205 for IP: 192.168.76.2 + I1102 23:18:04.044119 253591 certs.go:195] generating shared ca certs ... + I1102 23:18:04.044130 253591 certs.go:227] acquiring lock for ca certs: {Name:mka14e8ec45c47b14f993e33375454668e25d494 Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:04.044249 253591 certs.go:236] skipping valid "minikubeCA" ca cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.key + I1102 23:18:04.044280 253591 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/proxy-client-ca.key + I1102 23:18:04.044284 253591 certs.go:257] generating profile certs ... + I1102 23:18:04.044325 253591 certs.go:364] generating signed profile cert for "minikube-user": /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/client.key + I1102 23:18:04.044340 253591 crypto.go:68] Generating cert /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/client.crt with IP's: [] + I1102 23:18:04.208998 253591 crypto.go:156] Writing cert to /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/client.crt ... + I1102 23:18:04.209006 253591 lock.go:35] WriteFile acquiring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/client.crt: {Name:mk4242b9869b318af135d5a04404d4efc6cf682e Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:04.209130 253591 crypto.go:164] Writing key to /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/client.key ... + I1102 23:18:04.209135 253591 lock.go:35] WriteFile acquiring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/client.key: {Name:mk897a441a744e4deb312bb31d7e45f305393cb5 Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:04.209203 253591 certs.go:364] generating signed profile cert for "minikube": /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.key.d67ede0c + I1102 23:18:04.209219 253591 crypto.go:68] Generating cert /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.crt.d67ede0c with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2] + I1102 23:18:04.436006 253591 crypto.go:156] Writing cert to /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.crt.d67ede0c ... 
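minikube generates the profile certificates above in Go (certs.go/crypto.go); for anyone who wants to inspect or reproduce the artifacts by hand, the flow can be approximated with openssl. This is an illustration only, not the code path used in the log, and the subject line is an assumption about what a client certificate of this kind typically carries:

# create a key and a certificate signing request for the client certificate
openssl genrsa -out client.key 2048
openssl req -new -key client.key -subj "/O=system:masters/CN=minikube-user" -out client.csr
# sign it with the shared minikubeCA key pair kept under .minikube/
openssl x509 -req -in client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -days 365 -out client.crt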
+ I1102 23:18:04.436013 253591 lock.go:35] WriteFile acquiring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.crt.d67ede0c: {Name:mk049a160b33ba00c531396324e27b089b294b76 Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:04.436141 253591 crypto.go:164] Writing key to /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.key.d67ede0c ... + I1102 23:18:04.436145 253591 lock.go:35] WriteFile acquiring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.key.d67ede0c: {Name:mkbab4dc88cc36ed893a75442190fa04204c9d4c Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:04.436195 253591 certs.go:382] copying /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.crt.d67ede0c -> /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.crt + I1102 23:18:04.436247 253591 certs.go:386] copying /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.key.d67ede0c -> /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.key + I1102 23:18:04.436289 253591 certs.go:364] generating signed profile cert for "aggregator": /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/proxy-client.key + I1102 23:18:04.436304 253591 crypto.go:68] Generating cert /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/proxy-client.crt with IP's: [] + I1102 23:18:04.486571 253591 crypto.go:156] Writing cert to /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/proxy-client.crt ... + I1102 23:18:04.486578 253591 lock.go:35] WriteFile acquiring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/proxy-client.crt: {Name:mkc557a3c4e4c839becbe50d45b8ff85e88e77e1 Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:04.486686 253591 crypto.go:164] Writing key to /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/proxy-client.key ... 
+ I1102 23:18:04.486692 253591 lock.go:35] WriteFile acquiring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/proxy-client.key: {Name:mk133af299b17c585e172e986c844eb5075cf70d Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:04.486845 253591 certs.go:484] found cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/37869.pem (1338 bytes) + W1102 23:18:04.486870 253591 certs.go:480] ignoring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/37869_empty.pem, impossibly tiny 0 bytes + I1102 23:18:04.486875 253591 certs.go:484] found cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca-key.pem (1675 bytes) + I1102 23:18:04.486892 253591 certs.go:484] found cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca.pem (1082 bytes) + I1102 23:18:04.486908 253591 certs.go:484] found cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/cert.pem (1127 bytes) + I1102 23:18:04.486943 253591 certs.go:484] found cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/key.pem (1675 bytes) + I1102 23:18:04.486978 253591 certs.go:484] found cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/files/etc/ssl/certs/378692.pem (1708 bytes) + I1102 23:18:04.487364 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes) + I1102 23:18:04.498438 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes) + I1102 23:18:04.509333 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes) + I1102 23:18:04.520074 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes) + I1102 23:18:04.530681 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes) + I1102 23:18:04.541415 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes) + I1102 23:18:04.551866 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes) + I1102 23:18:04.562486 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes) + I1102 23:18:04.573100 253591 ssh_runner.go:362] scp 
/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/files/etc/ssl/certs/378692.pem --> /usr/share/ca-certificates/378692.pem (1708 bytes) + I1102 23:18:04.583855 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes) + I1102 23:18:04.595385 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/37869.pem --> /usr/share/ca-certificates/37869.pem (1338 bytes) + I1102 23:18:04.605855 253591 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes) + I1102 23:18:04.613625 253591 ssh_runner.go:195] Run: openssl version + I1102 23:18:04.616906 253591 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem" + I1102 23:18:04.621840 253591 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem + I1102 23:18:04.624171 253591 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 2 22:47 /usr/share/ca-certificates/minikubeCA.pem + I1102 23:18:04.624213 253591 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem + I1102 23:18:04.641639 253591 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0" + I1102 23:18:04.646458 253591 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/37869.pem && ln -fs /usr/share/ca-certificates/37869.pem /etc/ssl/certs/37869.pem" + I1102 23:18:04.651254 253591 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/37869.pem + I1102 23:18:04.653315 253591 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 2 22:52 /usr/share/ca-certificates/37869.pem + I1102 23:18:04.653356 253591 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/37869.pem + I1102 23:18:04.670542 253591 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/37869.pem /etc/ssl/certs/51391683.0" + I1102 23:18:04.675585 253591 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/378692.pem && ln -fs /usr/share/ca-certificates/378692.pem /etc/ssl/certs/378692.pem" + I1102 23:18:04.680499 253591 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/378692.pem + I1102 23:18:04.682640 253591 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 2 22:52 /usr/share/ca-certificates/378692.pem + I1102 23:18:04.682670 253591 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/378692.pem + I1102 23:18:04.699999 253591 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/378692.pem /etc/ssl/certs/3ec20f2e.0" + I1102 23:18:04.704862 253591 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt + I1102 23:18:04.706859 253591 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1 + stdout: + + stderr: + stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory + I1102 23:18:04.706882 253591 kubeadm.go:401] StartCluster: {Name:scheduled-stop-206205 KeepContext:false EmbedCerts:false MinikubeISO: 
KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-206205 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} + I1102 23:18:04.706959 253591 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}} + I1102 23:18:04.717718 253591 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd + I1102 23:18:04.722208 253591 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml + I1102 23:18:04.726655 253591 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver + I1102 23:18:04.726685 253591 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf + I1102 23:18:04.731168 253591 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2 + stdout: + + stderr: + ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory + ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory + ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory + ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory + I1102 23:18:04.731172 253591 kubeadm.go:158] found existing configuration files: + + I1102 23:18:04.731205 253591 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf + I1102 23:18:04.735630 253591 kubeadm.go:164] 
"https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2 + stdout: + + stderr: + grep: /etc/kubernetes/admin.conf: No such file or directory + I1102 23:18:04.735662 253591 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf + I1102 23:18:04.740078 253591 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf + I1102 23:18:04.744424 253591 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2 + stdout: + + stderr: + grep: /etc/kubernetes/kubelet.conf: No such file or directory + I1102 23:18:04.744460 253591 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf + I1102 23:18:04.748755 253591 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf + I1102 23:18:04.753213 253591 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2 + stdout: + + stderr: + grep: /etc/kubernetes/controller-manager.conf: No such file or directory + I1102 23:18:04.753244 253591 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf + I1102 23:18:04.757425 253591 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf + I1102 23:18:04.762105 253591 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2 + stdout: + + stderr: + grep: /etc/kubernetes/scheduler.conf: No such file or directory + I1102 23:18:04.762141 253591 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf + I1102 23:18:04.766828 253591 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables" + I1102 23:18:04.788502 253591 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1 + I1102 23:18:04.788535 253591 kubeadm.go:319] [preflight] Running pre-flight checks + I1102 23:18:04.836991 253591 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster + I1102 23:18:04.837059 253591 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection + I1102 23:18:04.837119 253591 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull' + I1102 23:18:04.843731 253591 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs" + 
I1102 23:18:04.844227 253591 out.go:252] - Generating certificates and keys ... + I1102 23:18:04.844272 253591 kubeadm.go:319] [certs] Using existing ca certificate authority + I1102 23:18:04.844306 253591 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk + I1102 23:18:04.925670 253591 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key + I1102 23:18:05.123791 253591 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key + I1102 23:18:05.302844 253591 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key + I1102 23:18:05.402793 253591 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key + I1102 23:18:05.657457 253591 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key + I1102 23:18:05.657534 253591 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost scheduled-stop-206205] and IPs [192.168.76.2 127.0.0.1 ::1] + I1102 23:18:05.867167 253591 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key + I1102 23:18:05.867272 253591 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost scheduled-stop-206205] and IPs [192.168.76.2 127.0.0.1 ::1] + I1102 23:18:06.006321 253591 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key + I1102 23:18:06.020741 253591 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key + I1102 23:18:06.154439 253591 kubeadm.go:319] [certs] Generating "sa" key and public key + I1102 23:18:06.154498 253591 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes" + I1102 23:18:06.290009 253591 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file + I1102 23:18:06.387763 253591 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file + I1102 23:18:06.443867 253591 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file + I1102 23:18:06.726384 253591 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file + I1102 23:18:06.903302 253591 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file + I1102 23:18:06.903667 253591 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" + I1102 23:18:06.904817 253591 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests" + I1102 23:18:06.905155 253591 out.go:252] - Booting up control plane ... 
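All of the [certs] and [kubeconfig] phases above are driven by a single kubeadm invocation against the generated /var/tmp/minikube/kubeadm.yaml; the full command, including its long --ignore-preflight-errors list, is recorded a few entries earlier. Reduced to its essentials it is just (the ignore list is abbreviated here for readability):

# run the kubeadm binary that minikube staged under /var/lib/minikube
sudo env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" \
  kubeadm init --config /var/tmp/minikube/kubeadm.yaml \
  --ignore-preflight-errors=SystemVerification,Swap,NumCPU,Mem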
+ I1102 23:18:06.905207 253591 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver" + I1102 23:18:06.905368 253591 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager" + I1102 23:18:06.905771 253591 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler" + I1102 23:18:06.923260 253591 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" + I1102 23:18:06.923333 253591 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml" + I1102 23:18:06.926911 253591 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration" + I1102 23:18:06.927029 253591 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" + I1102 23:18:06.927059 253591 kubeadm.go:319] [kubelet-start] Starting the kubelet + I1102 23:18:06.999634 253591 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests" + I1102 23:18:06.999719 253591 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s + I1102 23:18:07.500751 253591 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 501.177161ms + I1102 23:18:07.503256 253591 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s + I1102 23:18:07.503330 253591 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.76.2:8443/livez + I1102 23:18:07.503391 253591 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz + I1102 23:18:07.503444 253591 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez + I1102 23:18:08.102936 253591 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 599.534087ms + I1102 23:18:08.676079 253591 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 1.172708516s + I1102 23:18:10.004930 253591 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 2.501559348s + I1102 23:18:10.010088 253591 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace + I1102 23:18:10.014186 253591 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster + I1102 23:18:10.018106 253591 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs + I1102 23:18:10.018230 253591 kubeadm.go:319] [mark-control-plane] Marking the node scheduled-stop-206205 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers] + I1102 23:18:10.021761 253591 kubeadm.go:319] [bootstrap-token] Using token: zzot1a.o97su5nifgm8hbsf + I1102 23:18:10.022194 253591 out.go:252] - Configuring RBAC rules ... 
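The health checks kubeadm reports here are plain HTTP(S) probes against well-known ports, so they can be reproduced by hand while the control plane is coming up. A sketch using the endpoints named in the log (-k skips verification of the self-signed serving certificates):

# kubelet liveness, checked first
curl -sf http://127.0.0.1:10248/healthz && echo kubelet ok
# control-plane components, in the same order kubeadm checks them
curl -skf https://192.168.76.2:8443/livez  && echo kube-apiserver ok
curl -skf https://127.0.0.1:10257/healthz  && echo kube-controller-manager ok
curl -skf https://127.0.0.1:10259/livez    && echo kube-scheduler ok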
+ I1102 23:18:10.022262 253591 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles + I1102 23:18:10.023883 253591 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes + I1102 23:18:10.026229 253591 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials + I1102 23:18:10.027749 253591 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token + I1102 23:18:10.028885 253591 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster + I1102 23:18:10.030110 253591 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace + I1102 23:18:10.408133 253591 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key + I1102 23:18:10.814377 253591 kubeadm.go:319] [addons] Applied essential addon: CoreDNS + I1102 23:18:11.407099 253591 kubeadm.go:319] [addons] Applied essential addon: kube-proxy + I1102 23:18:11.407521 253591 kubeadm.go:319] + I1102 23:18:11.407572 253591 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully! + I1102 23:18:11.407575 253591 kubeadm.go:319] + I1102 23:18:11.407619 253591 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user: + I1102 23:18:11.407622 253591 kubeadm.go:319] + I1102 23:18:11.407635 253591 kubeadm.go:319] mkdir -p $HOME/.kube + I1102 23:18:11.407697 253591 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + I1102 23:18:11.407728 253591 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config + I1102 23:18:11.407730 253591 kubeadm.go:319] + I1102 23:18:11.407773 253591 kubeadm.go:319] Alternatively, if you are the root user, you can run: + I1102 23:18:11.407780 253591 kubeadm.go:319] + I1102 23:18:11.407826 253591 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf + I1102 23:18:11.407828 253591 kubeadm.go:319] + I1102 23:18:11.407862 253591 kubeadm.go:319] You should now deploy a pod network to the cluster. 
+ I1102 23:18:11.407927 253591 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: + I1102 23:18:11.407975 253591 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/ + I1102 23:18:11.407977 253591 kubeadm.go:319] + I1102 23:18:11.408037 253591 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities + I1102 23:18:11.408084 253591 kubeadm.go:319] and service account keys on each node and then running the following as root: + I1102 23:18:11.408087 253591 kubeadm.go:319] + I1102 23:18:11.408140 253591 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token zzot1a.o97su5nifgm8hbsf \ + I1102 23:18:11.408198 253591 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:481e6c6c81577cc2e0f1743d9bdbb8d6de8e828bf4a5e4190480860442a4bdf3 \ + I1102 23:18:11.408209 253591 kubeadm.go:319] --control-plane + I1102 23:18:11.408211 253591 kubeadm.go:319] + I1102 23:18:11.408274 253591 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root: + I1102 23:18:11.408279 253591 kubeadm.go:319] + I1102 23:18:11.408327 253591 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token zzot1a.o97su5nifgm8hbsf \ + I1102 23:18:11.408387 253591 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:481e6c6c81577cc2e0f1743d9bdbb8d6de8e828bf4a5e4190480860442a4bdf3 + I1102 23:18:11.411428 253591 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service' + I1102 23:18:11.411452 253591 cni.go:84] Creating CNI manager for "" + I1102 23:18:11.411462 253591 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge + I1102 23:18:11.411725 253591 out.go:179] * Configuring bridge CNI (Container Networking Interface) ... 
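The bridge CNI step that follows writes /etc/cni/net.d/1-k8s.conflist; the log records only its size (496 bytes), not its contents, so the snippet below is a generic bridge + portmap conflist for orientation rather than the exact file minikube installs (the 10.244.0.0/16 subnet matches the pod CIDR chosen earlier in the log; all other field values are assumptions):

# write an illustrative bridge CNI config; field values here are assumptions
sudo tee /etc/cni/net.d/1-k8s.conflist >/dev/null <<'EOF'
{
  "cniVersion": "0.3.1",
  "name": "bridge",
  "plugins": [
    {
      "type": "bridge",
      "bridge": "bridge",
      "isDefaultGateway": true,
      "ipMasq": true,
      "hairpinMode": true,
      "ipam": { "type": "host-local", "subnet": "10.244.0.0/16" }
    },
    { "type": "portmap", "capabilities": { "portMappings": true } }
  ]
}
EOF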
+ I1102 23:18:11.411874 253591 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d + I1102 23:18:11.417306 253591 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes) + I1102 23:18:11.425696 253591 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj" + I1102 23:18:11.425760 253591 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig + I1102 23:18:11.425776 253591 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes scheduled-stop-206205 minikube.k8s.io/updated_at=2025_11_02T23_18_11_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=e2222ae36f11d3515cb4a1cbfbc513a974c210e6 minikube.k8s.io/name=scheduled-stop-206205 minikube.k8s.io/primary=true + I1102 23:18:11.471856 253591 kubeadm.go:1114] duration metric: took 46.152711ms to wait for elevateKubeSystemPrivileges + I1102 23:18:11.471871 253591 ops.go:34] apiserver oom_adj: -16 + I1102 23:18:11.471876 253591 kubeadm.go:403] duration metric: took 6.76499633s to StartCluster + I1102 23:18:11.471891 253591 settings.go:142] acquiring lock: {Name:mkb9be79a929c9a9a1c960b77da9cebe4afb2abe Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:11.471971 253591 settings.go:150] Updating kubeconfig: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig + I1102 23:18:11.472443 253591 lock.go:35] WriteFile acquiring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig: {Name:mk69953fc2a8af178bf939270c575260f1197035 Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:11.472582 253591 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml" + I1102 23:18:11.472577 253591 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true} + I1102 23:18:11.472616 253591 addons.go:512] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] + I1102 23:18:11.472671 253591 addons.go:70] Setting storage-provisioner=true in profile "scheduled-stop-206205" + I1102 23:18:11.472676 253591 addons.go:70] Setting default-storageclass=true in profile "scheduled-stop-206205" + I1102 23:18:11.472687 253591 addons.go:239] Setting addon storage-provisioner=true in "scheduled-stop-206205" + I1102 23:18:11.472689 253591 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "scheduled-stop-206205" + I1102 23:18:11.472709 253591 host.go:66] Checking 
if "scheduled-stop-206205" exists ... + I1102 23:18:11.472725 253591 config.go:182] Loaded profile config "scheduled-stop-206205": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 23:18:11.472814 253591 out.go:179] * Verifying Kubernetes components... + I1102 23:18:11.472941 253591 cli_runner.go:164] Run: docker container inspect scheduled-stop-206205 --format={{.State.Status}} + I1102 23:18:11.473073 253591 cli_runner.go:164] Run: docker container inspect scheduled-stop-206205 --format={{.State.Status}} + I1102 23:18:11.473107 253591 ssh_runner.go:195] Run: sudo systemctl daemon-reload + I1102 23:18:11.486155 253591 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5 + I1102 23:18:11.486490 253591 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml + I1102 23:18:11.486497 253591 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes) + I1102 23:18:11.486549 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:11.486753 253591 addons.go:239] Setting addon default-storageclass=true in "scheduled-stop-206205" + I1102 23:18:11.486771 253591 host.go:66] Checking if "scheduled-stop-206205" exists ... + I1102 23:18:11.487085 253591 cli_runner.go:164] Run: docker container inspect scheduled-stop-206205 --format={{.State.Status}} + I1102 23:18:11.501406 253591 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml + I1102 23:18:11.501411 253591 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes) + I1102 23:18:11.501462 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:11.502573 253591 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa Username:docker} + I1102 23:18:11.512341 253591 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa Username:docker} + I1102 23:18:11.520398 253591 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -" + I1102 23:18:11.545285 253591 ssh_runner.go:195] Run: sudo systemctl start kubelet + I1102 23:18:11.598698 253591 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml + I1102 23:18:11.605838 253591 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml + I1102 23:18:11.609054 253591 start.go:977] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap + I1102 23:18:11.609590 253591 api_server.go:52] waiting for apiserver process to appear ... 
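With the addon manifests applied and the kubelet (re)started, the pieces enabled above can be checked with the kubectl that kubeadm just configured; a quick sketch, assuming the admin kubeconfig at its default location inside the node:

# the storage-provisioner pod created by the addon manifest
kubectl --kubeconfig /etc/kubernetes/admin.conf -n kube-system get pod storage-provisioner
# the default StorageClass installed by storageclass.yaml
kubectl --kubeconfig /etc/kubernetes/admin.conf get storageclass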
+ I1102 23:18:11.609636 253591 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.* + I1102 23:18:11.736315 253591 api_server.go:72] duration metric: took 263.723816ms to wait for apiserver process to appear ... + I1102 23:18:11.736321 253591 api_server.go:88] waiting for apiserver healthz status ... + I1102 23:18:11.736327 253591 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ... + I1102 23:18:11.738794 253591 api_server.go:279] https://192.168.76.2:8443/healthz returned 200: + ok + I1102 23:18:11.739059 253591 out.go:179] * Enabled addons: storage-provisioner, default-storageclass + I1102 23:18:11.739186 253591 addons.go:515] duration metric: took 266.567411ms for enable addons: enabled=[storage-provisioner default-storageclass] + I1102 23:18:11.739206 253591 api_server.go:141] control plane version: v1.34.1 + I1102 23:18:11.739215 253591 api_server.go:131] duration metric: took 2.891343ms to wait for apiserver health ... + I1102 23:18:11.739220 253591 system_pods.go:43] waiting for kube-system pods to appear ... + I1102 23:18:11.740465 253591 system_pods.go:59] 5 kube-system pods found + I1102 23:18:11.740480 253591 system_pods.go:61] "etcd-scheduled-stop-206205" [3a3467ab-2584-452f-9bdc-476103de560e] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd]) + I1102 23:18:11.740485 253591 system_pods.go:61] "kube-apiserver-scheduled-stop-206205" [be3cdd37-7ff6-4d68-8c6b-cd82a78eb07a] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver]) + I1102 23:18:11.740488 253591 system_pods.go:61] "kube-controller-manager-scheduled-stop-206205" [7c802ff5-1d76-4a4b-99ed-54947cb4e48f] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager]) + I1102 23:18:11.740491 253591 system_pods.go:61] "kube-scheduler-scheduled-stop-206205" [357b6ad0-c778-4062-a5b1-40b7a42b1b45] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler]) + I1102 23:18:11.740493 253591 system_pods.go:61] "storage-provisioner" [30637bd6-c7f0-4fe5-b79f-a0d9e1f7ca18] Pending + I1102 23:18:11.740496 253591 system_pods.go:74] duration metric: took 1.273402ms to wait for pod list to return data ... + I1102 23:18:11.740501 253591 kubeadm.go:587] duration metric: took 267.910986ms to wait for: map[apiserver:true system_pods:true] + I1102 23:18:11.740508 253591 node_conditions.go:102] verifying NodePressure condition ... + I1102 23:18:11.741731 253591 node_conditions.go:122] node storage ephemeral capacity is 385926528Ki + I1102 23:18:11.741742 253591 node_conditions.go:123] node cpu capacity is 8 + I1102 23:18:11.741750 253591 node_conditions.go:105] duration metric: took 1.240672ms to run NodePressure ... + I1102 23:18:11.741757 253591 start.go:242] waiting for startup goroutines ... + I1102 23:18:12.111097 253591 kapi.go:214] "coredns" deployment in "kube-system" namespace and "scheduled-stop-206205" context rescaled to 1 replicas + I1102 23:18:12.111110 253591 start.go:247] waiting for cluster config update ... + I1102 23:18:12.111116 253591 start.go:256] writing updated cluster config ... 
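The wait for a healthy apiserver recorded above is just a process check followed by an HTTPS probe, as the two ssh_runner entries show; by hand it looks like:

# confirm the apiserver process is up, then probe its healthz endpoint
sudo pgrep -xnf 'kube-apiserver.*minikube.*'
curl -sk https://192.168.76.2:8443/healthz   # prints "ok" once healthy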
+ I1102 23:18:12.111299 253591 ssh_runner.go:195] Run: rm -f paused + I1102 23:18:12.140155 253591 start.go:628] kubectl: 1.34.1, cluster: 1.34.1 (minor skew: 0) + I1102 23:18:12.140451 253591 out.go:179] * Done! kubectl is now configured to use "scheduled-stop-206205" cluster and "default" namespace by default + + + ==> Docker <== + Nov 02 23:18:03 scheduled-stop-206205 dockerd[1053]: time="2025-11-02T23:18:03.495395710Z" level=info msg="Loading containers: done." + Nov 02 23:18:03 scheduled-stop-206205 dockerd[1053]: time="2025-11-02T23:18:03.499868631Z" level=info msg="Docker daemon" commit=f8215cc containerd-snapshotter=false storage-driver=overlay2 version=28.5.1 + Nov 02 23:18:03 scheduled-stop-206205 dockerd[1053]: time="2025-11-02T23:18:03.499901472Z" level=info msg="Initializing buildkit" + Nov 02 23:18:03 scheduled-stop-206205 dockerd[1053]: time="2025-11-02T23:18:03.509786801Z" level=info msg="Completed buildkit initialization" + Nov 02 23:18:03 scheduled-stop-206205 dockerd[1053]: time="2025-11-02T23:18:03.512985444Z" level=info msg="Daemon has completed initialization" + Nov 02 23:18:03 scheduled-stop-206205 dockerd[1053]: time="2025-11-02T23:18:03.513034998Z" level=info msg="API listen on /run/docker.sock" + Nov 02 23:18:03 scheduled-stop-206205 systemd[1]: Started docker.service - Docker Application Container Engine. + Nov 02 23:18:03 scheduled-stop-206205 dockerd[1053]: time="2025-11-02T23:18:03.513042406Z" level=info msg="API listen on [::]:2376" + Nov 02 23:18:03 scheduled-stop-206205 dockerd[1053]: time="2025-11-02T23:18:03.513043378Z" level=info msg="API listen on /var/run/docker.sock" + Nov 02 23:18:03 scheduled-stop-206205 systemd[1]: Starting cri-docker.service - CRI Interface for Docker Application Container Engine... + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Starting cri-dockerd dev (HEAD)" + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock" + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Start docker client with request timeout 0s" + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Hairpin mode is set to hairpin-veth" + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Loaded network plugin cni" + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Docker cri networking managed by network plugin cni" + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Setting cgroupDriver systemd" + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}" + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Starting the GRPC backend for the Docker CRI interface." + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Start cri-dockerd grpc backend" + Nov 02 23:18:03 scheduled-stop-206205 systemd[1]: Started cri-docker.service - CRI Interface for Docker Application Container Engine. 
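The '==> Docker <==' section that begins here (and the container status / describe nodes sections after it) is the node's own diagnostic dump, in the shape produced by 'minikube logs': systemd journal entries for the runtime units plus cluster state. The journal portion can be pulled directly from inside the node, for example:

# runtime unit logs for the window covered by this dump (time range is illustrative)
sudo journalctl -u docker -u cri-docker --no-pager --since '2025-11-02 23:18:00'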
+ Nov 02 23:18:07 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:07Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/9f2a78be2847fcb06b2823f22f5f0affcfcf7152244128ce42976da2bfaf52cb/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:18:07 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:07Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0a4584e48de87263ec3357dc6f99df95620088cbed099df4184139013c8ae44b/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:18:07 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:07Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/d7b6e70bdcc89ff1ba082e5ef5211e4a08f08d2dbf1ac60c41d519f51a3ac28a/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:18:07 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:07Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/6f36d8ffcdc464b0e96984f4cf3f0b7d21f82ef8dffa4a527b77f1bc4d393f84/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + + + ==> container status <== + CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE + 5db718ff95e2b 7dd6aaa1717ab 5 seconds ago Running kube-scheduler 0 6f36d8ffcdc46 kube-scheduler-scheduled-stop-206205 kube-system + bfb168ef202c9 c3994bc696102 5 seconds ago Running kube-apiserver 0 d7b6e70bdcc89 kube-apiserver-scheduled-stop-206205 kube-system + 10cdff77fbe70 c80c8dbafe7dd 5 seconds ago Running kube-controller-manager 0 0a4584e48de87 kube-controller-manager-scheduled-stop-206205 kube-system + ee6b590db1694 5f1f5298c888d 5 seconds ago Running etcd 0 9f2a78be2847f etcd-scheduled-stop-206205 kube-system + + + ==> describe nodes <== + Name: scheduled-stop-206205 + Roles: control-plane + Labels: beta.kubernetes.io/arch=amd64 + beta.kubernetes.io/os=linux + kubernetes.io/arch=amd64 + kubernetes.io/hostname=scheduled-stop-206205 + kubernetes.io/os=linux + minikube.k8s.io/commit=e2222ae36f11d3515cb4a1cbfbc513a974c210e6 + minikube.k8s.io/name=scheduled-stop-206205 + minikube.k8s.io/primary=true + minikube.k8s.io/updated_at=2025_11_02T23_18_11_0700 + minikube.k8s.io/version=v1.37.0 + node-role.kubernetes.io/control-plane= + node.kubernetes.io/exclude-from-external-load-balancers= + Annotations: volumes.kubernetes.io/controller-managed-attach-detach: true + CreationTimestamp: Sun, 02 Nov 2025 23:18:08 +0000 + Taints: node.kubernetes.io/not-ready:NoSchedule + Unschedulable: false + Lease: + HolderIdentity: scheduled-stop-206205 + AcquireTime: + RenewTime: Sun, 02 Nov 2025 23:18:10 +0000 + Conditions: + Type Status LastHeartbeatTime LastTransitionTime Reason Message + ---- ------ ----------------- ------------------ ------ ------- + MemoryPressure False Sun, 02 Nov 2025 23:18:10 +0000 Sun, 02 Nov 2025 23:18:07 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available + DiskPressure False Sun, 02 Nov 2025 23:18:10 +0000 Sun, 02 Nov 2025 23:18:07 +0000 KubeletHasNoDiskPressure kubelet 
has no disk pressure + PIDPressure False Sun, 02 Nov 2025 23:18:10 +0000 Sun, 02 Nov 2025 23:18:07 +0000 KubeletHasSufficientPID kubelet has sufficient PID available + Ready False Sun, 02 Nov 2025 23:18:10 +0000 Sun, 02 Nov 2025 23:18:07 +0000 KubeletNotReady container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized + Addresses: + InternalIP: 192.168.76.2 + Hostname: scheduled-stop-206205 + Capacity: + cpu: 8 + ephemeral-storage: 385926528Ki + hugepages-1Gi: 0 + hugepages-2Mi: 0 + memory: 63789124Ki + pods: 110 + Allocatable: + cpu: 8 + ephemeral-storage: 385926528Ki + hugepages-1Gi: 0 + hugepages-2Mi: 0 + memory: 63789124Ki + pods: 110 + System Info: + Machine ID: 98aac72b9abe9f06f1b9b38568f5cc96 + System UUID: 7a6fb796-ef04-49e4-b906-59d6d0acf104 + Boot ID: 239636f5-8285-461a-a1b0-1dff3163ae78 + Kernel Version: 6.6.97+ + OS Image: Debian GNU/Linux 12 (bookworm) + Operating System: linux + Architecture: amd64 + Container Runtime Version: docker://28.5.1 + Kubelet Version: v1.34.1 + Kube-Proxy Version: + Non-terminated Pods: (4 in total) + Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age + --------- ---- ------------ ---------- --------------- ------------- --- + kube-system etcd-scheduled-stop-206205 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 2s + kube-system kube-apiserver-scheduled-stop-206205 250m (3%) 0 (0%) 0 (0%) 0 (0%) 2s + kube-system kube-controller-manager-scheduled-stop-206205 200m (2%) 0 (0%) 0 (0%) 0 (0%) 2s + kube-system kube-scheduler-scheduled-stop-206205 100m (1%) 0 (0%) 0 (0%) 0 (0%) 2s + Allocated resources: + (Total limits may be over 100 percent, i.e., overcommitted.) + Resource Requests Limits + -------- -------- ------ + cpu 650m (8%) 0 (0%) + memory 100Mi (0%) 0 (0%) + ephemeral-storage 0 (0%) 0 (0%) + hugepages-1Gi 0 (0%) 0 (0%) + hugepages-2Mi 0 (0%) 0 (0%) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2s kubelet Starting kubelet. 
+ Normal NodeAllocatableEnforced 2s kubelet Updated Node Allocatable limit across pods + Normal NodeHasSufficientMemory 2s kubelet Node scheduled-stop-206205 status is now: NodeHasSufficientMemory + Normal NodeHasNoDiskPressure 2s kubelet Node scheduled-stop-206205 status is now: NodeHasNoDiskPressure + Normal NodeHasSufficientPID 2s kubelet Node scheduled-stop-206205 status is now: NodeHasSufficientPID + + + ==> dmesg <== + [ +0.007925] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [ +1.969222] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.007882] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [ +0.055110] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.000000] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.000011] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [ +0.007861] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [Nov 2 23:15] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.007880] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [ +0.056105] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.000007] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.007875] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [ +0.007851] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [ +8.240272] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.000010] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.000008] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.000002] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [ +0.007883] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [ +0.007894] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [Nov 2 23:16] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0 + [ +0.002729] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0 + [ +0.004279] ll header: 00000000: ff ff ff ff ff ff f6 da 32 1e 36 f0 08 06 + [ +0.014177] ll header: 00000000: ff ff ff ff ff ff 0e 8f fb 63 f8 c5 08 06 + [Nov 2 23:17] IPv4: martian source 10.244.0.1 from 10.244.0.4, on dev eth0 + [ +0.007032] ll header: 00000000: ff ff ff ff ff ff 3e 52 8f c5 07 40 08 06 + + + ==> etcd [ee6b590db169] <== + {"level":"warn","ts":"2025-11-02T23:18:08.342428Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60338","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.347248Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60344","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.351186Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60350","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.354399Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60360","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.357580Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60384","server-name":"","error":"EOF"} + 
{"level":"warn","ts":"2025-11-02T23:18:08.360743Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60410","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.364728Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60432","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.367889Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60468","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.372142Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60474","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.375389Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60480","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.390957Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60512","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.394409Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60528","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.399792Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60568","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.402813Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60588","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.405858Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60598","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.409006Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60618","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.412203Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60648","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.415052Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60656","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.418142Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60668","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.421304Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60680","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.424343Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60698","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.435025Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60716","server-name":"","error":"EOF"} + 
{"level":"warn","ts":"2025-11-02T23:18:08.438022Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60740","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.441090Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60748","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.463684Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60780","server-name":"","error":"EOF"} + + + ==> kernel <== + 23:18:12 up 12 days, 17 min, 0 user, load average: 0.46, 0.78, 1.17 + Linux scheduled-stop-206205 6.6.97+ #1 SMP Fri Aug 22 11:53:37 UTC 2025 x86_64 GNU/Linux + PRETTY_NAME="Debian GNU/Linux 12 (bookworm)" + + + ==> kube-apiserver [bfb168ef202c] <== + I1102 23:18:08.753138 1 default_servicecidr_controller.go:166] Creating default ServiceCIDR with CIDRs: [10.96.0.0/12] + I1102 23:18:08.753051 1 apf_controller.go:382] Running API Priority and Fairness config worker + I1102 23:18:08.753172 1 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process + I1102 23:18:08.753192 1 handler_discovery.go:451] Starting ResourceDiscoveryManager + I1102 23:18:08.753244 1 cache.go:39] Caches are synced for LocalAvailability controller + I1102 23:18:08.753259 1 cache.go:39] Caches are synced for RemoteAvailability controller + I1102 23:18:08.753943 1 controller.go:667] quota admission added evaluator for: namespaces + I1102 23:18:08.755940 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True + I1102 23:18:08.755965 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:18:08.760634 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:18:08.760833 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller + I1102 23:18:08.770224 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io + I1102 23:18:09.655189 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000 + I1102 23:18:09.657367 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000 + I1102 23:18:09.657377 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist. 
+ I1102 23:18:09.880746 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io + I1102 23:18:09.898007 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io + I1102 23:18:09.956161 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"} + W1102 23:18:09.958846 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.76.2] + I1102 23:18:09.959340 1 controller.go:667] quota admission added evaluator for: endpoints + I1102 23:18:09.961256 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io + I1102 23:18:10.674093 1 controller.go:667] quota admission added evaluator for: serviceaccounts + I1102 23:18:10.810122 1 controller.go:667] quota admission added evaluator for: deployments.apps + I1102 23:18:10.813893 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"} + I1102 23:18:10.818171 1 controller.go:667] quota admission added evaluator for: daemonsets.apps + + + ==> kube-controller-manager [10cdff77fbe7] <== + I1102 23:18:12.472703 1 controllermanager.go:781] "Started controller" controller="replicaset-controller" + I1102 23:18:12.472763 1 replica_set.go:243] "Starting controller" logger="replicaset-controller" name="replicaset" + I1102 23:18:12.472769 1 shared_informer.go:349] "Waiting for caches to sync" controller="ReplicaSet" + I1102 23:18:12.624073 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kubelet-serving" + I1102 23:18:12.624088 1 shared_informer.go:349] "Waiting for caches to sync" controller="certificate-csrsigning-kubelet-serving" + I1102 23:18:12.624104 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/var/lib/minikube/certs/ca.crt::/var/lib/minikube/certs/ca.key" + I1102 23:18:12.624237 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kubelet-client" + I1102 23:18:12.624254 1 shared_informer.go:349] "Waiting for caches to sync" controller="certificate-csrsigning-kubelet-client" + I1102 23:18:12.624269 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/var/lib/minikube/certs/ca.crt::/var/lib/minikube/certs/ca.key" + I1102 23:18:12.624538 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kube-apiserver-client" + I1102 23:18:12.624549 1 shared_informer.go:349] "Waiting for caches to sync" controller="certificate-csrsigning-kube-apiserver-client" + I1102 23:18:12.624560 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/var/lib/minikube/certs/ca.crt::/var/lib/minikube/certs/ca.key" + I1102 23:18:12.624700 1 controllermanager.go:781] "Started controller" controller="certificatesigningrequest-signing-controller" + I1102 23:18:12.624734 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-legacy-unknown" + I1102 23:18:12.624741 1 shared_informer.go:349] "Waiting for caches to sync" controller="certificate-csrsigning-legacy-unknown" + I1102 23:18:12.624752 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/var/lib/minikube/certs/ca.crt::/var/lib/minikube/certs/ca.key" + I1102 23:18:12.774962 1 
range_allocator.go:112] "No Secondary Service CIDR provided. Skipping filtering out secondary service addresses" logger="node-ipam-controller" + I1102 23:18:12.774993 1 controllermanager.go:781] "Started controller" controller="node-ipam-controller" + I1102 23:18:12.775065 1 node_ipam_controller.go:141] "Starting ipam controller" logger="node-ipam-controller" + I1102 23:18:12.775070 1 shared_informer.go:349] "Waiting for caches to sync" controller="node" + I1102 23:18:12.923282 1 controllermanager.go:781] "Started controller" controller="volumeattributesclass-protection-controller" + I1102 23:18:12.923296 1 controllermanager.go:733] "Controller is disabled by a feature gate" controller="kube-apiserver-serving-clustertrustbundle-publisher-controller" requiredFeatureGates=["ClusterTrustBundle"] + I1102 23:18:12.923311 1 controllermanager.go:733] "Controller is disabled by a feature gate" controller="storageversion-garbage-collector-controller" requiredFeatureGates=["APIServerIdentity","StorageVersionAPI"] + I1102 23:18:12.923338 1 vac_protection_controller.go:206] "Starting VAC protection controller" logger="volumeattributesclass-protection-controller" + I1102 23:18:12.923344 1 shared_informer.go:349] "Waiting for caches to sync" controller="VAC protection" + + + ==> kube-scheduler [5db718ff95e2] <== + E1102 23:18:08.674554 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" + E1102 23:18:08.674593 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet" + E1102 23:18:08.674407 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume" + E1102 23:18:08.674737 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity" + E1102 23:18:08.674784 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment" + E1102 23:18:08.674898 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod" + E1102 23:18:08.674967 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User 
\"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" + E1102 23:18:08.674987 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice" + E1102 23:18:08.675001 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass" + E1102 23:18:08.675006 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode" + E1102 23:18:08.675038 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget" + E1102 23:18:08.675037 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim" + E1102 23:18:08.675073 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet" + E1102 23:18:08.675086 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController" + E1102 23:18:08.675091 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim" + E1102 23:18:08.675133 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass" + E1102 23:18:09.498694 1 reflector.go:205] 
"Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice" + E1102 23:18:09.518277 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity" + E1102 23:18:09.583032 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap" + E1102 23:18:09.630579 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" + E1102 23:18:09.656159 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet" + E1102 23:18:09.734211 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" + E1102 23:18:09.749758 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume" + E1102 23:18:09.761325 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass" + I1102 23:18:12.573391 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" + + + ==> kubelet <== + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760447 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/13b7bafac6e39482382d174d83e53f4c-etcd-certs\") pod \"etcd-scheduled-stop-206205\" (UID: \"13b7bafac6e39482382d174d83e53f4c\") " pod="kube-system/etcd-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760455 2229 reconciler_common.go:251] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/4323dcc9d541be8dde00261aeb0f0cee-etc-ca-certificates\") pod \"kube-apiserver-scheduled-stop-206205\" (UID: \"4323dcc9d541be8dde00261aeb0f0cee\") " pod="kube-system/kube-apiserver-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760466 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/e591286ccb0f48534698f161726ee9e8-kubeconfig\") pod \"kube-controller-manager-scheduled-stop-206205\" (UID: \"e591286ccb0f48534698f161726ee9e8\") " pod="kube-system/kube-controller-manager-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760508 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/4323dcc9d541be8dde00261aeb0f0cee-ca-certs\") pod \"kube-apiserver-scheduled-stop-206205\" (UID: \"4323dcc9d541be8dde00261aeb0f0cee\") " pod="kube-system/kube-apiserver-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760530 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/e591286ccb0f48534698f161726ee9e8-flexvolume-dir\") pod \"kube-controller-manager-scheduled-stop-206205\" (UID: \"e591286ccb0f48534698f161726ee9e8\") " pod="kube-system/kube-controller-manager-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760545 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/4323dcc9d541be8dde00261aeb0f0cee-k8s-certs\") pod \"kube-apiserver-scheduled-stop-206205\" (UID: \"4323dcc9d541be8dde00261aeb0f0cee\") " pod="kube-system/kube-apiserver-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760563 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/4323dcc9d541be8dde00261aeb0f0cee-usr-local-share-ca-certificates\") pod \"kube-apiserver-scheduled-stop-206205\" (UID: \"4323dcc9d541be8dde00261aeb0f0cee\") " pod="kube-system/kube-apiserver-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760589 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/e591286ccb0f48534698f161726ee9e8-ca-certs\") pod \"kube-controller-manager-scheduled-stop-206205\" (UID: \"e591286ccb0f48534698f161726ee9e8\") " pod="kube-system/kube-controller-manager-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760612 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/e591286ccb0f48534698f161726ee9e8-etc-ca-certificates\") pod \"kube-controller-manager-scheduled-stop-206205\" (UID: \"e591286ccb0f48534698f161726ee9e8\") " pod="kube-system/kube-controller-manager-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760621 2229 reconciler_common.go:251] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/e591286ccb0f48534698f161726ee9e8-usr-local-share-ca-certificates\") pod \"kube-controller-manager-scheduled-stop-206205\" (UID: \"e591286ccb0f48534698f161726ee9e8\") " pod="kube-system/kube-controller-manager-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760633 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/e591286ccb0f48534698f161726ee9e8-usr-share-ca-certificates\") pod \"kube-controller-manager-scheduled-stop-206205\" (UID: \"e591286ccb0f48534698f161726ee9e8\") " pod="kube-system/kube-controller-manager-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.553524 2229 apiserver.go:52] "Watching apiserver" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.559667 2229 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.588855 2229 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.588993 2229 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.589025 2229 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.589043 2229 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: E1102 23:18:11.592134 2229 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-scheduled-stop-206205\" already exists" pod="kube-system/kube-controller-manager-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: E1102 23:18:11.592531 2229 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-scheduled-stop-206205\" already exists" pod="kube-system/kube-apiserver-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: E1102 23:18:11.592563 2229 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-scheduled-stop-206205\" already exists" pod="kube-system/kube-scheduler-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: E1102 23:18:11.592723 2229 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-scheduled-stop-206205\" already exists" pod="kube-system/etcd-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.605300 2229 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-scheduled-stop-206205" podStartSLOduration=1.6052884139999999 podStartE2EDuration="1.605288414s" podCreationTimestamp="2025-11-02 23:18:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:18:11.600546288 +0000 UTC m=+1.083824257" watchObservedRunningTime="2025-11-02 23:18:11.605288414 +0000 UTC m=+1.088566378" + Nov 02 23:18:11 scheduled-stop-206205 
kubelet[2229]: I1102 23:18:11.610015 2229 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-scheduled-stop-206205" podStartSLOduration=1.610003192 podStartE2EDuration="1.610003192s" podCreationTimestamp="2025-11-02 23:18:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:18:11.60551876 +0000 UTC m=+1.088796727" watchObservedRunningTime="2025-11-02 23:18:11.610003192 +0000 UTC m=+1.093281161" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.616609 2229 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-apiserver-scheduled-stop-206205" podStartSLOduration=1.616455215 podStartE2EDuration="1.616455215s" podCreationTimestamp="2025-11-02 23:18:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:18:11.610111732 +0000 UTC m=+1.093389699" watchObservedRunningTime="2025-11-02 23:18:11.616455215 +0000 UTC m=+1.099733181" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.616714 2229 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-scheduler-scheduled-stop-206205" podStartSLOduration=1.616707898 podStartE2EDuration="1.616707898s" podCreationTimestamp="2025-11-02 23:18:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:18:11.616318549 +0000 UTC m=+1.099596516" watchObservedRunningTime="2025-11-02 23:18:11.616707898 +0000 UTC m=+1.099985865" + + + -- /stdout -- + helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p scheduled-stop-206205 -n scheduled-stop-206205 + helpers_test.go:269: (dbg) Run: kubectl --context scheduled-stop-206205 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running + helpers_test.go:280: non-running pods: storage-provisioner + helpers_test.go:282: ======> post-mortem[TestScheduledStopUnix]: describe non-running pods <====== + helpers_test.go:285: (dbg) Run: kubectl --context scheduled-stop-206205 describe pod storage-provisioner + helpers_test.go:285: (dbg) Non-zero exit: kubectl --context scheduled-stop-206205 describe pod storage-provisioner: exit status 1 (34.067242ms) + + ** stderr ** + Error from server (NotFound): pods "storage-provisioner" not found + + ** /stderr ** + helpers_test.go:287: kubectl --context scheduled-stop-206205 describe pod storage-provisioner: exit status 1 + helpers_test.go:175: Cleaning up "scheduled-stop-206205" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p scheduled-stop-206205 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p scheduled-stop-206205: (1.661455603s) +--- FAIL: TestScheduledStopUnix (21.40s) +=== RUN TestSkaffold + skaffold_test.go:59: (dbg) Run: /tmp/skaffold.exe4010597047 version + skaffold_test.go:63: skaffold version: v2.16.1 + skaffold_test.go:66: (dbg) Run: out/minikube-linux-amd64 start -p skaffold-173551 --memory=3072 --driver=docker --container-runtime=docker + skaffold_test.go:66: (dbg) Done: out/minikube-linux-amd64 start -p skaffold-173551 --memory=3072 --driver=docker --container-runtime=docker: (19.133422319s) + skaffold_test.go:86: copying out/minikube-linux-amd64 to /home/prow/go/src/k8s.io/minikube/out/minikube + skaffold_test.go:105: (dbg) Run: /tmp/skaffold.exe4010597047 run --minikube-profile skaffold-173551 --kube-context skaffold-173551 --status-check=true --port-forward=false --interactive=false + skaffold_test.go:105: (dbg) Done: /tmp/skaffold.exe4010597047 run --minikube-profile skaffold-173551 --kube-context skaffold-173551 --status-check=true --port-forward=false --interactive=false: (30.52538076s) + skaffold_test.go:111: (dbg) TestSkaffold: waiting 1m0s for pods matching "app=leeroy-app" in namespace "default" ... + helpers_test.go:352: "leeroy-app-6558668585-9pt9p" [114ba388-2ea8-4bd8-9497-2e253faf44fc] Running + skaffold_test.go:111: (dbg) TestSkaffold: app=leeroy-app healthy within 6.002358591s + skaffold_test.go:114: (dbg) TestSkaffold: waiting 1m0s for pods matching "app=leeroy-web" in namespace "default" ... + helpers_test.go:352: "leeroy-web-566df5ddf4-8p68k" [56c44189-07d6-4b28-bd73-b2393ce7b43b] Running + skaffold_test.go:114: (dbg) TestSkaffold: app=leeroy-web healthy within 5.001647473s + helpers_test.go:175: Cleaning up "skaffold-173551" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p skaffold-173551 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p skaffold-173551: (2.317528366s) +--- PASS: TestSkaffold (64.02s) +=== RUN TestStartStop +=== PAUSE TestStartStop +=== RUN TestInsufficientStorage + status_test.go:50: (dbg) Run: out/minikube-linux-amd64 start -p insufficient-storage-570199 --memory=3072 --output=json --wait=true --driver=docker --container-runtime=docker + status_test.go:50: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p insufficient-storage-570199 --memory=3072 --output=json --wait=true --driver=docker --container-runtime=docker: exit status 26 (9.088000538s) + + -- stdout -- + {"specversion":"1.0","id":"ebc2b365-3009-4c9a-b080-fe749bbc57b0","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"0","message":"[insufficient-storage-570199] minikube v1.37.0 on Debian 12.12 (kvm/amd64)","name":"Initial Minikube Setup","totalsteps":"19"}} + {"specversion":"1.0","id":"8feee01a-fd02-4905-946b-db40b12ec37f","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true"}} + {"specversion":"1.0","id":"57e1703c-f784-43cf-ba62-daf89f909587","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"KUBECONFIG=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig"}} + {"specversion":"1.0","id":"4bc813eb-3860-414e-8525-7d70d49f8cfd","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_HOME=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube"}} + {"specversion":"1.0","id":"65fdf822-c2df-4e63-90ac-cd909af9b828","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_BIN=out/minikube-linux-amd64"}} + {"specversion":"1.0","id":"a6aaca3e-5c6a-4a51-b825-6c3f598ab4ca","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_FORCE_SYSTEMD="}} + {"specversion":"1.0","id":"5e6cb4d5-f006-41dc-ab76-cb67c48cca62","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_TEST_STORAGE_CAPACITY=100"}} + {"specversion":"1.0","id":"e9901203-be5c-4ec4-9b11-11cd8f684b83","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"MINIKUBE_TEST_AVAILABLE_STORAGE=19"}} + {"specversion":"1.0","id":"f68f0092-5deb-43e3-af74-fcfdc7a93a4b","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"1","message":"Using the docker driver based on user configuration","name":"Selecting Driver","totalsteps":"19"}} + {"specversion":"1.0","id":"e0393cd9-c10a-42c2-a386-b6c105fa6f6d","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.info","datacontenttype":"application/json","data":{"message":"Using Docker driver with root privileges"}} + 
{"specversion":"1.0","id":"84fec073-b6ce-411a-816e-da8b82f270af","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"3","message":"Starting \"insufficient-storage-570199\" primary control-plane node in \"insufficient-storage-570199\" cluster","name":"Starting Node","totalsteps":"19"}} + {"specversion":"1.0","id":"986ca57d-ead8-4de9-93ae-96fdca69eb7c","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"5","message":"Pulling base image v0.0.48-1760939008-21773 ...","name":"Pulling Base Image","totalsteps":"19"}} + {"specversion":"1.0","id":"38ac6243-899b-4502-8d57-d549ab12d9d4","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.step","datacontenttype":"application/json","data":{"currentstep":"8","message":"Creating docker container (CPUs=2, Memory=3072MB) ...","name":"Creating Container","totalsteps":"19"}} + {"specversion":"1.0","id":"3a5580f2-979d-4867-8e55-242a8da134b0","source":"https://minikube.sigs.k8s.io/","type":"io.k8s.sigs.minikube.error","datacontenttype":"application/json","data":{"advice":"Try one or more of the following to free up space on the device:\n\n\t\t\t1. Run \"docker system prune\" to remove unused Docker data (optionally with \"-a\")\n\t\t\t2. Increase the storage allocated to Docker for Desktop by clicking on:\n\t\t\t\tDocker icon \u003e Preferences \u003e Resources \u003e Disk Image Size\n\t\t\t3. Run \"minikube ssh -- docker system prune\" if using the Docker container runtime","exitcode":"26","issues":"https://github.com/kubernetes/minikube/issues/9024","message":"Docker is out of disk space! (/var is at 100% of capacity). You can pass '--force' to skip this check.","name":"RSRC_DOCKER_STORAGE","url":""}} + + -- /stdout -- + status_test.go:76: (dbg) Run: out/minikube-linux-amd64 status -p insufficient-storage-570199 --output=json --layout=cluster + status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-amd64 status -p insufficient-storage-570199 --output=json --layout=cluster: exit status 7 (187.190342ms) + + -- stdout -- + {"Name":"insufficient-storage-570199","StatusCode":507,"StatusName":"InsufficientStorage","StatusDetail":"/var is almost out of disk space","Step":"Creating Container","StepDetail":"Creating docker container (CPUs=2, Memory=3072MB) ...","BinaryVersion":"v1.37.0","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":500,"StatusName":"Error"}},"Nodes":[{"Name":"insufficient-storage-570199","StatusCode":507,"StatusName":"InsufficientStorage","Components":{"apiserver":{"Name":"apiserver","StatusCode":405,"StatusName":"Stopped"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]} + + -- /stdout -- + ** stderr ** + E1102 23:19:28.277710 268127 status.go:458] kubeconfig endpoint: get endpoint: "insufficient-storage-570199" does not appear in /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig + + ** /stderr ** + status_test.go:76: (dbg) Run: out/minikube-linux-amd64 status -p insufficient-storage-570199 --output=json --layout=cluster + status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-amd64 status -p insufficient-storage-570199 --output=json --layout=cluster: exit status 7 (183.68161ms) + + -- stdout -- + {"Name":"insufficient-storage-570199","StatusCode":507,"StatusName":"InsufficientStorage","StatusDetail":"/var is almost out of disk 
space","BinaryVersion":"v1.37.0","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":500,"StatusName":"Error"}},"Nodes":[{"Name":"insufficient-storage-570199","StatusCode":507,"StatusName":"InsufficientStorage","Components":{"apiserver":{"Name":"apiserver","StatusCode":405,"StatusName":"Stopped"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]} + + -- /stdout -- + ** stderr ** + E1102 23:19:28.462213 268215 status.go:458] kubeconfig endpoint: get endpoint: "insufficient-storage-570199" does not appear in /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig + E1102 23:19:28.468225 268215 status.go:258] unable to read event log: stat: stat /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/insufficient-storage-570199/events.json: no such file or directory + + ** /stderr ** + helpers_test.go:175: Cleaning up "insufficient-storage-570199" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p insufficient-storage-570199 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p insufficient-storage-570199: (1.45308649s) +--- PASS: TestInsufficientStorage (10.91s) +=== RUN TestRunningBinaryUpgrade +=== PAUSE TestRunningBinaryUpgrade +=== RUN TestStoppedBinaryUpgrade +=== PAUSE TestStoppedBinaryUpgrade +=== RUN TestKubernetesUpgrade +=== PAUSE TestKubernetesUpgrade +=== RUN TestMissingContainerUpgrade +=== PAUSE TestMissingContainerUpgrade +=== CONT TestOffline +=== CONT TestNoKubernetes +=== RUN TestNoKubernetes/serial + aab_offline_test.go:55: (dbg) Run: out/minikube-linux-amd64 start -p offline-docker-154933 --alsologtostderr -v=1 --memory=3072 --wait=true --driver=docker --container-runtime=docker +=== RUN TestNoKubernetes/serial/StartNoK8sWithVersion + no_kubernetes_test.go:85: (dbg) Run: out/minikube-linux-amd64 start -p NoKubernetes-160693 --no-kubernetes --kubernetes-version=v1.28.0 --driver=docker --container-runtime=docker +=== CONT TestForceSystemdFlag +=== CONT TestCertExpiration + docker_test.go:91: (dbg) Run: out/minikube-linux-amd64 start -p force-systemd-flag-228918 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=docker --container-runtime=docker + cert_options_test.go:123: (dbg) Run: out/minikube-linux-amd64 start -p cert-expiration-250017 --memory=3072 --cert-expiration=3m --driver=docker --container-runtime=docker + no_kubernetes_test.go:85: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p NoKubernetes-160693 --no-kubernetes --kubernetes-version=v1.28.0 --driver=docker --container-runtime=docker: exit status 14 (45.378959ms) + + -- stdout -- + * [NoKubernetes-160693] minikube v1.37.0 on Debian 12.12 (kvm/amd64) + - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true + - KUBECONFIG=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig + - MINIKUBE_HOME=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube + - MINIKUBE_BIN=out/minikube-linux-amd64 + - MINIKUBE_FORCE_SYSTEMD= + + + + -- /stdout -- + ** stderr ** + X Exiting due to MK_USAGE: cannot specify --kubernetes-version with --no-kubernetes, + to unset a global config run: + + $ minikube config unset kubernetes-version + + ** /stderr ** +=== RUN TestNoKubernetes/serial/StartWithK8s + no_kubernetes_test.go:97: (dbg) Run: out/minikube-linux-amd64 start -p NoKubernetes-160693 --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=docker +E1102 23:19:41.962224 
37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + no_kubernetes_test.go:97: (dbg) Done: out/minikube-linux-amd64 start -p NoKubernetes-160693 --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=docker: (26.614065226s) + no_kubernetes_test.go:202: (dbg) Run: out/minikube-linux-amd64 -p NoKubernetes-160693 status -o json +=== RUN TestNoKubernetes/serial/StartWithStopK8s + no_kubernetes_test.go:114: (dbg) Run: out/minikube-linux-amd64 start -p NoKubernetes-160693 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=docker + docker_test.go:91: (dbg) Done: out/minikube-linux-amd64 start -p force-systemd-flag-228918 --memory=3072 --force-systemd --alsologtostderr -v=5 --driver=docker --container-runtime=docker: (27.53912067s) + docker_test.go:110: (dbg) Run: out/minikube-linux-amd64 -p force-systemd-flag-228918 ssh "docker info --format {{.CgroupDriver}}" + helpers_test.go:175: Cleaning up "force-systemd-flag-228918" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p force-systemd-flag-228918 + cert_options_test.go:123: (dbg) Done: out/minikube-linux-amd64 start -p cert-expiration-250017 --memory=3072 --cert-expiration=3m --driver=docker --container-runtime=docker: (29.449180157s) + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p force-systemd-flag-228918: (1.826339134s) +--- PASS: TestForceSystemdFlag (29.59s) +=== CONT TestNetworkPlugins +=== RUN TestNetworkPlugins/group +=== RUN TestNetworkPlugins/group/auto +=== PAUSE TestNetworkPlugins/group/auto +=== RUN TestNetworkPlugins/group/kubenet +=== PAUSE TestNetworkPlugins/group/kubenet +=== RUN TestNetworkPlugins/group/bridge +=== PAUSE TestNetworkPlugins/group/bridge +=== RUN TestNetworkPlugins/group/enable-default-cni +=== PAUSE TestNetworkPlugins/group/enable-default-cni +=== RUN TestNetworkPlugins/group/flannel +=== PAUSE TestNetworkPlugins/group/flannel +=== RUN TestNetworkPlugins/group/kindnet +=== PAUSE TestNetworkPlugins/group/kindnet +=== RUN TestNetworkPlugins/group/false +=== PAUSE TestNetworkPlugins/group/false +=== RUN TestNetworkPlugins/group/custom-flannel +=== PAUSE TestNetworkPlugins/group/custom-flannel +=== RUN TestNetworkPlugins/group/calico +=== PAUSE TestNetworkPlugins/group/calico +=== RUN TestNetworkPlugins/group/cilium + net_test.go:102: Skipping the test as it's interfering with other tests and is outdated + panic.go:636: + ----------------------- debugLogs start: cilium-999044 [pass: true] -------------------------------- + >>> netcat: nslookup kubernetes.default: + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> netcat: nslookup debug kubernetes.default a-records: + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> netcat: dig search kubernetes.default: + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local udp/53: + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local tcp/53: + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> netcat: nc 10.96.0.10 udp/53: + 
Error in configuration: context was not found for specified context: cilium-999044 + + + >>> netcat: nc 10.96.0.10 tcp/53: + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> netcat: /etc/nsswitch.conf: + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> netcat: /etc/hosts: + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> netcat: /etc/resolv.conf: + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> host: /etc/nsswitch.conf: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /etc/hosts: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /etc/resolv.conf: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, : + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> host: crictl pods: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: crictl containers: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> k8s: describe netcat deployment: + error: context "cilium-999044" does not exist + + + >>> k8s: describe netcat pod(s): + error: context "cilium-999044" does not exist + + + >>> k8s: netcat logs: + error: context "cilium-999044" does not exist + + + >>> k8s: describe coredns deployment: + error: context "cilium-999044" does not exist + + + >>> k8s: describe coredns pods: + error: context "cilium-999044" does not exist + + + >>> k8s: coredns logs: + error: context "cilium-999044" does not exist + + + >>> k8s: describe api server pod(s): + error: context "cilium-999044" does not exist + + + >>> k8s: api server logs: + error: context "cilium-999044" does not exist + + + >>> host: /etc/cni: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: ip a s: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: ip r s: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: iptables-save: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: iptables table nat: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. 
+ To start a cluster, run: "minikube start -p cilium-999044" + + + >>> k8s: describe cilium daemon set: + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> k8s: describe cilium daemon set pod(s): + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> k8s: cilium daemon set container(s) logs (current): + error: context "cilium-999044" does not exist + + + >>> k8s: cilium daemon set container(s) logs (previous): + error: context "cilium-999044" does not exist + + + >>> k8s: describe cilium deployment: + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> k8s: describe cilium deployment pod(s): + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> k8s: cilium deployment container(s) logs (current): + error: context "cilium-999044" does not exist + + + >>> k8s: cilium deployment container(s) logs (previous): + error: context "cilium-999044" does not exist + + + >>> k8s: describe kube-proxy daemon set: + error: context "cilium-999044" does not exist + + + >>> k8s: describe kube-proxy pod(s): + error: context "cilium-999044" does not exist + + + >>> k8s: kube-proxy logs: + error: context "cilium-999044" does not exist + + + >>> host: kubelet daemon status: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: kubelet daemon config: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> k8s: kubelet logs: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /etc/kubernetes/kubelet.conf: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /var/lib/kubelet/config.yaml: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. 
+ To start a cluster, run: "minikube start -p cilium-999044" + + + >>> k8s: kubectl config: + apiVersion: v1 + clusters: + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:19:55 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.103.2:8443 + name: NoKubernetes-160693 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:19:58 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.94.2:8443 + name: cert-expiration-250017 + contexts: + - context: + cluster: NoKubernetes-160693 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:19:55 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: NoKubernetes-160693 + name: NoKubernetes-160693 + - context: + cluster: cert-expiration-250017 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:19:58 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: cert-expiration-250017 + name: cert-expiration-250017 + current-context: cert-expiration-250017 + kind: Config + users: + - name: NoKubernetes-160693 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/NoKubernetes-160693/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/NoKubernetes-160693/client.key + - name: cert-expiration-250017 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/cert-expiration-250017/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/cert-expiration-250017/client.key + + + >>> k8s: cms: + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> host: docker daemon status: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: docker daemon config: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /etc/docker/daemon.json: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: docker system info: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: cri-docker daemon status: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: cri-docker daemon config: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. 
+ To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /usr/lib/systemd/system/cri-docker.service: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: cri-dockerd version: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: containerd daemon status: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: containerd daemon config: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /lib/systemd/system/containerd.service: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /etc/containerd/config.toml: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: containerd config dump: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: crio daemon status: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: crio daemon config: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /etc/crio: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: crio config: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + ----------------------- debugLogs end: cilium-999044 [took: 1.6580351s] -------------------------------- + helpers_test.go:175: Cleaning up "cilium-999044" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p cilium-999044 +=== CONT TestForceSystemdEnv + docker_test.go:155: (dbg) Run: out/minikube-linux-amd64 start -p force-systemd-env-533490 --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=docker + no_kubernetes_test.go:114: (dbg) Done: out/minikube-linux-amd64 start -p NoKubernetes-160693 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=docker: (13.915724426s) + no_kubernetes_test.go:202: (dbg) Run: out/minikube-linux-amd64 -p NoKubernetes-160693 status -o json + no_kubernetes_test.go:202: (dbg) Non-zero exit: out/minikube-linux-amd64 -p NoKubernetes-160693 status -o json: exit status 2 (209.165005ms) + + -- stdout -- + {"Name":"NoKubernetes-160693","Host":"Running","Kubelet":"Stopped","APIServer":"Stopped","Kubeconfig":"Configured","Worker":false} + + -- /stdout -- + no_kubernetes_test.go:126: (dbg) Run: out/minikube-linux-amd64 delete -p NoKubernetes-160693 + no_kubernetes_test.go:126: (dbg) Done: out/minikube-linux-amd64 delete -p NoKubernetes-160693: (1.462127513s) +=== RUN TestNoKubernetes/serial/Start + no_kubernetes_test.go:138: (dbg) Run: out/minikube-linux-amd64 start -p NoKubernetes-160693 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=docker + no_kubernetes_test.go:138: (dbg) Done: out/minikube-linux-amd64 start -p NoKubernetes-160693 --no-kubernetes --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=docker: (4.61259418s) +=== RUN TestNoKubernetes/serial/VerifyK8sNotRunning + no_kubernetes_test.go:149: (dbg) Run: out/minikube-linux-amd64 ssh -p NoKubernetes-160693 "sudo systemctl is-active --quiet service kubelet" + no_kubernetes_test.go:149: (dbg) Non-zero exit: out/minikube-linux-amd64 ssh -p NoKubernetes-160693 "sudo systemctl is-active --quiet service kubelet": exit status 1 (192.210294ms) + + ** stderr ** + ssh: Process exited with status 3 + + ** /stderr ** +=== RUN TestNoKubernetes/serial/ProfileList + no_kubernetes_test.go:171: (dbg) Run: out/minikube-linux-amd64 profile list + no_kubernetes_test.go:181: (dbg) Run: out/minikube-linux-amd64 profile list --output=json + docker_test.go:155: (dbg) Done: out/minikube-linux-amd64 start -p force-systemd-env-533490 --memory=3072 --alsologtostderr -v=5 --driver=docker --container-runtime=docker: (16.747722518s) + docker_test.go:110: (dbg) Run: out/minikube-linux-amd64 -p force-systemd-env-533490 ssh "docker info --format {{.CgroupDriver}}" + helpers_test.go:175: Cleaning up "force-systemd-env-533490" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p force-systemd-env-533490 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p force-systemd-env-533490: (1.667336318s) +--- PASS: TestForceSystemdEnv (18.63s) +=== CONT TestStoppedBinaryUpgrade +=== RUN TestStoppedBinaryUpgrade/Setup +=== RUN TestStoppedBinaryUpgrade/Upgrade + version_upgrade_test.go:183: (dbg) Run: /tmp/minikube-v1.32.0.1866352777 start -p stopped-upgrade-640989 --memory=3072 --vm-driver=docker --container-runtime=docker +E1102 23:20:32.448029 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + no_kubernetes_test.go:181: (dbg) Done: out/minikube-linux-amd64 profile list --output=json: (17.531324285s) +=== RUN TestNoKubernetes/serial/Stop + no_kubernetes_test.go:160: (dbg) Run: out/minikube-linux-amd64 stop -p NoKubernetes-160693 + no_kubernetes_test.go:160: (dbg) Done: out/minikube-linux-amd64 stop -p NoKubernetes-160693: (3.861182398s) +=== RUN TestNoKubernetes/serial/StartNoArgs + no_kubernetes_test.go:193: (dbg) Run: out/minikube-linux-amd64 start -p NoKubernetes-160693 --driver=docker --container-runtime=docker + aab_offline_test.go:55: (dbg) Done: out/minikube-linux-amd64 start -p offline-docker-154933 --alsologtostderr -v=1 --memory=3072 --wait=true --driver=docker --container-runtime=docker: (1m11.040915561s) + helpers_test.go:175: Cleaning up "offline-docker-154933" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p offline-docker-154933 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p offline-docker-154933: (2.171995023s) +--- PASS: TestOffline (73.21s) +=== CONT TestMissingContainerUpgrade + version_upgrade_test.go:309: (dbg) Run: /tmp/minikube-v1.32.0.660601076 start -p missing-upgrade-264144 --memory=3072 --driver=docker --container-runtime=docker + no_kubernetes_test.go:193: (dbg) Done: out/minikube-linux-amd64 start -p NoKubernetes-160693 --driver=docker --container-runtime=docker: (8.352573795s) +=== RUN TestNoKubernetes/serial/VerifyK8sNotRunningSecond + no_kubernetes_test.go:149: (dbg) Run: out/minikube-linux-amd64 ssh -p NoKubernetes-160693 "sudo systemctl is-active --quiet service kubelet" + no_kubernetes_test.go:149: (dbg) Non-zero exit: out/minikube-linux-amd64 ssh -p NoKubernetes-160693 "sudo systemctl is-active --quiet service kubelet": exit status 1 (208.84241ms) + + ** stderr ** + ssh: Process exited with status 3 + + ** /stderr ** + helpers_test.go:175: Cleaning up "NoKubernetes-160693" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p NoKubernetes-160693 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p NoKubernetes-160693: (3.080667209s) +--- PASS: TestNoKubernetes (81.02s) + --- PASS: TestNoKubernetes/serial (77.94s) + --- PASS: TestNoKubernetes/serial/StartNoK8sWithVersion (0.05s) + --- PASS: TestNoKubernetes/serial/StartWithK8s (26.87s) + --- PASS: TestNoKubernetes/serial/StartWithStopK8s (15.59s) + --- PASS: TestNoKubernetes/serial/Start (4.61s) + --- PASS: TestNoKubernetes/serial/VerifyK8sNotRunning (0.19s) + --- PASS: TestNoKubernetes/serial/ProfileList (18.20s) + --- PASS: TestNoKubernetes/serial/Stop (3.86s) + --- PASS: TestNoKubernetes/serial/StartNoArgs (8.35s) + --- PASS: TestNoKubernetes/serial/VerifyK8sNotRunningSecond (0.21s) +=== CONT TestKubernetesUpgrade + version_upgrade_test.go:222: (dbg) Run: out/minikube-linux-amd64 start -p kubernetes-upgrade-406587 --memory=3072 --kubernetes-version=v1.28.0 --alsologtostderr -v=1 --driver=docker --container-runtime=docker + version_upgrade_test.go:183: (dbg) Done: /tmp/minikube-v1.32.0.1866352777 start -p stopped-upgrade-640989 --memory=3072 --vm-driver=docker --container-runtime=docker: (42.314782194s) + version_upgrade_test.go:192: (dbg) Run: /tmp/minikube-v1.32.0.1866352777 -p stopped-upgrade-640989 stop + version_upgrade_test.go:309: (dbg) Done: /tmp/minikube-v1.32.0.660601076 start -p missing-upgrade-264144 --memory=3072 --driver=docker --container-runtime=docker: (19.990232508s) + version_upgrade_test.go:318: (dbg) Run: docker stop missing-upgrade-264144 + version_upgrade_test.go:192: (dbg) Done: /tmp/minikube-v1.32.0.1866352777 -p stopped-upgrade-640989 stop: (1.467499162s) + version_upgrade_test.go:198: (dbg) Run: out/minikube-linux-amd64 start -p stopped-upgrade-640989 --memory=3072 --alsologtostderr -v=1 --driver=docker --container-runtime=docker + version_upgrade_test.go:318: (dbg) Done: docker stop missing-upgrade-264144: (1.42686456s) + version_upgrade_test.go:323: (dbg) Run: docker rm missing-upgrade-264144 + version_upgrade_test.go:329: (dbg) Run: out/minikube-linux-amd64 start -p missing-upgrade-264144 --memory=3072 --alsologtostderr -v=1 --driver=docker --container-runtime=docker + version_upgrade_test.go:222: (dbg) Done: out/minikube-linux-amd64 start -p kubernetes-upgrade-406587 --memory=3072 --kubernetes-version=v1.28.0 --alsologtostderr -v=1 --driver=docker --container-runtime=docker: (15.829772034s) + version_upgrade_test.go:227: (dbg) Run: out/minikube-linux-amd64 stop -p kubernetes-upgrade-406587 + version_upgrade_test.go:198: (dbg) Done: out/minikube-linux-amd64 start -p stopped-upgrade-640989 --memory=3072 --alsologtostderr -v=1 --driver=docker --container-runtime=docker: (11.599027838s) +=== RUN TestStoppedBinaryUpgrade/MinikubeLogs + version_upgrade_test.go:206: (dbg) Run: out/minikube-linux-amd64 logs -p stopped-upgrade-640989 + helpers_test.go:175: Cleaning up "stopped-upgrade-640989" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p stopped-upgrade-640989 + version_upgrade_test.go:227: (dbg) Done: out/minikube-linux-amd64 stop -p kubernetes-upgrade-406587: (10.459615658s) + version_upgrade_test.go:232: (dbg) Run: out/minikube-linux-amd64 -p kubernetes-upgrade-406587 status --format={{.Host}} + version_upgrade_test.go:232: (dbg) Non-zero exit: out/minikube-linux-amd64 -p kubernetes-upgrade-406587 status --format={{.Host}}: exit status 7 (40.057585ms) + + -- stdout -- + Stopped + + -- /stdout -- + version_upgrade_test.go:234: status error: exit status 7 (may be ok) + version_upgrade_test.go:243: (dbg) Run: out/minikube-linux-amd64 start -p kubernetes-upgrade-406587 --memory=3072 --kubernetes-version=v1.34.1 --alsologtostderr -v=1 --driver=docker --container-runtime=docker + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p stopped-upgrade-640989: (1.612163868s) +--- PASS: TestStoppedBinaryUpgrade (58.78s) + --- PASS: TestStoppedBinaryUpgrade/Setup (1.28s) + --- PASS: TestStoppedBinaryUpgrade/Upgrade (55.38s) + --- PASS: TestStoppedBinaryUpgrade/MinikubeLogs (0.50s) +=== CONT TestStartStop +=== RUN TestStartStop/group +=== RUN TestStartStop/group/old-k8s-version +=== PAUSE TestStartStop/group/old-k8s-version +=== RUN TestStartStop/group/newest-cni +=== PAUSE TestStartStop/group/newest-cni +=== RUN TestStartStop/group/default-k8s-diff-port +=== PAUSE TestStartStop/group/default-k8s-diff-port +=== RUN TestStartStop/group/no-preload +=== PAUSE TestStartStop/group/no-preload +=== RUN TestStartStop/group/disable-driver-mounts +=== PAUSE TestStartStop/group/disable-driver-mounts +=== RUN TestStartStop/group/embed-certs +=== PAUSE TestStartStop/group/embed-certs +=== CONT TestRunningBinaryUpgrade + version_upgrade_test.go:120: (dbg) Run: /tmp/minikube-v1.32.0.2294987543 start -p running-upgrade-735317 --memory=3072 --vm-driver=docker --container-runtime=docker + version_upgrade_test.go:120: (dbg) Done: /tmp/minikube-v1.32.0.2294987543 start -p running-upgrade-735317 --memory=3072 --vm-driver=docker --container-runtime=docker: (21.345310262s) + version_upgrade_test.go:130: (dbg) Run: out/minikube-linux-amd64 start -p running-upgrade-735317 --memory=3072 --alsologtostderr -v=1 --driver=docker --container-runtime=docker + version_upgrade_test.go:329: (dbg) Done: out/minikube-linux-amd64 start -p missing-upgrade-264144 --memory=3072 --alsologtostderr -v=1 --driver=docker --container-runtime=docker: (36.014769317s) + helpers_test.go:175: Cleaning up "missing-upgrade-264144" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p missing-upgrade-264144 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p missing-upgrade-264144: (1.615895946s) +--- PASS: TestMissingContainerUpgrade (59.87s) +=== CONT TestPause +=== RUN TestPause/serial +=== RUN TestPause/serial/Start + pause_test.go:80: (dbg) Run: out/minikube-linux-amd64 start -p pause-443947 --memory=3072 --install-addons=false --wait=all --driver=docker --container-runtime=docker + version_upgrade_test.go:130: (dbg) Done: out/minikube-linux-amd64 start -p running-upgrade-735317 --memory=3072 --alsologtostderr -v=1 --driver=docker --container-runtime=docker: (18.449073973s) + helpers_test.go:175: Cleaning up "running-upgrade-735317" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p running-upgrade-735317 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p running-upgrade-735317: (1.626906182s) +--- PASS: TestRunningBinaryUpgrade (42.16s) +=== CONT TestCertOptions + cert_options_test.go:49: (dbg) Run: out/minikube-linux-amd64 start -p cert-options-284800 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=docker + cert_options_test.go:49: (dbg) Done: out/minikube-linux-amd64 start -p cert-options-284800 --memory=3072 --apiserver-ips=127.0.0.1 --apiserver-ips=192.168.15.15 --apiserver-names=localhost --apiserver-names=www.google.com --apiserver-port=8555 --driver=docker --container-runtime=docker: (19.109380384s) + cert_options_test.go:60: (dbg) Run: out/minikube-linux-amd64 -p cert-options-284800 ssh "openssl x509 -text -noout -in /var/lib/minikube/certs/apiserver.crt" + cert_options_test.go:88: (dbg) Run: kubectl --context cert-options-284800 config view + cert_options_test.go:100: (dbg) Run: out/minikube-linux-amd64 ssh -p cert-options-284800 -- "sudo cat /etc/kubernetes/admin.conf" + helpers_test.go:175: Cleaning up "cert-options-284800" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p cert-options-284800 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p cert-options-284800: (1.605866144s) +--- PASS: TestCertOptions (21.13s) +=== CONT TestDockerFlags + docker_test.go:51: (dbg) Run: out/minikube-linux-amd64 start -p docker-flags-640102 --cache-images=false --memory=3072 --install-addons=false --wait=false --docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true --alsologtostderr -v=5 --driver=docker --container-runtime=docker + docker_test.go:51: (dbg) Done: out/minikube-linux-amd64 start -p docker-flags-640102 --cache-images=false --memory=3072 --install-addons=false --wait=false --docker-env=FOO=BAR --docker-env=BAZ=BAT --docker-opt=debug --docker-opt=icc=true --alsologtostderr -v=5 --driver=docker --container-runtime=docker: (18.717342837s) + docker_test.go:56: (dbg) Run: out/minikube-linux-amd64 -p docker-flags-640102 ssh "sudo systemctl show docker --property=Environment --no-pager" + docker_test.go:67: (dbg) Run: out/minikube-linux-amd64 -p docker-flags-640102 ssh "sudo systemctl show docker --property=ExecStart --no-pager" + pause_test.go:80: (dbg) Done: out/minikube-linux-amd64 start -p pause-443947 --memory=3072 --install-addons=false --wait=all --driver=docker --container-runtime=docker: (57.993657537s) +=== RUN TestPause/serial/SecondStartNoReconfiguration + pause_test.go:92: (dbg) Run: out/minikube-linux-amd64 start -p pause-443947 --alsologtostderr -v=1 --driver=docker --container-runtime=docker + helpers_test.go:175: Cleaning up "docker-flags-640102" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p docker-flags-640102 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p docker-flags-640102: (1.633041119s) +--- PASS: TestDockerFlags (20.73s) +=== CONT TestNetworkPlugins/group/auto +=== RUN TestNetworkPlugins/group/auto/Start + net_test.go:112: (dbg) Run: out/minikube-linux-amd64 start -p auto-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --driver=docker --container-runtime=docker + cert_options_test.go:131: (dbg) Run: out/minikube-linux-amd64 start -p cert-expiration-250017 --memory=3072 --cert-expiration=8760h --driver=docker --container-runtime=docker + version_upgrade_test.go:243: (dbg) Done: out/minikube-linux-amd64 start -p kubernetes-upgrade-406587 --memory=3072 --kubernetes-version=v1.34.1 --alsologtostderr -v=1 --driver=docker --container-runtime=docker: (1m45.195288298s) + version_upgrade_test.go:248: (dbg) Run: kubectl --context kubernetes-upgrade-406587 version --output=json + version_upgrade_test.go:267: Attempting to downgrade Kubernetes (should fail) + version_upgrade_test.go:269: (dbg) Run: out/minikube-linux-amd64 start -p kubernetes-upgrade-406587 --memory=3072 --kubernetes-version=v1.28.0 --driver=docker --container-runtime=docker + version_upgrade_test.go:269: (dbg) Non-zero exit: out/minikube-linux-amd64 start -p kubernetes-upgrade-406587 --memory=3072 --kubernetes-version=v1.28.0 --driver=docker --container-runtime=docker: exit status 106 (39.867367ms) + + -- stdout -- + * [kubernetes-upgrade-406587] minikube v1.37.0 on Debian 12.12 (kvm/amd64) + - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true + - KUBECONFIG=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig + - MINIKUBE_HOME=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube + - MINIKUBE_BIN=out/minikube-linux-amd64 + - MINIKUBE_FORCE_SYSTEMD= + + + + -- /stdout -- + ** stderr ** + X Exiting due to K8S_DOWNGRADE_UNSUPPORTED: Unable to safely downgrade existing Kubernetes v1.34.1 cluster to v1.28.0 + * Suggestion: + + 1) Recreate the cluster with Kubernetes 1.28.0, by running: + + minikube delete -p kubernetes-upgrade-406587 + minikube start -p kubernetes-upgrade-406587 --kubernetes-version=v1.28.0 + + 2) Create a second cluster with Kubernetes 1.28.0, by running: + + minikube start -p kubernetes-upgrade-4065872 --kubernetes-version=v1.28.0 + + 3) Use the existing cluster at version Kubernetes 1.34.1, by running: + + minikube start -p kubernetes-upgrade-406587 --kubernetes-version=v1.34.1 + + + ** /stderr ** + version_upgrade_test.go:273: Attempting restart after unsuccessful downgrade + version_upgrade_test.go:275: (dbg) Run: out/minikube-linux-amd64 start -p kubernetes-upgrade-406587 --memory=3072 --kubernetes-version=v1.34.1 --alsologtostderr -v=1 --driver=docker --container-runtime=docker + cert_options_test.go:131: (dbg) Done: out/minikube-linux-amd64 start -p cert-expiration-250017 --memory=3072 --cert-expiration=8760h --driver=docker --container-runtime=docker: (31.329123343s) + helpers_test.go:175: Cleaning up "cert-expiration-250017" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p cert-expiration-250017 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p cert-expiration-250017: (1.703842955s) +--- PASS: TestCertExpiration (242.48s) +=== CONT TestNetworkPlugins/group/kindnet +=== RUN TestNetworkPlugins/group/kindnet/Start + net_test.go:112: (dbg) Run: out/minikube-linux-amd64 start -p kindnet-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=kindnet --driver=docker --container-runtime=docker + version_upgrade_test.go:275: (dbg) Done: out/minikube-linux-amd64 start -p kubernetes-upgrade-406587 --memory=3072 --kubernetes-version=v1.34.1 --alsologtostderr -v=1 --driver=docker --container-runtime=docker: (31.396583308s) + helpers_test.go:175: Cleaning up "kubernetes-upgrade-406587" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p kubernetes-upgrade-406587 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p kubernetes-upgrade-406587: (3.091154461s) +--- PASS: TestKubernetesUpgrade (166.08s) +=== CONT TestNetworkPlugins/group/calico +=== RUN TestNetworkPlugins/group/calico/Start + net_test.go:112: (dbg) Run: out/minikube-linux-amd64 start -p calico-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=calico --driver=docker --container-runtime=docker + pause_test.go:92: (dbg) Done: out/minikube-linux-amd64 start -p pause-443947 --alsologtostderr -v=1 --driver=docker --container-runtime=docker: (57.242197429s) +=== RUN TestPause/serial/Pause + pause_test.go:110: (dbg) Run: out/minikube-linux-amd64 pause -p pause-443947 --alsologtostderr -v=5 + net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p auto-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --driver=docker --container-runtime=docker: (55.686398614s) +=== RUN TestNetworkPlugins/group/auto/KubeletFlags + net_test.go:133: (dbg) Run: out/minikube-linux-amd64 ssh -p auto-999044 "pgrep -a kubelet" +I1102 23:23:38.574463 37869 config.go:182] Loaded profile config "auto-999044": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 +=== RUN TestNetworkPlugins/group/auto/NetCatPod + net_test.go:149: (dbg) Run: kubectl --context auto-999044 replace --force -f testdata/netcat-deployment.yaml +=== RUN TestPause/serial/VerifyStatus + status_test.go:76: (dbg) Run: out/minikube-linux-amd64 status -p pause-443947 --output=json --layout=cluster + status_test.go:76: (dbg) Non-zero exit: out/minikube-linux-amd64 status -p pause-443947 --output=json --layout=cluster: exit status 2 (211.596308ms) + + -- stdout -- + {"Name":"pause-443947","StatusCode":418,"StatusName":"Paused","Step":"Done","StepDetail":"* Paused 12 containers in: kube-system, kubernetes-dashboard, istio-operator","BinaryVersion":"v1.37.0","Components":{"kubeconfig":{"Name":"kubeconfig","StatusCode":200,"StatusName":"OK"}},"Nodes":[{"Name":"pause-443947","StatusCode":200,"StatusName":"OK","Components":{"apiserver":{"Name":"apiserver","StatusCode":418,"StatusName":"Paused"},"kubelet":{"Name":"kubelet","StatusCode":405,"StatusName":"Stopped"}}}]} + + -- /stdout -- +=== RUN TestPause/serial/Unpause + pause_test.go:121: (dbg) Run: out/minikube-linux-amd64 unpause -p pause-443947 --alsologtostderr -v=5 + net_test.go:149: (dbg) Done: kubectl --context auto-999044 replace --force -f testdata/netcat-deployment.yaml: (1.234984502s) +I1102 23:23:40.838251 37869 kapi.go:136] Waiting for deployment netcat to stabilize, generation 1 observed generation 0 
spec.replicas 1 status.replicas 0 +I1102 23:23:40.874814 37869 kapi.go:136] Waiting for deployment netcat to stabilize, generation 1 observed generation 1 spec.replicas 1 status.replicas 0 + net_test.go:163: (dbg) TestNetworkPlugins/group/auto/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ... + helpers_test.go:352: "netcat-cd4db9dbf-klkm2" [a4a5ce94-fb38-422d-a463-338a39c9552d] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils]) + pause_test.go:121: (dbg) Done: out/minikube-linux-amd64 unpause -p pause-443947 --alsologtostderr -v=5: (2.198949808s) +=== RUN TestPause/serial/PauseAgain + pause_test.go:110: (dbg) Run: out/minikube-linux-amd64 pause -p pause-443947 --alsologtostderr -v=5 +=== RUN TestPause/serial/DeletePaused + pause_test.go:132: (dbg) Run: out/minikube-linux-amd64 delete -p pause-443947 --alsologtostderr -v=5 + pause_test.go:132: (dbg) Done: out/minikube-linux-amd64 delete -p pause-443947 --alsologtostderr -v=5: (1.762034593s) +=== RUN TestPause/serial/VerifyDeletedResources + pause_test.go:142: (dbg) Run: out/minikube-linux-amd64 profile list --output json + pause_test.go:168: (dbg) Run: docker ps -a + pause_test.go:173: (dbg) Run: docker volume inspect pause-443947 + pause_test.go:173: (dbg) Non-zero exit: docker volume inspect pause-443947: exit status 1 (8.936518ms) + + -- stdout -- + [] + + -- /stdout -- + ** stderr ** + Error response from daemon: get pause-443947: no such volume + + ** /stderr ** + pause_test.go:178: (dbg) Run: docker network ls + helpers_test.go:175: Cleaning up "pause-443947" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p pause-443947 +--- PASS: TestPause (120.81s) + --- PASS: TestPause/serial (120.72s) + --- PASS: TestPause/serial/Start (57.99s) + --- PASS: TestPause/serial/SecondStartNoReconfiguration (57.26s) + --- PASS: TestPause/serial/Pause (0.35s) + --- PASS: TestPause/serial/VerifyStatus (0.21s) + --- PASS: TestPause/serial/Unpause (2.20s) + --- PASS: TestPause/serial/PauseAgain (0.42s) + --- PASS: TestPause/serial/DeletePaused (1.76s) + --- PASS: TestPause/serial/VerifyDeletedResources (0.53s) +=== CONT TestNetworkPlugins/group/custom-flannel +=== RUN TestNetworkPlugins/group/custom-flannel/Start + net_test.go:112: (dbg) Run: out/minikube-linux-amd64 start -p custom-flannel-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=testdata/kube-flannel.yaml --driver=docker --container-runtime=docker + helpers_test.go:352: "netcat-cd4db9dbf-klkm2" [a4a5ce94-fb38-422d-a463-338a39c9552d] Running + net_test.go:163: (dbg) TestNetworkPlugins/group/auto/NetCatPod: app=netcat healthy within 8.005453129s +=== RUN TestNetworkPlugins/group/auto/DNS + net_test.go:175: (dbg) Run: kubectl --context auto-999044 exec deployment/netcat -- nslookup kubernetes.default +=== RUN TestNetworkPlugins/group/auto/Localhost + net_test.go:194: (dbg) Run: kubectl --context auto-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080" +=== RUN TestNetworkPlugins/group/auto/HairPin + net_test.go:264: (dbg) Run: kubectl --context auto-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080" + net_test.go:210: "auto" test finished in 3m49.729104754s, failed=false + net_test.go:211: + ----------------------- debugLogs start: auto-999044 [pass: true] -------------------------------- + >>> netcat: nslookup kubernetes.default: + Server: 10.96.0.10 + 
Address: 10.96.0.10#53 + + Name: kubernetes.default.svc.cluster.local + Address: 10.96.0.1 + + + + >>> netcat: nc 10.96.0.10 udp/53: + Connection to 10.96.0.10 53 port [udp/*] succeeded! + + + >>> netcat: nc 10.96.0.10 tcp/53: + Connection to 10.96.0.10 53 port [tcp/*] succeeded! + + + >>> netcat: /etc/nsswitch.conf: + cat: can't open '/etc/nsswitch.conf': No such file or directory + command terminated with exit code 1 + + + >>> netcat: /etc/hosts: + # Kubernetes-managed hosts file. + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + fe00::0 ip6-localnet + fe00::0 ip6-mcastprefix + fe00::1 ip6-allnodes + fe00::2 ip6-allrouters + 10.244.0.3 netcat-cd4db9dbf-klkm2 + + + >>> netcat: /etc/resolv.conf: + nameserver 10.96.0.10 + search default.svc.cluster.local svc.cluster.local cluster.local test-pods.svc.cluster.local c.k8s-infra-prow-build.internal google.internal + options ndots:5 + + + >>> host: /etc/nsswitch.conf: + # /etc/nsswitch.conf + # + # Example configuration of GNU Name Service Switch functionality. + # If you have the `glibc-doc-reference' and `info' packages installed, try: + # `info libc "Name Service Switch"' for information about this file. + + passwd: files + group: files + shadow: files + gshadow: files + + hosts: files dns + networks: files + + protocols: db files + services: db files + ethers: db files + rpc: db files + + netgroup: nis + + + >>> host: /etc/hosts: + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + fe00:: ip6-localnet + ff00:: ip6-mcastprefix + ff02::1 ip6-allnodes + ff02::2 ip6-allrouters + 192.168.76.2 auto-999044 + 192.168.76.1 host.minikube.internal + 192.168.76.2 control-plane.minikube.internal + + + >>> host: /etc/resolv.conf: + # Generated by Docker Engine. + # This file can be edited; Docker Engine will not make further changes once it + # has been modified. 
+ + nameserver 192.168.76.1 + search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal + options ndots:5 + + # Based on host file: '/etc/resolv.conf' (internal resolver) + # ExtServers: [host(10.35.240.10)] + # Overrides: [] + # Option ndots from: host + + + >>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, : + Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice + NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME + node/auto-999044 Ready control-plane 60s v1.34.1 192.168.76.2 Debian GNU/Linux 12 (bookworm) 6.6.97+ docker://28.5.1 + + NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR + default service/kubernetes ClusterIP 10.96.0.1 443/TCP 59s + default service/netcat ClusterIP 10.104.16.177 8080/TCP 16s app=netcat + kube-system service/kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP,9153/TCP 58s k8s-app=kube-dns + + NAMESPACE NAME ENDPOINTS AGE + default endpoints/kubernetes 192.168.76.2:8443 59s + default endpoints/netcat 10.244.0.3:8080 16s + kube-system endpoints/k8s.io-minikube-hostpath 52s + kube-system endpoints/kube-dns 10.244.0.2:53,10.244.0.2:53,10.244.0.2:9153 53s + + NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR + kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 58s kube-proxy registry.k8s.io/kube-proxy:v1.34.1 k8s-app=kube-proxy + + NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR + default deployment.apps/netcat 1/1 1 1 17s dnsutils registry.k8s.io/e2e-test-images/agnhost:2.40 app=netcat + kube-system deployment.apps/coredns 1/1 1 1 58s coredns registry.k8s.io/coredns/coredns:v1.12.1 k8s-app=kube-dns + + NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES + default pod/netcat-cd4db9dbf-klkm2 1/1 Running 0 16s 10.244.0.3 auto-999044 + kube-system pod/coredns-66bc5c9577-mxnbp 1/1 Running 0 52s 10.244.0.2 auto-999044 + kube-system pod/etcd-auto-999044 1/1 Running 0 58s 192.168.76.2 auto-999044 + kube-system pod/kube-apiserver-auto-999044 1/1 Running 0 59s 192.168.76.2 auto-999044 + kube-system pod/kube-controller-manager-auto-999044 1/1 Running 0 58s 192.168.76.2 auto-999044 + kube-system pod/kube-proxy-9w7hc 1/1 Running 0 52s 192.168.76.2 auto-999044 + kube-system pod/kube-scheduler-auto-999044 1/1 Running 0 58s 192.168.76.2 auto-999044 + kube-system pod/storage-provisioner 1/1 Running 1 (21s ago) 52s 192.168.76.2 auto-999044 + + + >>> host: crictl pods: + POD ID CREATED STATE NAME NAMESPACE ATTEMPT RUNTIME + 42fd62c06fc88 14 seconds ago Ready netcat-cd4db9dbf-klkm2 default 0 (default) + c924501b56abb 51 seconds ago Ready storage-provisioner kube-system 0 (default) + 28d64c4d85ace 51 seconds ago Ready coredns-66bc5c9577-mxnbp kube-system 0 (default) + 33071dea37c65 51 seconds ago Ready kube-proxy-9w7hc kube-system 0 (default) + 346d207f8fe18 About a minute ago Ready kube-apiserver-auto-999044 kube-system 0 (default) + 3025f09650fa3 About a minute ago Ready etcd-auto-999044 kube-system 0 (default) + 07a95d7d60e48 About a minute ago Ready kube-controller-manager-auto-999044 kube-system 0 (default) + b2ecc333d57b3 About a minute ago Ready kube-scheduler-auto-999044 kube-system 0 (default) + + + >>> host: crictl containers: + CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE + d26403f5e5773 
registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 13 seconds ago Running dnsutils 0 42fd62c06fc88 netcat-cd4db9dbf-klkm2 default + b2556765f7383 6e38f40d628db 19 seconds ago Running storage-provisioner 1 c924501b56abb storage-provisioner kube-system + 4f07a6c8f344e 6e38f40d628db 51 seconds ago Exited storage-provisioner 0 c924501b56abb storage-provisioner kube-system + 1c82279a9cb5e 52546a367cc9e 51 seconds ago Running coredns 0 28d64c4d85ace coredns-66bc5c9577-mxnbp kube-system + 9c0ad8c638527 fc25172553d79 51 seconds ago Running kube-proxy 0 33071dea37c65 kube-proxy-9w7hc kube-system + b676e439e2a85 c80c8dbafe7dd About a minute ago Running kube-controller-manager 0 07a95d7d60e48 kube-controller-manager-auto-999044 kube-system + b81bf8c1821cc 5f1f5298c888d About a minute ago Running etcd 0 3025f09650fa3 etcd-auto-999044 kube-system + 00d3e061ae4b0 c3994bc696102 About a minute ago Running kube-apiserver 0 346d207f8fe18 kube-apiserver-auto-999044 kube-system + 63337d019d096 7dd6aaa1717ab About a minute ago Running kube-scheduler 0 b2ecc333d57b3 kube-scheduler-auto-999044 kube-system + + + >>> k8s: describe netcat deployment: + Name: netcat + Namespace: default + CreationTimestamp: Sun, 02 Nov 2025 23:23:38 +0000 + Labels: app=netcat + Annotations: deployment.kubernetes.io/revision: 1 + Selector: app=netcat + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 25% max unavailable, 25% max surge + Pod Template: + Labels: app=netcat + Containers: + dnsutils: + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + Environment: + Mounts: + Volumes: + Node-Selectors: + Tolerations: + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: netcat-cd4db9dbf (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 16s deployment-controller Scaled up replica set netcat-cd4db9dbf from 0 to 1 + + + >>> k8s: describe netcat pod(s): + Name: netcat-cd4db9dbf-klkm2 + Namespace: default + Priority: 0 + Service Account: default + Node: auto-999044/192.168.76.2 + Start Time: Sun, 02 Nov 2025 23:23:40 +0000 + Labels: app=netcat + pod-template-hash=cd4db9dbf + Annotations: + Status: Running + IP: 10.244.0.3 + IPs: + IP: 10.244.0.3 + Controlled By: ReplicaSet/netcat-cd4db9dbf + Containers: + dnsutils: + Container ID: docker://d26403f5e5773f80d7cc8fa96e8ec45de94311b4717f5b8bcd2665df32960524 + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Image ID: docker-pullable://registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + State: Running + Started: Sun, 02 Nov 2025 23:23:42 +0000 + Ready: True + Restart Count: 0 + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-pkdnk (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-api-access-pkdnk: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + 
Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: + Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 15s default-scheduler Successfully assigned default/netcat-cd4db9dbf-klkm2 to auto-999044 + Normal Pulling 14s kubelet Pulling image "registry.k8s.io/e2e-test-images/agnhost:2.40" + Normal Pulled 13s kubelet Successfully pulled image "registry.k8s.io/e2e-test-images/agnhost:2.40" in 1.414s (1.414s including waiting). Image size: 127004766 bytes. + Normal Created 13s kubelet Created container: dnsutils + Normal Started 13s kubelet Started container dnsutils + + + >>> k8s: netcat logs: + + + >>> k8s: describe coredns deployment: + Name: coredns + Namespace: kube-system + CreationTimestamp: Sun, 02 Nov 2025 23:22:57 +0000 + Labels: k8s-app=kube-dns + Annotations: deployment.kubernetes.io/revision: 1 + Selector: k8s-app=kube-dns + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 1 max unavailable, 25% max surge + Pod Template: + Labels: k8s-app=kube-dns + Service Account: coredns + Containers: + coredns: + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Ports: 53/UDP (dns), 53/TCP (dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from config-volume (ro) + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + Priority Class Name: system-cluster-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: coredns-66bc5c9577 (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 52s deployment-controller Scaled up replica set coredns-66bc5c9577 from 0 to 2 + Normal ScalingReplicaSet 52s deployment-controller Scaled down replica set coredns-66bc5c9577 from 2 to 1 + + + >>> k8s: describe coredns pods: + Name: coredns-66bc5c9577-mxnbp + Namespace: kube-system + Priority: 2000000000 + Priority Class Name: system-cluster-critical + Service Account: coredns + Node: auto-999044/192.168.76.2 + Start Time: Sun, 02 Nov 2025 23:23:03 +0000 + Labels: k8s-app=kube-dns + pod-template-hash=66bc5c9577 + Annotations: + Status: Running + IP: 10.244.0.2 + IPs: + IP: 10.244.0.2 + Controlled By: ReplicaSet/coredns-66bc5c9577 + Containers: + coredns: + Container ID: docker://1c82279a9cb5eef9af9728cf00efc3297102cdc50b2d00f3d6f438c05e7b1fd0 + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Image ID: docker-pullable://registry.k8s.io/coredns/coredns@sha256:e8c262566636e6bc340ece6473b0eed193cad045384401529721ddbe6463d31c + Ports: 53/UDP (dns), 53/TCP 
(dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + State: Running + Started: Sun, 02 Nov 2025 23:23:04 +0000 + Ready: True + Restart Count: 0 + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from config-volume (ro) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-qjxnk (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + kube-api-access-qjxnk: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: Burstable + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 52s default-scheduler Successfully assigned kube-system/coredns-66bc5c9577-mxnbp to auto-999044 + Normal Pulled 51s kubelet Container image "registry.k8s.io/coredns/coredns:v1.12.1" already present on machine + Normal Created 51s kubelet Created container: coredns + Normal Started 51s kubelet Started container coredns + Warning Unhealthy 29s (x4 over 49s) kubelet Readiness probe failed: HTTP probe failed with statuscode: 503 + + + >>> k8s: coredns logs: + maxprocs: Leaving GOMAXPROCS=8: CPU quota undefined + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/ready: Still waiting on: "kubernetes" + [INFO] plugin/ready: Still waiting on: "kubernetes" + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API + .:53 + [INFO] plugin/reload: Running configuration SHA512 = 3e2243e8b9e7116f563b83b1933f477a68ba9ad4a829ed5d7e54629fb2ce53528b9bc6023030be20be434ad805fd246296dd428c64e9bbef3a70f22b8621f560 + CoreDNS-1.12.1 + linux/amd64, go1.24.1, 707c7c1 + [INFO] 127.0.0.1:49977 - 57353 "HINFO IN 8964218168939069100.8385655426704767249. 
udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.026625189s + [INFO] plugin/ready: Still waiting on: "kubernetes" + [INFO] plugin/ready: Still waiting on: "kubernetes" + [INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout + [ERROR] plugin/kubernetes: Unhandled Error + [INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout + [ERROR] plugin/kubernetes: Unhandled Error + [INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout + [ERROR] plugin/kubernetes: Unhandled Error + [INFO] 10.244.0.3:35896 - 20353 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.00022997s + [INFO] 10.244.0.3:39753 - 60501 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.00011803s + [INFO] 10.244.0.3:51614 - 34783 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000083132s + [INFO] 10.244.0.3:57560 - 17588 "A IN netcat.default.svc.cluster.local. udp 50 false 512" NOERROR qr,aa,rd 98 0.000713286s + [INFO] 10.244.0.3:57560 - 17920 "AAAA IN netcat.default.svc.cluster.local. udp 50 false 512" NOERROR qr,aa,rd 143 0.000712532s + [INFO] 10.244.0.3:60214 - 58312 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000139337s + [INFO] 10.244.0.3:53491 - 31755 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.00029419s + [INFO] 10.244.0.3:34175 - 51028 "AAAA IN kubernetes.default.svc.cluster.local. 
udp 54 false 512" NOERROR qr,aa,rd 147 0.000084121s + + + >>> k8s: describe api server pod(s): + Name: kube-apiserver-auto-999044 + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Node: auto-999044/192.168.76.2 + Start Time: Sun, 02 Nov 2025 23:22:57 +0000 + Labels: component=kube-apiserver + tier=control-plane + Annotations: kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 192.168.76.2:8443 + kubernetes.io/config.hash: db99d263c18f0527b65667b4dd8e6d9b + kubernetes.io/config.mirror: db99d263c18f0527b65667b4dd8e6d9b + kubernetes.io/config.seen: 2025-11-02T23:22:53.860431649Z + kubernetes.io/config.source: file + Status: Running + SeccompProfile: RuntimeDefault + IP: 192.168.76.2 + IPs: + IP: 192.168.76.2 + Controlled By: Node/auto-999044 + Containers: + kube-apiserver: + Container ID: docker://00d3e061ae4b0694347a44049217016b774686c335d2038bbb15bd11906f735e + Image: registry.k8s.io/kube-apiserver:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-apiserver@sha256:b9d7c117f8ac52bed4b13aeed973dc5198f9d93a926e6fe9e0b384f155baa902 + Port: 8443/TCP (probe-port) + Host Port: 8443/TCP (probe-port) + Command: + kube-apiserver + --advertise-address=192.168.76.2 + --allow-privileged=true + --authorization-mode=Node,RBAC + --client-ca-file=/var/lib/minikube/certs/ca.crt + --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + --enable-bootstrap-token-auth=true + --etcd-cafile=/var/lib/minikube/certs/etcd/ca.crt + --etcd-certfile=/var/lib/minikube/certs/apiserver-etcd-client.crt + --etcd-keyfile=/var/lib/minikube/certs/apiserver-etcd-client.key + --etcd-servers=https://127.0.0.1:2379 + --kubelet-client-certificate=/var/lib/minikube/certs/apiserver-kubelet-client.crt + --kubelet-client-key=/var/lib/minikube/certs/apiserver-kubelet-client.key + --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + --proxy-client-cert-file=/var/lib/minikube/certs/front-proxy-client.crt + --proxy-client-key-file=/var/lib/minikube/certs/front-proxy-client.key + --requestheader-allowed-names=front-proxy-client + --requestheader-client-ca-file=/var/lib/minikube/certs/front-proxy-ca.crt + --requestheader-extra-headers-prefix=X-Remote-Extra- + --requestheader-group-headers=X-Remote-Group + --requestheader-username-headers=X-Remote-User + --secure-port=8443 + --service-account-issuer=https://kubernetes.default.svc.cluster.local + --service-account-key-file=/var/lib/minikube/certs/sa.pub + --service-account-signing-key-file=/var/lib/minikube/certs/sa.key + --service-cluster-ip-range=10.96.0.0/12 + --tls-cert-file=/var/lib/minikube/certs/apiserver.crt + --tls-private-key-file=/var/lib/minikube/certs/apiserver.key + State: Running + Started: Sun, 02 Nov 2025 23:22:54 +0000 + Ready: True + Restart Count: 0 + Requests: + cpu: 250m + Liveness: http-get https://192.168.76.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=8 + Readiness: http-get https://192.168.76.2:probe-port/readyz delay=0s timeout=15s period=1s #success=1 #failure=3 + Startup: http-get https://192.168.76.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=24 + Environment: + Mounts: + /etc/ca-certificates from etc-ca-certificates (ro) + /etc/ssl/certs from ca-certs (ro) + /usr/local/share/ca-certificates from usr-local-share-ca-certificates (ro) + /usr/share/ca-certificates from 
usr-share-ca-certificates (ro) + /var/lib/minikube/certs from k8s-certs (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + ca-certs: + Type: HostPath (bare host directory volume) + Path: /etc/ssl/certs + HostPathType: DirectoryOrCreate + etc-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /etc/ca-certificates + HostPathType: DirectoryOrCreate + k8s-certs: + Type: HostPath (bare host directory volume) + Path: /var/lib/minikube/certs + HostPathType: DirectoryOrCreate + usr-local-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/local/share/ca-certificates + HostPathType: DirectoryOrCreate + usr-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/share/ca-certificates + HostPathType: DirectoryOrCreate + QoS Class: Burstable + Node-Selectors: + Tolerations: :NoExecute op=Exists + Events: + + + >>> k8s: api server logs: + I1102 23:22:54.504039 1 options.go:263] external host was not specified, using 192.168.76.2 + I1102 23:22:54.505344 1 server.go:150] Version: v1.34.1 + I1102 23:22:54.505361 1 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + W1102 23:22:54.732267 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=storage.k8s.io/v1alpha1 + W1102 23:22:54.732283 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=internal.apiserver.k8s.io/v1alpha1 + W1102 23:22:54.732287 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=rbac.authorization.k8s.io/v1alpha1 + W1102 23:22:54.732289 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=admissionregistration.k8s.io/v1alpha1 + W1102 23:22:54.732292 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=coordination.k8s.io/v1alpha2 + W1102 23:22:54.732294 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=resource.k8s.io/v1alpha3 + W1102 23:22:54.732296 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=certificates.k8s.io/v1alpha1 + W1102 23:22:54.732298 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=imagepolicy.k8s.io/v1alpha1 + W1102 23:22:54.732301 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=scheduling.k8s.io/v1alpha1 + W1102 23:22:54.732302 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=storagemigration.k8s.io/v1alpha1 + W1102 23:22:54.732304 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this 
is unsupported, proceed at your own risk: api=authentication.k8s.io/v1alpha1 + W1102 23:22:54.732306 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=node.k8s.io/v1alpha1 + W1102 23:22:54.741578 1 logging.go:55] [core] [Channel #2 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:54.741797 1 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:22:54.742422 1 shared_informer.go:349] "Waiting for caches to sync" controller="node_authorizer" + I1102 23:22:54.748029 1 shared_informer.go:349] "Waiting for caches to sync" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:22:54.751883 1 plugins.go:157] Loaded 14 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,RuntimeClass,DefaultIngressClass,PodTopologyLabels,MutatingAdmissionPolicy,MutatingAdmissionWebhook. + I1102 23:22:54.751896 1 plugins.go:160] Loaded 13 validating admission controller(s) successfully in the following order: LimitRanger,ServiceAccount,PodSecurity,Priority,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,ClusterTrustBundleAttest,CertificateSubjectRestriction,ValidatingAdmissionPolicy,ValidatingAdmissionWebhook,ResourceQuota. + I1102 23:22:54.752063 1 instance.go:239] Using reconciler: lease + W1102 23:22:54.752634 1 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.214603 1 logging.go:55] [core] [Channel #13 SubChannel #14]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.221900 1 logging.go:55] [core] [Channel #21 SubChannel #22]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + I1102 23:22:55.227199 1 handler.go:285] Adding GroupVersion apiextensions.k8s.io v1 to ResourceManager + W1102 23:22:55.227214 1 genericapiserver.go:784] Skipping API apiextensions.k8s.io/v1beta1 because it has no resources. + I1102 23:22:55.229409 1 cidrallocator.go:197] starting ServiceCIDR Allocator Controller + W1102 23:22:55.229798 1 logging.go:55] [core] [Channel #27 SubChannel #28]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.233303 1 logging.go:55] [core] [Channel #31 SubChannel #32]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.236811 1 logging.go:55] [core] [Channel #35 SubChannel #36]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.240124 1 logging.go:55] [core] [Channel #39 SubChannel #40]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.244119 1 logging.go:55] [core] [Channel #43 SubChannel #44]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.247450 1 logging.go:55] [core] [Channel #47 SubChannel #48]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.250832 1 logging.go:55] [core] [Channel #51 SubChannel #52]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.254197 1 logging.go:55] [core] [Channel #55 SubChannel #56]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.258698 1 logging.go:55] [core] [Channel #59 SubChannel #60]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.263815 1 logging.go:55] [core] [Channel #63 SubChannel #64]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.267857 1 logging.go:55] [core] [Channel #67 SubChannel #68]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.271810 1 logging.go:55] [core] [Channel #71 SubChannel #72]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.275003 1 logging.go:55] [core] [Channel #75 SubChannel #76]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.278384 1 logging.go:55] [core] [Channel #79 SubChannel #80]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.281579 1 logging.go:55] [core] [Channel #83 SubChannel #84]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.284851 1 logging.go:55] [core] [Channel #87 SubChannel #88]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.288315 1 logging.go:55] [core] [Channel #91 SubChannel #92]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + I1102 23:22:55.301538 1 handler.go:285] Adding GroupVersion v1 to ResourceManager + I1102 23:22:55.301690 1 apis.go:112] API group "internal.apiserver.k8s.io" is not enabled, skipping. + W1102 23:22:55.302338 1 logging.go:55] [core] [Channel #95 SubChannel #96]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.306532 1 logging.go:55] [core] [Channel #99 SubChannel #100]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.311994 1 logging.go:55] [core] [Channel #103 SubChannel #104]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.315222 1 logging.go:55] [core] [Channel #107 SubChannel #108]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.318566 1 logging.go:55] [core] [Channel #111 SubChannel #112]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.322572 1 logging.go:55] [core] [Channel #115 SubChannel #116]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.326129 1 logging.go:55] [core] [Channel #119 SubChannel #120]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.329236 1 logging.go:55] [core] [Channel #123 SubChannel #124]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.332315 1 logging.go:55] [core] [Channel #127 SubChannel #128]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.335583 1 logging.go:55] [core] [Channel #131 SubChannel #132]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.342975 1 logging.go:55] [core] [Channel #139 SubChannel #140]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.347822 1 logging.go:55] [core] [Channel #143 SubChannel #144]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.350959 1 logging.go:55] [core] [Channel #147 SubChannel #148]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.354070 1 logging.go:55] [core] [Channel #151 SubChannel #152]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.357783 1 logging.go:55] [core] [Channel #155 SubChannel #156]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.360963 1 logging.go:55] [core] [Channel #159 SubChannel #160]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.365380 1 logging.go:55] [core] [Channel #163 SubChannel #164]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.368603 1 logging.go:55] [core] [Channel #167 SubChannel #168]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.372529 1 logging.go:55] [core] [Channel #171 SubChannel #172]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.375788 1 logging.go:55] [core] [Channel #175 SubChannel #176]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.379002 1 logging.go:55] [core] [Channel #179 SubChannel #180]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.382257 1 logging.go:55] [core] [Channel #183 SubChannel #184]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.385451 1 logging.go:55] [core] [Channel #187 SubChannel #188]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:22:55.388171 1 apis.go:112] API group "storagemigration.k8s.io" is not enabled, skipping. + W1102 23:22:55.388765 1 logging.go:55] [core] [Channel #191 SubChannel #192]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.391841 1 logging.go:55] [core] [Channel #195 SubChannel #196]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.397146 1 logging.go:55] [core] [Channel #199 SubChannel #200]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.400398 1 logging.go:55] [core] [Channel #203 SubChannel #204]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.404287 1 logging.go:55] [core] [Channel #207 SubChannel #208]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.407673 1 logging.go:55] [core] [Channel #211 SubChannel #212]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.411721 1 logging.go:55] [core] [Channel #215 SubChannel #216]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.415712 1 logging.go:55] [core] [Channel #219 SubChannel #220]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.419269 1 logging.go:55] [core] [Channel #223 SubChannel #224]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.422399 1 logging.go:55] [core] [Channel #227 SubChannel #228]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.425630 1 logging.go:55] [core] [Channel #231 SubChannel #232]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.428877 1 logging.go:55] [core] [Channel #235 SubChannel #236]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.432114 1 logging.go:55] [core] [Channel #239 SubChannel #240]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.440632 1 logging.go:55] [core] [Channel #243 SubChannel #244]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:22:55.444856 1 logging.go:55] [core] [Channel #247 SubChannel #248]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:22:55.449704 1 logging.go:55] [core] [Channel #251 SubChannel #252]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:22:55.464554 1 handler.go:285] Adding GroupVersion authentication.k8s.io v1 to ResourceManager + W1102 23:22:55.464568 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1beta1 because it has no resources. + W1102 23:22:55.464571 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1alpha1 because it has no resources. + I1102 23:22:55.464780 1 handler.go:285] Adding GroupVersion authorization.k8s.io v1 to ResourceManager + W1102 23:22:55.464784 1 genericapiserver.go:784] Skipping API authorization.k8s.io/v1beta1 because it has no resources. + I1102 23:22:55.465158 1 handler.go:285] Adding GroupVersion autoscaling v2 to ResourceManager + I1102 23:22:55.465481 1 handler.go:285] Adding GroupVersion autoscaling v1 to ResourceManager + W1102 23:22:55.465485 1 genericapiserver.go:784] Skipping API autoscaling/v2beta1 because it has no resources. + W1102 23:22:55.465488 1 genericapiserver.go:784] Skipping API autoscaling/v2beta2 because it has no resources. + I1102 23:22:55.466061 1 handler.go:285] Adding GroupVersion batch v1 to ResourceManager + W1102 23:22:55.466067 1 genericapiserver.go:784] Skipping API batch/v1beta1 because it has no resources. + I1102 23:22:55.466390 1 handler.go:285] Adding GroupVersion certificates.k8s.io v1 to ResourceManager + W1102 23:22:55.466393 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1beta1 because it has no resources. 
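Editor's note: the "Adding GroupVersion <group> v1 to ResourceManager" entries above are the API group/versions this apiserver ends up serving. As a minimal sketch (not part of the test harness), and assuming client-go plus a kubeconfig at the standard ~/.kube/config path that minikube is presumed to have written, the same set can be enumerated from a client like this:

    package main

    import (
    	"fmt"
    	"log"

    	"k8s.io/client-go/discovery"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Assumption: kubeconfig lives at the default ~/.kube/config location.
    	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    	if err != nil {
    		log.Fatal(err)
    	}
    	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
    	if err != nil {
    		log.Fatal(err)
    	}
    	groups, err := dc.ServerGroups()
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Prints served group/versions, e.g. "authentication.k8s.io/v1", "batch/v1".
    	for _, g := range groups.Groups {
    		for _, v := range g.Versions {
    			fmt.Println(v.GroupVersion)
    		}
    	}
    }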
+ W1102 23:22:55.466396 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1alpha1 because it has no resources. + I1102 23:22:55.466623 1 handler.go:285] Adding GroupVersion coordination.k8s.io v1 to ResourceManager + W1102 23:22:55.466627 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1beta1 because it has no resources. + W1102 23:22:55.466629 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1alpha2 because it has no resources. + I1102 23:22:55.466859 1 handler.go:285] Adding GroupVersion discovery.k8s.io v1 to ResourceManager + W1102 23:22:55.466863 1 genericapiserver.go:784] Skipping API discovery.k8s.io/v1beta1 because it has no resources. + I1102 23:22:55.467881 1 handler.go:285] Adding GroupVersion networking.k8s.io v1 to ResourceManager + W1102 23:22:55.467889 1 genericapiserver.go:784] Skipping API networking.k8s.io/v1beta1 because it has no resources. + I1102 23:22:55.468106 1 handler.go:285] Adding GroupVersion node.k8s.io v1 to ResourceManager + W1102 23:22:55.468110 1 genericapiserver.go:784] Skipping API node.k8s.io/v1beta1 because it has no resources. + W1102 23:22:55.468113 1 genericapiserver.go:784] Skipping API node.k8s.io/v1alpha1 because it has no resources. + I1102 23:22:55.468454 1 handler.go:285] Adding GroupVersion policy v1 to ResourceManager + W1102 23:22:55.468458 1 genericapiserver.go:784] Skipping API policy/v1beta1 because it has no resources. + I1102 23:22:55.469182 1 handler.go:285] Adding GroupVersion rbac.authorization.k8s.io v1 to ResourceManager + W1102 23:22:55.469188 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1beta1 because it has no resources. + W1102 23:22:55.469191 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources. + I1102 23:22:55.469373 1 handler.go:285] Adding GroupVersion scheduling.k8s.io v1 to ResourceManager + W1102 23:22:55.469376 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1beta1 because it has no resources. + W1102 23:22:55.469378 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources. + I1102 23:22:55.470395 1 handler.go:285] Adding GroupVersion storage.k8s.io v1 to ResourceManager + W1102 23:22:55.470404 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1beta1 because it has no resources. + W1102 23:22:55.470407 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1alpha1 because it has no resources. + I1102 23:22:55.470907 1 handler.go:285] Adding GroupVersion flowcontrol.apiserver.k8s.io v1 to ResourceManager + W1102 23:22:55.470938 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta3 because it has no resources. + W1102 23:22:55.470943 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta2 because it has no resources. + W1102 23:22:55.470945 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta1 because it has no resources. + I1102 23:22:55.472653 1 handler.go:285] Adding GroupVersion apps v1 to ResourceManager + W1102 23:22:55.472662 1 genericapiserver.go:784] Skipping API apps/v1beta2 because it has no resources. + W1102 23:22:55.472665 1 genericapiserver.go:784] Skipping API apps/v1beta1 because it has no resources. + I1102 23:22:55.473650 1 handler.go:285] Adding GroupVersion admissionregistration.k8s.io v1 to ResourceManager + W1102 23:22:55.473659 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1beta1 because it has no resources. 
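Editor's note: the repeated "Skipping API <group>/<version> because it has no resources" warnings mean those beta/alpha versions are not served at all by this apiserver. A hedged sketch, under the same client-go and kubeconfig assumptions as above, of probing one such version and treating a NotFound discovery error as "not served":

    package main

    import (
    	"fmt"
    	"log"

    	apierrors "k8s.io/apimachinery/pkg/api/errors"
    	"k8s.io/client-go/discovery"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    	if err != nil {
    		log.Fatal(err)
    	}
    	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// "apps/v1beta1" is one of the versions the log reports as skipped.
    	if _, err := dc.ServerResourcesForGroupVersion("apps/v1beta1"); err != nil {
    		if apierrors.IsNotFound(err) {
    			fmt.Println("apps/v1beta1 is not served by this apiserver")
    			return
    		}
    		log.Fatal(err)
    	}
    	fmt.Println("apps/v1beta1 is served")
    }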
+ W1102 23:22:55.473662 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1alpha1 because it has no resources. + I1102 23:22:55.473963 1 handler.go:285] Adding GroupVersion events.k8s.io v1 to ResourceManager + W1102 23:22:55.473967 1 genericapiserver.go:784] Skipping API events.k8s.io/v1beta1 because it has no resources. + W1102 23:22:55.474000 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta2 because it has no resources. + I1102 23:22:55.474882 1 handler.go:285] Adding GroupVersion resource.k8s.io v1 to ResourceManager + W1102 23:22:55.474888 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta1 because it has no resources. + W1102 23:22:55.474891 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1alpha3 because it has no resources. + W1102 23:22:55.476268 1 logging.go:55] [core] [Channel #255 SubChannel #256]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:22:55.479539 1 handler.go:285] Adding GroupVersion apiregistration.k8s.io v1 to ResourceManager + W1102 23:22:55.479549 1 genericapiserver.go:784] Skipping API apiregistration.k8s.io/v1beta1 because it has no resources. + I1102 23:22:55.669306 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:22:55.669315 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:22:55.669498 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/lib/minikube/certs/apiserver.crt::/var/lib/minikube/certs/apiserver.key" + I1102 23:22:55.669672 1 secure_serving.go:211] Serving securely on [::]:8443 + I1102 23:22:55.669688 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" + I1102 23:22:55.669715 1 aggregator.go:169] waiting for initial CRD sync... 
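Editor's note: at this point the serving certificates are loaded, the apiserver reports "Serving securely on [::]:8443", and several controllers are still waiting for caches and the initial CRD sync. A small sketch (same client-go/kubeconfig assumptions as above) of asking the aggregated /readyz endpoint whether all of those startup checks have passed:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    	if err != nil {
    		log.Fatal(err)
    	}
    	cs, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// /readyz aggregates the apiserver's readiness checks (etcd reachability,
    	// informer sync, etc.) and returns "ok" once startup has completed.
    	body, err := cs.Discovery().RESTClient().Get().AbsPath("/readyz").DoRaw(context.Background())
    	if err != nil {
    		log.Fatalf("apiserver not ready: %v", err)
    	}
    	fmt.Println(string(body))
    }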
+ I1102 23:22:55.669724 1 controller.go:80] Starting OpenAPI V3 AggregationController + I1102 23:22:55.669735 1 apf_controller.go:377] Starting API Priority and Fairness config controller + I1102 23:22:55.669734 1 dynamic_serving_content.go:135] "Starting controller" name="aggregator-proxy-cert::/var/lib/minikube/certs/front-proxy-client.crt::/var/lib/minikube/certs/front-proxy-client.key" + I1102 23:22:55.669763 1 gc_controller.go:78] Starting apiserver lease garbage collector + I1102 23:22:55.669777 1 local_available_controller.go:156] Starting LocalAvailability controller + I1102 23:22:55.669782 1 cache.go:32] Waiting for caches to sync for LocalAvailability controller + I1102 23:22:55.669819 1 controller.go:78] Starting OpenAPI AggregationController + I1102 23:22:55.669844 1 customresource_discovery_controller.go:294] Starting DiscoveryController + I1102 23:22:55.669850 1 default_servicecidr_controller.go:111] Starting kubernetes-service-cidr-controller + I1102 23:22:55.669862 1 shared_informer.go:349] "Waiting for caches to sync" controller="kubernetes-service-cidr-controller" + I1102 23:22:55.669900 1 controller.go:119] Starting legacy_token_tracking_controller + I1102 23:22:55.669905 1 shared_informer.go:349] "Waiting for caches to sync" controller="configmaps" + I1102 23:22:55.669938 1 crdregistration_controller.go:114] Starting crd-autoregister controller + I1102 23:22:55.669944 1 shared_informer.go:349] "Waiting for caches to sync" controller="crd-autoregister" + I1102 23:22:55.670150 1 controller.go:142] Starting OpenAPI controller + I1102 23:22:55.670165 1 controller.go:90] Starting OpenAPI V3 controller + I1102 23:22:55.670175 1 naming_controller.go:299] Starting NamingConditionController + I1102 23:22:55.670186 1 establishing_controller.go:81] Starting EstablishingController + I1102 23:22:55.670200 1 nonstructuralschema_controller.go:195] Starting NonStructuralSchemaConditionController + I1102 23:22:55.670209 1 apiapproval_controller.go:189] Starting KubernetesAPIApprovalPolicyConformantConditionController + I1102 23:22:55.670219 1 crd_finalizer.go:269] Starting CRDFinalizer + I1102 23:22:55.670260 1 repairip.go:210] Starting ipallocator-repair-controller + I1102 23:22:55.670264 1 shared_informer.go:349] "Waiting for caches to sync" controller="ipallocator-repair-controller" + I1102 23:22:55.670368 1 cluster_authentication_trust_controller.go:459] Starting cluster_authentication_trust_controller controller + I1102 23:22:55.670375 1 shared_informer.go:349] "Waiting for caches to sync" controller="cluster_authentication_trust_controller" + I1102 23:22:55.670393 1 remote_available_controller.go:425] Starting RemoteAvailability controller + I1102 23:22:55.670397 1 cache.go:32] Waiting for caches to sync for RemoteAvailability controller + I1102 23:22:55.670408 1 apiservice_controller.go:100] Starting APIServiceRegistrationController + I1102 23:22:55.670410 1 cache.go:32] Waiting for caches to sync for APIServiceRegistrationController controller + I1102 23:22:55.670590 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:22:55.670728 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:22:55.670900 1 system_namespaces_controller.go:66] Starting system namespaces controller + E1102 23:22:55.741677 1 controller.go:145] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms" + I1102 
23:22:55.742644 1 shared_informer.go:356] "Caches are synced" controller="node_authorizer" + I1102 23:22:55.748865 1 shared_informer.go:356] "Caches are synced" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:22:55.748877 1 policy_source.go:240] refreshing policies + I1102 23:22:55.770012 1 shared_informer.go:356] "Caches are synced" controller="configmaps" + I1102 23:22:55.770030 1 shared_informer.go:356] "Caches are synced" controller="crd-autoregister" + I1102 23:22:55.770055 1 aggregator.go:171] initial CRD sync complete... + I1102 23:22:55.770011 1 shared_informer.go:356] "Caches are synced" controller="kubernetes-service-cidr-controller" + I1102 23:22:55.770064 1 autoregister_controller.go:144] Starting autoregister controller + I1102 23:22:55.770066 1 apf_controller.go:382] Running API Priority and Fairness config worker + I1102 23:22:55.770073 1 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process + I1102 23:22:55.770068 1 cache.go:32] Waiting for caches to sync for autoregister controller + I1102 23:22:55.770080 1 cache.go:39] Caches are synced for autoregister controller + I1102 23:22:55.770078 1 default_servicecidr_controller.go:166] Creating default ServiceCIDR with CIDRs: [10.96.0.0/12] + I1102 23:22:55.770056 1 cache.go:39] Caches are synced for LocalAvailability controller + I1102 23:22:55.770282 1 shared_informer.go:356] "Caches are synced" controller="ipallocator-repair-controller" + I1102 23:22:55.770439 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller + I1102 23:22:55.770488 1 shared_informer.go:356] "Caches are synced" controller="cluster_authentication_trust_controller" + I1102 23:22:55.770491 1 cache.go:39] Caches are synced for RemoteAvailability controller + I1102 23:22:55.770505 1 handler_discovery.go:451] Starting ResourceDiscoveryManager + I1102 23:22:55.772109 1 controller.go:667] quota admission added evaluator for: namespaces + I1102 23:22:55.776034 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:22:55.779333 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True + I1102 23:22:55.785078 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:22:55.786477 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller + I1102 23:22:55.943731 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io + I1102 23:22:56.672172 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000 + I1102 23:22:56.674198 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000 + I1102 23:22:56.674206 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist. 
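Editor's note: the two PriorityClass entries above (system-node-critical at 2000001000 and system-cluster-critical at 2000000000) are the built-in classes the apiserver creates at startup; the kube-proxy pod described later in this log runs under system-node-critical. A hedged client-go sketch, under the same kubeconfig assumption, that lists whatever PriorityClasses the cluster ends up with:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    	if err != nil {
    		log.Fatal(err)
    	}
    	cs, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		log.Fatal(err)
    	}
    	pcs, err := cs.SchedulingV1().PriorityClasses().List(context.Background(), metav1.ListOptions{})
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Expected to include the two built-ins logged above plus any user-defined classes.
    	for _, pc := range pcs.Items {
    		fmt.Printf("%s\t%d\n", pc.Name, pc.Value)
    	}
    }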
+ I1102 23:22:56.890674 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io + I1102 23:22:56.909141 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io + I1102 23:22:56.974622 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"} + W1102 23:22:56.977613 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.76.2] + I1102 23:22:56.978186 1 controller.go:667] quota admission added evaluator for: endpoints + I1102 23:22:56.980304 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io + I1102 23:22:57.679163 1 controller.go:667] quota admission added evaluator for: serviceaccounts + I1102 23:22:57.976828 1 controller.go:667] quota admission added evaluator for: deployments.apps + I1102 23:22:57.980840 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"} + I1102 23:22:57.984509 1 controller.go:667] quota admission added evaluator for: daemonsets.apps + I1102 23:23:03.530349 1 controller.go:667] quota admission added evaluator for: replicasets.apps + I1102 23:23:03.631068 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:23:03.633117 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:23:03.780790 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps + I1102 23:23:39.806030 1 alloc.go:328] "allocated clusterIPs" service="default/netcat" clusterIPs={"IPv4":"10.104.16.177"} + E1102 23:23:49.127299 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:59798: use of closed network connection + E1102 23:23:49.236727 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:39050: use of closed network connection + E1102 23:23:49.357743 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:39060: use of closed network connection + E1102 23:23:54.450291 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:39074: use of closed network connection + E1102 23:23:54.522134 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:39090: use of closed network connection + E1102 23:23:54.618905 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:39112: use of closed network connection + E1102 23:23:54.683601 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:39136: use of closed network connection + E1102 23:23:54.747060 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:39152: use of closed network connection + + + >>> host: /etc/cni: + /etc/cni/net.d/cni.lock + /etc/cni/net.d/87-podman-bridge.conflist.mk_disabled + { + "cniVersion": "0.4.0", + "name": "podman", + "plugins": [ + { + "type": "bridge", + "bridge": "cni-podman0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [{ "dst": "0.0.0.0/0" }], + "ranges": [ + [ + { + "subnet": "10.88.0.0/16", + "gateway": "10.88.0.1" + } + ] + ] + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type": "firewall" + }, + { + "type": "tuning" + } + ] + } + /etc/cni/net.d/10-crio-bridge.conflist.disabled.mk_disabled + { + "cniVersion": "1.0.0", + "name": "crio", + "plugins": [ + { + "type": "bridge", + "bridge": 
"cni0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [ + { "dst": "0.0.0.0/0" }, + { "dst": "::/0" } + ], + "ranges": [ + [{ "subnet": "10.85.0.0/16" }], + [{ "subnet": "1100:200::/24" }] + ] + } + } + ] + } + /etc/cni/net.d/1-k8s.conflist + + { + "cniVersion": "0.4.0", + "name": "bridge", + "plugins": [ + { + "type": "bridge", + "bridge": "bridge", + "addIf": "true", + "isDefaultGateway": true, + "forceAddress": false, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "subnet": "10.244.0.0/16" + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type": "firewall" + } + ] + } + + + >>> host: ip a s: + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: eth0@if318: mtu 1500 qdisc noqueue state UP group default + link/ether aa:e3:d6:07:48:22 brd ff:ff:ff:ff:ff:ff link-netnsid 0 + inet 192.168.76.2/24 brd 192.168.76.255 scope global eth0 + valid_lft forever preferred_lft forever + 3: docker0: mtu 1500 qdisc noqueue state DOWN group default + link/ether 56:4d:c8:00:ac:52 brd ff:ff:ff:ff:ff:ff + inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0 + valid_lft forever preferred_lft forever + 4: bridge: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether 8e:58:b9:a7:02:a1 brd ff:ff:ff:ff:ff:ff + inet 10.244.0.1/16 brd 10.244.255.255 scope global bridge + valid_lft forever preferred_lft forever + inet6 fe80::8c58:b9ff:fea7:2a1/64 scope link + valid_lft forever preferred_lft forever + 5: vethbd9ca9f0@if2: mtu 1500 qdisc noqueue master bridge state UP group default + link/ether 9a:6f:20:8b:be:27 brd ff:ff:ff:ff:ff:ff link-netnsid 1 + inet6 fe80::986f:20ff:fe8b:be27/64 scope link + valid_lft forever preferred_lft forever + 6: veth2b84ccc2@if2: mtu 1500 qdisc noqueue master bridge state UP group default + link/ether a2:df:d5:3e:d7:23 brd ff:ff:ff:ff:ff:ff link-netnsid 2 + inet6 fe80::a0df:d5ff:fe3e:d723/64 scope link + valid_lft forever preferred_lft forever + + + >>> host: ip r s: + default via 192.168.76.1 dev eth0 + 10.244.0.0/16 dev bridge proto kernel scope link src 10.244.0.1 + 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown + 192.168.76.0/24 dev eth0 proto kernel scope link src 192.168.76.2 + + + >>> host: iptables-save: + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:23:56 2025 + *mangle + :PREROUTING ACCEPT [23923:56290984] + :INPUT ACCEPT [23864:56286746] + :FORWARD ACCEPT [59:4238] + :OUTPUT ACCEPT [18281:5715630] + :POSTROUTING ACCEPT [18340:5719868] + :KUBE-IPTABLES-HINT - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-PROXY-CANARY - [0:0] + COMMIT + # Completed on Sun Nov 2 23:23:56 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:23:56 2025 + *filter + :INPUT ACCEPT [4627:1037349] + :FORWARD ACCEPT [0:0] + :OUTPUT ACCEPT [4570:1339507] + :CNI-ADMIN - [0:0] + :CNI-FORWARD - [0:0] + :DOCKER - [0:0] + :DOCKER-BRIDGE - [0:0] + :DOCKER-CT - [0:0] + :DOCKER-FORWARD - [0:0] + :DOCKER-ISOLATION-STAGE-1 - [0:0] + :DOCKER-ISOLATION-STAGE-2 - [0:0] + :DOCKER-USER - [0:0] + :KUBE-EXTERNAL-SERVICES - [0:0] + :KUBE-FIREWALL - [0:0] + :KUBE-FORWARD - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-PROXY-FIREWALL - [0:0] + :KUBE-SERVICES - 
[0:0] + -A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A INPUT -m comment --comment "kubernetes health check service ports" -j KUBE-NODEPORTS + -A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A INPUT -j KUBE-FIREWALL + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A FORWARD -m comment --comment "CNI firewall plugin rules" -j CNI-FORWARD + -A FORWARD -j DOCKER-USER + -A FORWARD -j DOCKER-FORWARD + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -j KUBE-FIREWALL + -A CNI-FORWARD -m comment --comment "CNI firewall plugin admin overrides" -j CNI-ADMIN + -A CNI-FORWARD -d 10.244.0.2/32 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A CNI-FORWARD -s 10.244.0.2/32 -j ACCEPT + -A CNI-FORWARD -d 10.244.0.3/32 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A CNI-FORWARD -s 10.244.0.3/32 -j ACCEPT + -A DOCKER ! -i docker0 -o docker0 -j DROP + -A DOCKER-BRIDGE -o docker0 -j DOCKER + -A DOCKER-CT -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A DOCKER-FORWARD -j DOCKER-CT + -A DOCKER-FORWARD -j DOCKER-ISOLATION-STAGE-1 + -A DOCKER-FORWARD -j DOCKER-BRIDGE + -A DOCKER-FORWARD -i docker0 -j ACCEPT + -A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2 + -A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP + -A KUBE-FIREWALL ! -s 127.0.0.0/8 -d 127.0.0.0/8 -m comment --comment "block incoming localnet connections" -m conntrack ! 
--ctstate RELATED,ESTABLISHED,DNAT -j DROP + -A KUBE-FORWARD -m conntrack --ctstate INVALID -m nfacct --nfacct-name ct_state_invalid_dropped_pkts -j DROP + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + COMMIT + # Completed on Sun Nov 2 23:23:56 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:23:56 2025 + *nat + :PREROUTING ACCEPT [38:2280] + :INPUT ACCEPT [38:2280] + :OUTPUT ACCEPT [60:3600] + :POSTROUTING ACCEPT [60:3600] + :CNI-3e42b6b7a5e13cef33897519 - [0:0] + :CNI-ec84fbd9fd440e4a002a4f2b - [0:0] + :DOCKER - [0:0] + :DOCKER_OUTPUT - [0:0] + :DOCKER_POSTROUTING - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-MARK-MASQ - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-POSTROUTING - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-SEP-IT2ZTR26TO4XFPTO - [0:0] + :KUBE-SEP-M66F6TD25XSFZOMV - [0:0] + :KUBE-SEP-N4G2XR5TDX7PQE7P - [0:0] + :KUBE-SEP-UTWFOSUDHOCXYA2F - [0:0] + :KUBE-SEP-YIL6JZP7A3QYXJU2 - [0:0] + :KUBE-SERVICES - [0:0] + :KUBE-SVC-ERIFXISQEP7F7OF4 - [0:0] + :KUBE-SVC-JD5MR3NA4I4DYORP - [0:0] + :KUBE-SVC-NPX46M4PTMTKRN6Y - [0:0] + :KUBE-SVC-TCOU7JCQXEZGVUNU - [0:0] + :KUBE-SVC-WDP22YZC5S6MZWYX - [0:0] + -A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A PREROUTING -d 192.168.76.1/32 -j DOCKER_OUTPUT + -A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER + -A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -d 192.168.76.1/32 -j DOCKER_OUTPUT + -A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER + -A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING + -A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE + -A POSTROUTING -d 192.168.76.1/32 -j DOCKER_POSTROUTING + -A POSTROUTING -s 10.244.0.2/32 -m comment --comment "name: \"bridge\" id: \"28d64c4d85acec648924025d9b0a977bfe671d58430857f1d5f9cf13f288e956\"" -j CNI-3e42b6b7a5e13cef33897519 + -A POSTROUTING -s 10.244.0.3/32 -m comment --comment "name: \"bridge\" id: \"42fd62c06fc88d6660fb4ffee19373915c59ee6e593016a5e9a6eb6d158cd7e6\"" -j CNI-ec84fbd9fd440e4a002a4f2b + -A CNI-3e42b6b7a5e13cef33897519 -d 10.244.0.0/16 -m comment --comment "name: \"bridge\" id: \"28d64c4d85acec648924025d9b0a977bfe671d58430857f1d5f9cf13f288e956\"" -j ACCEPT + -A CNI-3e42b6b7a5e13cef33897519 ! -d 224.0.0.0/4 -m comment --comment "name: \"bridge\" id: \"28d64c4d85acec648924025d9b0a977bfe671d58430857f1d5f9cf13f288e956\"" -j MASQUERADE + -A CNI-ec84fbd9fd440e4a002a4f2b -d 10.244.0.0/16 -m comment --comment "name: \"bridge\" id: \"42fd62c06fc88d6660fb4ffee19373915c59ee6e593016a5e9a6eb6d158cd7e6\"" -j ACCEPT + -A CNI-ec84fbd9fd440e4a002a4f2b ! -d 224.0.0.0/4 -m comment --comment "name: \"bridge\" id: \"42fd62c06fc88d6660fb4ffee19373915c59ee6e593016a5e9a6eb6d158cd7e6\"" -j MASQUERADE + -A DOCKER -i docker0 -j RETURN + -A DOCKER_OUTPUT -d 192.168.76.1/32 -p tcp -m tcp --dport 53 -j DNAT --to-destination 127.0.0.11:45227 + -A DOCKER_OUTPUT -d 192.168.76.1/32 -p udp -m udp --dport 53 -j DNAT --to-destination 127.0.0.11:49232 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p tcp -m tcp --sport 45227 -j SNAT --to-source 192.168.76.1:53 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p udp -m udp --sport 49232 -j SNAT --to-source 192.168.76.1:53 + -A KUBE-MARK-MASQ -j MARK --set-xmark 0x4000/0x4000 + -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN + -A KUBE-POSTROUTING -j MARK --set-xmark 0x4000/0x0 + -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE --random-fully + -A KUBE-SEP-IT2ZTR26TO4XFPTO -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-MARK-MASQ + -A KUBE-SEP-IT2ZTR26TO4XFPTO -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m tcp -j DNAT --to-destination 10.244.0.2:53 + -A KUBE-SEP-M66F6TD25XSFZOMV -s 192.168.76.2/32 -m comment --comment "default/kubernetes:https" -j KUBE-MARK-MASQ + -A KUBE-SEP-M66F6TD25XSFZOMV -p tcp -m comment --comment "default/kubernetes:https" -m tcp -j DNAT --to-destination 192.168.76.2:8443 + -A KUBE-SEP-N4G2XR5TDX7PQE7P -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:metrics" -j KUBE-MARK-MASQ + -A KUBE-SEP-N4G2XR5TDX7PQE7P -p tcp -m comment --comment "kube-system/kube-dns:metrics" -m tcp -j DNAT --to-destination 10.244.0.2:9153 + -A KUBE-SEP-UTWFOSUDHOCXYA2F -s 10.244.0.3/32 -m comment --comment "default/netcat" -j KUBE-MARK-MASQ + -A KUBE-SEP-UTWFOSUDHOCXYA2F -p tcp -m comment --comment "default/netcat" -m tcp -j DNAT --to-destination 10.244.0.3:8080 + -A KUBE-SEP-YIL6JZP7A3QYXJU2 -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-MARK-MASQ + -A KUBE-SEP-YIL6JZP7A3QYXJU2 -p udp -m comment --comment "kube-system/kube-dns:dns" -m udp -j DNAT --to-destination 10.244.0.2:53 + -A KUBE-SERVICES -d 10.104.16.177/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-SVC-WDP22YZC5S6MZWYX + -A KUBE-SERVICES -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-NPX46M4PTMTKRN6Y + -A KUBE-SERVICES -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-SVC-TCOU7JCQXEZGVUNU + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-SVC-ERIFXISQEP7F7OF4 + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-SVC-JD5MR3NA4I4DYORP + -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS + -A KUBE-SVC-ERIFXISQEP7F7OF4 ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-ERIFXISQEP7F7OF4 -m comment --comment "kube-system/kube-dns:dns-tcp -> 10.244.0.2:53" -j KUBE-SEP-IT2ZTR26TO4XFPTO + -A KUBE-SVC-JD5MR3NA4I4DYORP ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-MARK-MASQ + -A KUBE-SVC-JD5MR3NA4I4DYORP -m comment --comment "kube-system/kube-dns:metrics -> 10.244.0.2:9153" -j KUBE-SEP-N4G2XR5TDX7PQE7P + -A KUBE-SVC-NPX46M4PTMTKRN6Y ! -s 10.244.0.0/16 -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-MARK-MASQ + -A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https -> 192.168.76.2:8443" -j KUBE-SEP-M66F6TD25XSFZOMV + -A KUBE-SVC-TCOU7JCQXEZGVUNU ! 
-s 10.244.0.0/16 -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-TCOU7JCQXEZGVUNU -m comment --comment "kube-system/kube-dns:dns -> 10.244.0.2:53" -j KUBE-SEP-YIL6JZP7A3QYXJU2 + -A KUBE-SVC-WDP22YZC5S6MZWYX ! -s 10.244.0.0/16 -d 10.104.16.177/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-MARK-MASQ + -A KUBE-SVC-WDP22YZC5S6MZWYX -m comment --comment "default/netcat -> 10.244.0.3:8080" -j KUBE-SEP-UTWFOSUDHOCXYA2F + COMMIT + # Completed on Sun Nov 2 23:23:56 2025 + + + >>> host: iptables table nat: + Chain PREROUTING (policy ACCEPT 39 packets, 2340 bytes) + pkts bytes target prot opt in out source destination + 54 3400 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 1 85 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.76.1 + 47 2820 DOCKER 0 -- * * 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type LOCAL + + Chain INPUT (policy ACCEPT 39 packets, 2340 bytes) + pkts bytes target prot opt in out source destination + + Chain OUTPUT (policy ACCEPT 60 packets, 3600 bytes) + pkts bytes target prot opt in out source destination + 552 45146 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 338 32440 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.76.1 + 99 5940 DOCKER 0 -- * * 0.0.0.0/0 !127.0.0.0/8 ADDRTYPE match dst-type LOCAL + + Chain POSTROUTING (policy ACCEPT 60 packets, 3600 bytes) + pkts bytes target prot opt in out source destination + 562 45881 KUBE-POSTROUTING 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */ + 0 0 MASQUERADE 0 -- * !docker0 172.17.0.0/16 0.0.0.0/0 + 0 0 DOCKER_POSTROUTING 0 -- * * 0.0.0.0/0 192.168.76.1 + 3 180 CNI-3e42b6b7a5e13cef33897519 0 -- * * 10.244.0.2 0.0.0.0/0 /* name: "bridge" id: "28d64c4d85acec648924025d9b0a977bfe671d58430857f1d5f9cf13f288e956" */ + 9 675 CNI-ec84fbd9fd440e4a002a4f2b 0 -- * * 10.244.0.3 0.0.0.0/0 /* name: "bridge" id: "42fd62c06fc88d6660fb4ffee19373915c59ee6e593016a5e9a6eb6d158cd7e6" */ + + Chain CNI-3e42b6b7a5e13cef33897519 (1 references) + pkts bytes target prot opt in out source destination + 0 0 ACCEPT 0 -- * * 0.0.0.0/0 10.244.0.0/16 /* name: "bridge" id: "28d64c4d85acec648924025d9b0a977bfe671d58430857f1d5f9cf13f288e956" */ + 3 180 MASQUERADE 0 -- * * 0.0.0.0/0 !224.0.0.0/4 /* name: "bridge" id: "28d64c4d85acec648924025d9b0a977bfe671d58430857f1d5f9cf13f288e956" */ + + Chain CNI-ec84fbd9fd440e4a002a4f2b (1 references) + pkts bytes target prot opt in out source destination + 9 675 ACCEPT 0 -- * * 0.0.0.0/0 10.244.0.0/16 /* name: "bridge" id: "42fd62c06fc88d6660fb4ffee19373915c59ee6e593016a5e9a6eb6d158cd7e6" */ + 0 0 MASQUERADE 0 -- * * 0.0.0.0/0 !224.0.0.0/4 /* name: "bridge" id: "42fd62c06fc88d6660fb4ffee19373915c59ee6e593016a5e9a6eb6d158cd7e6" */ + + Chain DOCKER (2 references) + pkts bytes target prot opt in out source destination + 0 0 RETURN 0 -- docker0 * 0.0.0.0/0 0.0.0.0/0 + + Chain DOCKER_OUTPUT (2 references) + pkts bytes target prot opt in out source destination + 0 0 DNAT 6 -- * * 0.0.0.0/0 192.168.76.1 tcp dpt:53 to:127.0.0.11:45227 + 339 32525 DNAT 17 -- * * 0.0.0.0/0 192.168.76.1 udp dpt:53 to:127.0.0.11:49232 + + Chain DOCKER_POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 0 0 SNAT 6 -- * * 127.0.0.11 0.0.0.0/0 tcp spt:45227 to:192.168.76.1:53 + 0 0 SNAT 17 -- * * 127.0.0.11 0.0.0.0/0 udp spt:49232 to:192.168.76.1:53 + + Chain KUBE-KUBELET-CANARY (0 references) + pkts bytes target prot opt in out source destination 
+ + Chain KUBE-MARK-MASQ (10 references) + pkts bytes target prot opt in out source destination + 1 60 MARK 0 -- * * 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000 + + Chain KUBE-NODEPORTS (1 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 69 4275 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0 mark match ! 0x4000/0x4000 + 1 60 MARK 0 -- * * 0.0.0.0/0 0.0.0.0/0 MARK xor 0x4000 + 1 60 MASQUERADE 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ random-fully + + Chain KUBE-PROXY-CANARY (0 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-SEP-IT2ZTR26TO4XFPTO (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ tcp to:10.244.0.2:53 + + Chain KUBE-SEP-M66F6TD25XSFZOMV (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 0 -- * * 192.168.76.2 0.0.0.0/0 /* default/kubernetes:https */ + 2 120 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https */ tcp to:192.168.76.2:8443 + + Chain KUBE-SEP-N4G2XR5TDX7PQE7P (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:metrics */ + 0 0 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:metrics */ tcp to:10.244.0.2:9153 + + Chain KUBE-SEP-UTWFOSUDHOCXYA2F (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 0 -- * * 10.244.0.3 0.0.0.0/0 /* default/netcat */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat */ tcp to:10.244.0.3:8080 + + Chain KUBE-SEP-YIL6JZP7A3QYXJU2 (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:dns */ + 8 615 DNAT 17 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns */ udp to:10.244.0.2:53 + + Chain KUBE-SERVICES (2 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-SVC-WDP22YZC5S6MZWYX 6 -- * * 0.0.0.0/0 10.104.16.177 /* default/netcat cluster IP */ tcp dpt:8080 + 0 0 KUBE-SVC-NPX46M4PTMTKRN6Y 6 -- * * 0.0.0.0/0 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 8 615 KUBE-SVC-TCOU7JCQXEZGVUNU 17 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 1 60 KUBE-SVC-ERIFXISQEP7F7OF4 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 0 0 KUBE-SVC-JD5MR3NA4I4DYORP 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 97 5820 KUBE-NODEPORTS 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL + + Chain KUBE-SVC-ERIFXISQEP7F7OF4 (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 1 60 KUBE-SEP-IT2ZTR26TO4XFPTO 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp -> 10.244.0.2:53 */ + + Chain KUBE-SVC-JD5MR3NA4I4DYORP (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 0 0 KUBE-SEP-N4G2XR5TDX7PQE7P 0 -- * * 0.0.0.0/0 
0.0.0.0/0 /* kube-system/kube-dns:metrics -> 10.244.0.2:9153 */ + + Chain KUBE-SVC-NPX46M4PTMTKRN6Y (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 2 120 KUBE-SEP-M66F6TD25XSFZOMV 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https -> 192.168.76.2:8443 */ + + Chain KUBE-SVC-TCOU7JCQXEZGVUNU (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 17 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 8 615 KUBE-SEP-YIL6JZP7A3QYXJU2 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns -> 10.244.0.2:53 */ + + Chain KUBE-SVC-WDP22YZC5S6MZWYX (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.104.16.177 /* default/netcat cluster IP */ tcp dpt:8080 + 1 60 KUBE-SEP-UTWFOSUDHOCXYA2F 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat -> 10.244.0.3:8080 */ + + + >>> k8s: describe kube-proxy daemon set: + Name: kube-proxy + Namespace: kube-system + Selector: k8s-app=kube-proxy + Node-Selector: kubernetes.io/os=linux + Labels: k8s-app=kube-proxy + Annotations: deprecated.daemonset.template.generation: 1 + Desired Number of Nodes Scheduled: 1 + Current Number of Nodes Scheduled: 1 + Number of Nodes Scheduled with Up-to-date Pods: 1 + Number of Nodes Scheduled with Available Pods: 1 + Number of Nodes Misscheduled: 0 + Pods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed + Pod Template: + Labels: k8s-app=kube-proxy + Service Account: kube-proxy + Containers: + kube-proxy: + Image: registry.k8s.io/kube-proxy:v1.34.1 + Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + --hostname-override=$(NODE_NAME) + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + Priority Class Name: system-node-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 54s daemonset-controller Created pod: kube-proxy-9w7hc + + + >>> k8s: describe kube-proxy pod(s): + Name: kube-proxy-9w7hc + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Service Account: kube-proxy + Node: auto-999044/192.168.76.2 + Start Time: Sun, 02 Nov 2025 23:23:03 +0000 + Labels: controller-revision-hash=66486579fc + k8s-app=kube-proxy + pod-template-generation=1 + Annotations: + Status: Running + IP: 192.168.76.2 + IPs: + IP: 192.168.76.2 + Controlled By: DaemonSet/kube-proxy + Containers: + kube-proxy: + Container ID: docker://9c0ad8c63852773a0a6d2a289c1aec537e55c3ebae43bbf8ad5485df21993a6b + Image: registry.k8s.io/kube-proxy:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-proxy@sha256:913cc83ca0b5588a81d86ce8eedeb3ed1e9c1326e81852a1ea4f622b74ff749a + Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + --hostname-override=$(NODE_NAME) + State: Running + 
Started: Sun, 02 Nov 2025 23:23:04 +0000 + Ready: True + Restart Count: 0 + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-wh4p6 (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + kube-api-access-wh4p6: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + node.kubernetes.io/disk-pressure:NoSchedule op=Exists + node.kubernetes.io/memory-pressure:NoSchedule op=Exists + node.kubernetes.io/network-unavailable:NoSchedule op=Exists + node.kubernetes.io/not-ready:NoExecute op=Exists + node.kubernetes.io/pid-pressure:NoSchedule op=Exists + node.kubernetes.io/unreachable:NoExecute op=Exists + node.kubernetes.io/unschedulable:NoSchedule op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 54s default-scheduler Successfully assigned kube-system/kube-proxy-9w7hc to auto-999044 + Normal Pulled 53s kubelet Container image "registry.k8s.io/kube-proxy:v1.34.1" already present on machine + Normal Created 53s kubelet Created container: kube-proxy + Normal Started 53s kubelet Started container kube-proxy + + + >>> k8s: kube-proxy logs: + I1102 23:23:04.358146 1 server_linux.go:53] "Using iptables proxy" + I1102 23:23:04.395642 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache" + I1102 23:23:04.495768 1 shared_informer.go:356] "Caches are synced" controller="node informer cache" + I1102 23:23:04.495785 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.76.2"] + E1102 23:23:04.495835 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. 
Consider using `--nodeport-addresses primary`" + I1102 23:23:04.510084 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4" + I1102 23:23:04.510106 1 server_linux.go:132] "Using iptables Proxier" + I1102 23:23:04.513257 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4" + I1102 23:23:04.513440 1 server.go:527] "Version info" version="v1.34.1" + I1102 23:23:04.513447 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + I1102 23:23:04.514052 1 config.go:106] "Starting endpoint slice config controller" + I1102 23:23:04.514066 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config" + I1102 23:23:04.514116 1 config.go:309] "Starting node config controller" + I1102 23:23:04.514178 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config" + I1102 23:23:04.514214 1 config.go:403] "Starting serviceCIDR config controller" + I1102 23:23:04.514221 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config" + I1102 23:23:04.514270 1 config.go:200] "Starting service config controller" + I1102 23:23:04.514278 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config" + I1102 23:23:04.614776 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config" + I1102 23:23:04.614788 1 shared_informer.go:356] "Caches are synced" controller="node config" + I1102 23:23:04.614779 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config" + I1102 23:23:04.614799 1 shared_informer.go:356] "Caches are synced" controller="service config" + + + >>> host: kubelet daemon status: + ● kubelet.service - kubelet: The Kubernetes Node Agent + Loaded: loaded (/lib/systemd/system/kubelet.service; disabled; preset: enabled) + Drop-In: /etc/systemd/system/kubelet.service.d + └─10-kubeadm.conf + Active: active (running) since Sun 2025-11-02 23:22:57 UTC; 59s ago + Docs: http://kubernetes.io/docs/ + Main PID: 2242 (kubelet) + Tasks: 16 (limit: 629145) + Memory: 32.7M + CPU: 1.035s + CGroup: /system.slice/kubelet.service + └─2242 /var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=auto-999044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2 + + Nov 02 23:23:04 auto-999044 kubelet[2242]: I1102 23:23:04.874890 2242 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/610a8793-4eff-41ec-85b6-f04b51443320-kube-api-access-4vn2v" (OuterVolumeSpecName: "kube-api-access-4vn2v") pod "610a8793-4eff-41ec-85b6-f04b51443320" (UID: "610a8793-4eff-41ec-85b6-f04b51443320"). InnerVolumeSpecName "kube-api-access-4vn2v".
PluginName "kubernetes.io/projected", VolumeGIDValue "" + Nov 02 23:23:04 auto-999044 kubelet[2242]: I1102 23:23:04.974173 2242 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/610a8793-4eff-41ec-85b6-f04b51443320-config-volume\") on node \"auto-999044\" DevicePath \"\"" + Nov 02 23:23:04 auto-999044 kubelet[2242]: I1102 23:23:04.974188 2242 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-4vn2v\" (UniqueName: \"kubernetes.io/projected/610a8793-4eff-41ec-85b6-f04b51443320-kube-api-access-4vn2v\") on node \"auto-999044\" DevicePath \"\"" + Nov 02 23:23:06 auto-999044 kubelet[2242]: I1102 23:23:06.531146 2242 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" + Nov 02 23:23:07 auto-999044 kubelet[2242]: I1102 23:23:07.748102 2242 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="610a8793-4eff-41ec-85b6-f04b51443320" path="/var/lib/kubelet/pods/610a8793-4eff-41ec-85b6-f04b51443320/volumes" + Nov 02 23:23:08 auto-999044 kubelet[2242]: I1102 23:23:08.084642 2242 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24" + Nov 02 23:23:08 auto-999044 kubelet[2242]: I1102 23:23:08.085044 2242 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24" + Nov 02 23:23:36 auto-999044 kubelet[2242]: I1102 23:23:36.899461 2242 scope.go:117] "RemoveContainer" containerID="4f07a6c8f344edcafc4b1bd06974785d38344fce09e85f95d0d7748b4bde9d42" + Nov 02 23:23:40 auto-999044 kubelet[2242]: I1102 23:23:40.969031 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkdnk\" (UniqueName: \"kubernetes.io/projected/a4a5ce94-fb38-422d-a463-338a39c9552d-kube-api-access-pkdnk\") pod \"netcat-cd4db9dbf-klkm2\" (UID: \"a4a5ce94-fb38-422d-a463-338a39c9552d\") " pod="default/netcat-cd4db9dbf-klkm2" + Nov 02 23:23:42 auto-999044 kubelet[2242]: I1102 23:23:42.938956 2242 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/netcat-cd4db9dbf-klkm2" podStartSLOduration=2.524758339 podStartE2EDuration="3.938941421s" podCreationTimestamp="2025-11-02 23:23:39 +0000 UTC" firstStartedPulling="2025-11-02 23:23:41.314132427 +0000 UTC m=+43.632333203" lastFinishedPulling="2025-11-02 23:23:42.72831551 +0000 UTC m=+45.046516285" observedRunningTime="2025-11-02 23:23:42.938750835 +0000 UTC m=+45.256951619" watchObservedRunningTime="2025-11-02 23:23:42.938941421 +0000 UTC m=+45.257142202" + + + >>> host: kubelet daemon config: + # /lib/systemd/system/kubelet.service + [Unit] + Description=kubelet: The Kubernetes Node Agent + Documentation=http://kubernetes.io/docs/ + StartLimitIntervalSec=0 + + [Service] + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet + Restart=always + # Tuned for local dev: faster than upstream default (10s), but slower than systemd default (100ms) + RestartSec=600ms + + [Install] + WantedBy=multi-user.target + + # /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + [Unit] + Wants=docker.socket + + [Service] + ExecStart= + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=auto-999044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2 + + [Install] + + +
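The unit file and the kubeadm drop-in above are what determine the kubelet command line that systemd actually runs on the node. Purely as an illustrative sketch, and not minikube's actual log collector (the program, function names, and header strings here are made up for the example), diagnostic sections like the one above and the one below could be reproduced on a systemd-based node with a few lines of Go:

package main

import (
	"fmt"
	"os/exec"
)

// dumpSection runs a host command and prints its combined output under a
// ">>> host:" style header, mimicking the section markers used in this log.
func dumpSection(header, name string, args ...string) {
	out, err := exec.Command(name, args...).CombinedOutput()
	fmt.Printf(">>> host: %s:\n%s\n", header, out)
	if err != nil {
		fmt.Printf("(command %s failed: %v)\n", name, err)
	}
}

func main() {
	// "systemctl cat kubelet" prints the unit file plus every drop-in,
	// i.e. the same content as the "kubelet daemon config" section above.
	dumpSection("kubelet daemon config", "systemctl", "cat", "kubelet")
	// "journalctl -u kubelet -x" restricts output to the kubelet unit and adds
	// the explanatory "Subject:" blocks seen in the "kubelet logs" section below.
	dumpSection("kubelet logs", "journalctl", "-u", "kubelet", "-x", "--no-pager", "-n", "100")
}

systemctl cat and journalctl -u/-x/--no-pager/-n are standard systemd tooling; only the wrapper and its output headers are hypothetical.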
>>> k8s: kubelet logs: + Nov 02 23:22:50 auto-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 404. + Nov 02 23:22:50 auto-999044 kubelet[1551]: E1102 23:22:50.323205 1551 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:22:50 auto-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:22:50 auto-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:22:50 auto-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 1. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:22:50 auto-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 457 and the job result is done. + Nov 02 23:22:51 auto-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 457. + Nov 02 23:22:51 auto-999044 kubelet[1643]: E1102 23:22:51.027663 1643 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:22:51 auto-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:22:51 auto-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. 
+ ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:22:51 auto-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 2. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:22:51 auto-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 510 and the job result is done. + Nov 02 23:22:51 auto-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 510. + Nov 02 23:22:51 auto-999044 kubelet[1726]: E1102 23:22:51.770266 1726 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:22:51 auto-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:22:51 auto-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:22:52 auto-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 3. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:22:52 auto-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 563 and the job result is done. + Nov 02 23:22:52 auto-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 563. 
+ Nov 02 23:22:52 auto-999044 kubelet[1737]: E1102 23:22:52.525248 1737 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:22:52 auto-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:22:52 auto-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:22:53 auto-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 4. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:22:53 auto-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 616 and the job result is done. + Nov 02 23:22:53 auto-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 616. + Nov 02 23:22:53 auto-999044 kubelet[1748]: E1102 23:22:53.270198 1748 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:22:53 auto-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:22:53 auto-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:22:53 auto-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 669 and the job result is done. 
+ Nov 02 23:22:53 auto-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 670. + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.846113 1781 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.846163 1781 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.846185 1781 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.846188 1781 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.846332 1781 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:22:53 auto-999044 kubelet[1781]: E1102 23:22:53.849398 1781 certificate_manager.go:596] "Failed while requesting a signed certificate from the control plane" err="cannot create certificate signing request: Post \"https://192.168.76.2:8443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 192.168.76.2:8443: connect: connection refused" logger="kubernetes.io/kube-apiserver-client-kubelet.UnhandledError" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.849507 1781 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.851503 1781 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.854664 1781 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. 
Defaulting to /" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.854679 1781 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.854788 1781 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.854799 1781 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"auto-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.854880 1781 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.854885 1781 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.854968 1781 container_manager_linux.go:315] "Creating Dynamic Resource Allocation (DRA) manager" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.855602 1781 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.855708 1781 kubelet.go:475] "Attempting to sync node with API server" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.855717 1781 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.855731 1781 kubelet.go:387] "Adding apiserver pod source" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.855746 1781 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:22:53 auto-999044 kubelet[1781]: E1102 23:22:53.856411 1781 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: Get \"https://192.168.76.2:8443/api/v1/nodes?fieldSelector=metadata.name%3Dauto-999044&limit=500&resourceVersion=0\": dial tcp 192.168.76.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" + Nov 02 23:22:53 auto-999044 kubelet[1781]: E1102 23:22:53.856469 1781 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: Get \"https://192.168.76.2:8443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 192.168.76.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.856672 1781 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 
23:22:53.857352 1781 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.857374 1781 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled" + Nov 02 23:22:53 auto-999044 kubelet[1781]: W1102 23:22:53.857408 1781 probe.go:272] Flexvolume plugin directory at /usr/libexec/kubernetes/kubelet-plugins/volume/exec/ does not exist. Recreating. + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.857870 1781 server.go:1262] "Started kubelet" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.857925 1781 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.857960 1781 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.858007 1781 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.858188 1781 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:22:53 auto-999044 kubelet[1781]: E1102 23:22:53.858153 1781 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://192.168.76.2:8443/api/v1/namespaces/default/events\": dial tcp 192.168.76.2:8443: connect: connection refused" event="&Event{ObjectMeta:{auto-999044.1874540e205b9b14 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:auto-999044,UID:auto-999044,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:auto-999044,},FirstTimestamp:2025-11-02 23:22:53.857856276 +0000 UTC m=+0.190418935,LastTimestamp:2025-11-02 23:22:53.857856276 +0000 UTC m=+0.190418935,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:auto-999044,}" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.858303 1781 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.858415 1781 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.858656 1781 server.go:310] "Adding debug handlers to kubelet server" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.858690 1781 volume_manager.go:313] "Starting Kubelet Volume Manager" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.858752 1781 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.858803 1781 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:22:53 auto-999044 kubelet[1781]: E1102 23:22:53.859008 1781 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"auto-999044\" not found" + Nov 02 23:22:53 auto-999044 kubelet[1781]: E1102 23:22:53.859050 1781 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://192.168.76.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/auto-999044?timeout=10s\": dial tcp 192.168.76.2:8443: connect: connection refused" interval="200ms" + Nov 02 23:22:53 auto-999044 kubelet[1781]: E1102 23:22:53.859143 1781 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: Get \"https://192.168.76.2:8443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 192.168.76.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.859152 1781 factory.go:223] Registration of the systemd container factory successfully + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.859274 1781 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.860669 1781 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.865832 1781 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.865840 1781 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.865855 1781 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.866304 1781 policy_none.go:49] "None policy: Start" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.866316 1781 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.866325 1781 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.866643 1781 policy_none.go:47] "Start" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.869367 1781 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.870071 1781 kubelet_network_linux.go:54] "Initialized iptables rules." 
protocol="IPv6" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.870083 1781 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.870103 1781 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:22:53 auto-999044 kubelet[1781]: E1102 23:22:53.870137 1781 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:22:53 auto-999044 kubelet[1781]: E1102 23:22:53.870393 1781 reflector.go:205] "Failed to watch" err="failed to list *v1.RuntimeClass: Get \"https://192.168.76.2:8443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 192.168.76.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass" + Nov 02 23:22:53 auto-999044 kubelet[1781]: E1102 23:22:53.891472 1781 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.891540 1781 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.891551 1781 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.891651 1781 plugin_manager.go:118] "Starting Kubelet Plugin Manager" + Nov 02 23:22:53 auto-999044 kubelet[1781]: E1102 23:22:53.891989 1781 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="no imagefs label for configured runtime" + Nov 02 23:22:53 auto-999044 kubelet[1781]: E1102 23:22:53.892015 1781 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"auto-999044\" not found" + Nov 02 23:22:53 auto-999044 kubelet[1781]: E1102 23:22:53.983459 1781 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"auto-999044\" not found" node="auto-999044" + Nov 02 23:22:53 auto-999044 kubelet[1781]: E1102 23:22:53.986230 1781 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"auto-999044\" not found" node="auto-999044" + Nov 02 23:22:53 auto-999044 kubelet[1781]: I1102 23:22:53.992114 1781 kubelet_node_status.go:75] "Attempting to register node" node="auto-999044" + Nov 02 23:22:53 auto-999044 kubelet[1781]: E1102 23:22:53.992303 1781 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.76.2:8443/api/v1/nodes\": dial tcp 192.168.76.2:8443: connect: connection refused" node="auto-999044" + Nov 02 23:22:53 auto-999044 kubelet[1781]: E1102 23:22:53.994985 1781 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"auto-999044\" not found" node="auto-999044" + Nov 02 23:22:53 auto-999044 kubelet[1781]: E1102 23:22:53.996944 1781 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"auto-999044\" not found" node="auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: I1102 23:22:54.059190 1781 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: 
\"kubernetes.io/host-path/176cf2a6f8cb9b004fb20e6b3dac36b7-etcd-certs\") pod \"etcd-auto-999044\" (UID: \"176cf2a6f8cb9b004fb20e6b3dac36b7\") " pod="kube-system/etcd-auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: I1102 23:22:54.059212 1781 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/db99d263c18f0527b65667b4dd8e6d9b-k8s-certs\") pod \"kube-apiserver-auto-999044\" (UID: \"db99d263c18f0527b65667b4dd8e6d9b\") " pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: I1102 23:22:54.059224 1781 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/db99d263c18f0527b65667b4dd8e6d9b-usr-local-share-ca-certificates\") pod \"kube-apiserver-auto-999044\" (UID: \"db99d263c18f0527b65667b4dd8e6d9b\") " pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: I1102 23:22:54.059233 1781 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/4f96db8544dc29983b9cb6020ed0adb8-ca-certs\") pod \"kube-controller-manager-auto-999044\" (UID: \"4f96db8544dc29983b9cb6020ed0adb8\") " pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: I1102 23:22:54.059244 1781 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/4f96db8544dc29983b9cb6020ed0adb8-etc-ca-certificates\") pod \"kube-controller-manager-auto-999044\" (UID: \"4f96db8544dc29983b9cb6020ed0adb8\") " pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: I1102 23:22:54.059257 1781 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/4f96db8544dc29983b9cb6020ed0adb8-k8s-certs\") pod \"kube-controller-manager-auto-999044\" (UID: \"4f96db8544dc29983b9cb6020ed0adb8\") " pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: I1102 23:22:54.059269 1781 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/db99d263c18f0527b65667b4dd8e6d9b-ca-certs\") pod \"kube-apiserver-auto-999044\" (UID: \"db99d263c18f0527b65667b4dd8e6d9b\") " pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: I1102 23:22:54.059277 1781 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/db99d263c18f0527b65667b4dd8e6d9b-usr-share-ca-certificates\") pod \"kube-apiserver-auto-999044\" (UID: \"db99d263c18f0527b65667b4dd8e6d9b\") " pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: I1102 23:22:54.059298 1781 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/4f96db8544dc29983b9cb6020ed0adb8-flexvolume-dir\") pod \"kube-controller-manager-auto-999044\" (UID: \"4f96db8544dc29983b9cb6020ed0adb8\") " pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: E1102 
23:22:54.059297 1781 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.76.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/auto-999044?timeout=10s\": dial tcp 192.168.76.2:8443: connect: connection refused" interval="400ms" + Nov 02 23:22:54 auto-999044 kubelet[1781]: I1102 23:22:54.059343 1781 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/206186728fc30cc825dbab83f564c27f-kubeconfig\") pod \"kube-scheduler-auto-999044\" (UID: \"206186728fc30cc825dbab83f564c27f\") " pod="kube-system/kube-scheduler-auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: I1102 23:22:54.059362 1781 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/176cf2a6f8cb9b004fb20e6b3dac36b7-etcd-data\") pod \"etcd-auto-999044\" (UID: \"176cf2a6f8cb9b004fb20e6b3dac36b7\") " pod="kube-system/etcd-auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: I1102 23:22:54.059380 1781 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/db99d263c18f0527b65667b4dd8e6d9b-etc-ca-certificates\") pod \"kube-apiserver-auto-999044\" (UID: \"db99d263c18f0527b65667b4dd8e6d9b\") " pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: I1102 23:22:54.059394 1781 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/4f96db8544dc29983b9cb6020ed0adb8-kubeconfig\") pod \"kube-controller-manager-auto-999044\" (UID: \"4f96db8544dc29983b9cb6020ed0adb8\") " pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: I1102 23:22:54.059407 1781 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/4f96db8544dc29983b9cb6020ed0adb8-usr-local-share-ca-certificates\") pod \"kube-controller-manager-auto-999044\" (UID: \"4f96db8544dc29983b9cb6020ed0adb8\") " pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: I1102 23:22:54.059430 1781 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/4f96db8544dc29983b9cb6020ed0adb8-usr-share-ca-certificates\") pod \"kube-controller-manager-auto-999044\" (UID: \"4f96db8544dc29983b9cb6020ed0adb8\") " pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: I1102 23:22:54.192993 1781 kubelet_node_status.go:75] "Attempting to register node" node="auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: E1102 23:22:54.193162 1781 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.76.2:8443/api/v1/nodes\": dial tcp 192.168.76.2:8443: connect: connection refused" node="auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: E1102 23:22:54.460017 1781 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.76.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/auto-999044?timeout=10s\": dial tcp 192.168.76.2:8443: connect: connection refused" interval="800ms" + Nov 02 23:22:54 auto-999044 
kubelet[1781]: I1102 23:22:54.594294 1781 kubelet_node_status.go:75] "Attempting to register node" node="auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: E1102 23:22:54.877447 1781 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"auto-999044\" not found" node="auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: E1102 23:22:54.881428 1781 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"auto-999044\" not found" node="auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: E1102 23:22:54.884695 1781 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"auto-999044\" not found" node="auto-999044" + Nov 02 23:22:54 auto-999044 kubelet[1781]: E1102 23:22:54.887837 1781 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"auto-999044\" not found" node="auto-999044" + Nov 02 23:22:55 auto-999044 kubelet[1781]: E1102 23:22:55.682295 1781 nodelease.go:49] "Failed to get node when trying to set owner ref to the node lease" err="nodes \"auto-999044\" not found" node="auto-999044" + Nov 02 23:22:55 auto-999044 kubelet[1781]: I1102 23:22:55.784501 1781 kubelet_node_status.go:78] "Successfully registered node" node="auto-999044" + Nov 02 23:22:55 auto-999044 kubelet[1781]: E1102 23:22:55.784528 1781 kubelet_node_status.go:486] "Error updating node status, will retry" err="error getting node \"auto-999044\": node \"auto-999044\" not found" + Nov 02 23:22:55 auto-999044 kubelet[1781]: I1102 23:22:55.856937 1781 apiserver.go:52] "Watching apiserver" + Nov 02 23:22:55 auto-999044 kubelet[1781]: I1102 23:22:55.859118 1781 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-auto-999044" + Nov 02 23:22:55 auto-999044 kubelet[1781]: I1102 23:22:55.859127 1781 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:22:55 auto-999044 kubelet[1781]: E1102 23:22:55.861582 1781 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-auto-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/etcd-auto-999044" + Nov 02 23:22:55 auto-999044 kubelet[1781]: I1102 23:22:55.861598 1781 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:55 auto-999044 kubelet[1781]: E1102 23:22:55.862491 1781 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-auto-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:55 auto-999044 kubelet[1781]: I1102 23:22:55.862505 1781 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:55 auto-999044 kubelet[1781]: E1102 23:22:55.863352 1781 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-auto-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:55 auto-999044 kubelet[1781]: I1102 23:22:55.863368 1781 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-auto-999044" + Nov 02 23:22:55 auto-999044 kubelet[1781]: E1102 23:22:55.864130 1781 kubelet.go:3221] "Failed creating a mirror pod" err="pods 
\"kube-scheduler-auto-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-scheduler-auto-999044" + Nov 02 23:22:55 auto-999044 kubelet[1781]: I1102 23:22:55.890116 1781 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-auto-999044" + Nov 02 23:22:55 auto-999044 kubelet[1781]: I1102 23:22:55.890174 1781 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-auto-999044" + Nov 02 23:22:55 auto-999044 kubelet[1781]: I1102 23:22:55.890216 1781 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:55 auto-999044 kubelet[1781]: E1102 23:22:55.891130 1781 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-auto-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/etcd-auto-999044" + Nov 02 23:22:55 auto-999044 kubelet[1781]: E1102 23:22:55.891187 1781 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-auto-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-scheduler-auto-999044" + Nov 02 23:22:55 auto-999044 kubelet[1781]: E1102 23:22:55.891228 1781 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-auto-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:56 auto-999044 kubelet[1781]: I1102 23:22:56.892846 1781 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:57 auto-999044 systemd[1]: Stopping kubelet.service - kubelet: The Kubernetes Node Agent... + ░░ Subject: A stop job for unit kubelet.service has begun execution + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has begun execution. + ░░  + ░░ The job identifier is 802. + Nov 02 23:22:57 auto-999044 kubelet[1781]: I1102 23:22:57.660707 1781 dynamic_cafile_content.go:175] "Shutting down controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:22:57 auto-999044 systemd[1]: kubelet.service: Deactivated successfully. + ░░ Subject: Unit succeeded + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has successfully entered the 'dead' state. + Nov 02 23:22:57 auto-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 802 and the job result is done. + Nov 02 23:22:57 auto-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 802. 
+ Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.709878 2242 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.709939 2242 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.709961 2242 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.709971 2242 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.710141 2242 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.711033 2242 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.712571 2242 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.715868 2242 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.719883 2242 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. Defaulting to /" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.719908 2242 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.720107 2242 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.720125 2242 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"auto-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.720245 2242 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.720252 2242 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.720274 2242 container_manager_linux.go:315] "Creating Dynamic Resource Allocation (DRA) manager" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.720769 2242 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.720900 2242 kubelet.go:475] "Attempting to sync node with API server" + Nov 02 23:22:57 auto-999044 
kubelet[2242]: I1102 23:22:57.720923 2242 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.720943 2242 kubelet.go:387] "Adding apiserver pod source" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.720955 2242 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.721543 2242 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.721859 2242 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.721874 2242 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.722379 2242 server.go:1262] "Started kubelet" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.722476 2242 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.722625 2242 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.722690 2242 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.725506 2242 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.725807 2242 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:22:57 auto-999044 kubelet[2242]: E1102 23:22:57.726295 2242 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"auto-999044\" not found" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.725511 2242 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.727642 2242 volume_manager.go:313] "Starting Kubelet Volume Manager" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.728649 2242 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.729064 2242 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.729276 2242 server.go:310] "Adding debug handlers to kubelet server" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.732369 2242 factory.go:223] Registration of the systemd container factory successfully + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.736406 2242 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.738075 2242 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.743116 2242 kubelet_network_linux.go:54] "Initialized iptables rules." 
protocol="IPv4" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.743933 2242 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv6" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.743954 2242 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.743997 2242 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:22:57 auto-999044 kubelet[2242]: E1102 23:22:57.744038 2242 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.752794 2242 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.752804 2242 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.752819 2242 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.752981 2242 state_mem.go:88] "Updated default CPUSet" cpuSet="" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.752992 2242 state_mem.go:96] "Updated CPUSet assignments" assignments={} + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.753005 2242 policy_none.go:49] "None policy: Start" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.753012 2242 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.753022 2242 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.753091 2242 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.753097 2242 policy_none.go:47] "Start" + Nov 02 23:22:57 auto-999044 kubelet[2242]: E1102 23:22:57.755574 2242 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.755679 2242 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.755690 2242 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:22:57 auto-999044 kubelet[2242]: E1102 23:22:57.756329 2242 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." 
err="no imagefs label for configured runtime" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.756529 2242 plugin_manager.go:118] "Starting Kubelet Plugin Manager" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.844679 2242 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-auto-999044" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.844721 2242 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.844794 2242 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-auto-999044" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.844888 2242 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:57 auto-999044 kubelet[2242]: E1102 23:22:57.848123 2242 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-auto-999044\" already exists" pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.856480 2242 kubelet_node_status.go:75] "Attempting to register node" node="auto-999044" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.859572 2242 kubelet_node_status.go:124] "Node was previously registered" node="auto-999044" + Nov 02 23:22:57 auto-999044 kubelet[2242]: I1102 23:22:57.859624 2242 kubelet_node_status.go:78] "Successfully registered node" node="auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.030385 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/db99d263c18f0527b65667b4dd8e6d9b-k8s-certs\") pod \"kube-apiserver-auto-999044\" (UID: \"db99d263c18f0527b65667b4dd8e6d9b\") " pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.030412 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/db99d263c18f0527b65667b4dd8e6d9b-usr-local-share-ca-certificates\") pod \"kube-apiserver-auto-999044\" (UID: \"db99d263c18f0527b65667b4dd8e6d9b\") " pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.030429 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/4f96db8544dc29983b9cb6020ed0adb8-etc-ca-certificates\") pod \"kube-controller-manager-auto-999044\" (UID: \"4f96db8544dc29983b9cb6020ed0adb8\") " pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.030442 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/4f96db8544dc29983b9cb6020ed0adb8-flexvolume-dir\") pod \"kube-controller-manager-auto-999044\" (UID: \"4f96db8544dc29983b9cb6020ed0adb8\") " pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.030457 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/4f96db8544dc29983b9cb6020ed0adb8-usr-local-share-ca-certificates\") pod 
\"kube-controller-manager-auto-999044\" (UID: \"4f96db8544dc29983b9cb6020ed0adb8\") " pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.030472 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/176cf2a6f8cb9b004fb20e6b3dac36b7-etcd-data\") pod \"etcd-auto-999044\" (UID: \"176cf2a6f8cb9b004fb20e6b3dac36b7\") " pod="kube-system/etcd-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.030484 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/4f96db8544dc29983b9cb6020ed0adb8-ca-certs\") pod \"kube-controller-manager-auto-999044\" (UID: \"4f96db8544dc29983b9cb6020ed0adb8\") " pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.030498 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/4f96db8544dc29983b9cb6020ed0adb8-k8s-certs\") pod \"kube-controller-manager-auto-999044\" (UID: \"4f96db8544dc29983b9cb6020ed0adb8\") " pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.030537 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/176cf2a6f8cb9b004fb20e6b3dac36b7-etcd-certs\") pod \"etcd-auto-999044\" (UID: \"176cf2a6f8cb9b004fb20e6b3dac36b7\") " pod="kube-system/etcd-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.030560 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/db99d263c18f0527b65667b4dd8e6d9b-ca-certs\") pod \"kube-apiserver-auto-999044\" (UID: \"db99d263c18f0527b65667b4dd8e6d9b\") " pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.030570 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/206186728fc30cc825dbab83f564c27f-kubeconfig\") pod \"kube-scheduler-auto-999044\" (UID: \"206186728fc30cc825dbab83f564c27f\") " pod="kube-system/kube-scheduler-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.030582 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/db99d263c18f0527b65667b4dd8e6d9b-usr-share-ca-certificates\") pod \"kube-apiserver-auto-999044\" (UID: \"db99d263c18f0527b65667b4dd8e6d9b\") " pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.030606 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/4f96db8544dc29983b9cb6020ed0adb8-kubeconfig\") pod \"kube-controller-manager-auto-999044\" (UID: \"4f96db8544dc29983b9cb6020ed0adb8\") " pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.030634 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: 
\"kubernetes.io/host-path/4f96db8544dc29983b9cb6020ed0adb8-usr-share-ca-certificates\") pod \"kube-controller-manager-auto-999044\" (UID: \"4f96db8544dc29983b9cb6020ed0adb8\") " pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.030657 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/db99d263c18f0527b65667b4dd8e6d9b-etc-ca-certificates\") pod \"kube-apiserver-auto-999044\" (UID: \"db99d263c18f0527b65667b4dd8e6d9b\") " pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.721887 2242 apiserver.go:52] "Watching apiserver" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.729591 2242 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.767479 2242 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.767628 2242 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.768412 2242 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: E1102 23:22:58.774098 2242 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-auto-999044\" already exists" pod="kube-system/etcd-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: E1102 23:22:58.774248 2242 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-auto-999044\" already exists" pod="kube-system/kube-controller-manager-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: E1102 23:22:58.777101 2242 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-auto-999044\" already exists" pod="kube-system/kube-apiserver-auto-999044" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.791114 2242 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-apiserver-auto-999044" podStartSLOduration=2.791096004 podStartE2EDuration="2.791096004s" podCreationTimestamp="2025-11-02 23:22:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:22:58.790969183 +0000 UTC m=+1.109169961" watchObservedRunningTime="2025-11-02 23:22:58.791096004 +0000 UTC m=+1.109296779" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.794538 2242 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-scheduler-auto-999044" podStartSLOduration=1.7945287049999998 podStartE2EDuration="1.794528705s" podCreationTimestamp="2025-11-02 23:22:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:22:58.794489744 +0000 UTC m=+1.112690526" watchObservedRunningTime="2025-11-02 23:22:58.794528705 +0000 UTC m=+1.112729478" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.801908 2242 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-auto-999044" podStartSLOduration=1.801901422 podStartE2EDuration="1.801901422s" podCreationTimestamp="2025-11-02 23:22:57 +0000 
UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:22:58.798592195 +0000 UTC m=+1.116792972" watchObservedRunningTime="2025-11-02 23:22:58.801901422 +0000 UTC m=+1.120102198" + Nov 02 23:22:58 auto-999044 kubelet[2242]: I1102 23:22:58.805754 2242 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-auto-999044" podStartSLOduration=1.805745789 podStartE2EDuration="1.805745789s" podCreationTimestamp="2025-11-02 23:22:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:22:58.80203794 +0000 UTC m=+1.120238723" watchObservedRunningTime="2025-11-02 23:22:58.805745789 +0000 UTC m=+1.123946691" + Nov 02 23:23:00 auto-999044 kubelet[2242]: I1102 23:23:00.094718 2242 kubelet_node_status.go:439] "Fast updating node status as it just became ready" + Nov 02 23:23:03 auto-999044 kubelet[2242]: I1102 23:23:03.864825 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/7940610a-a7bb-457a-ad86-e4d90d0bcee7-kube-proxy\") pod \"kube-proxy-9w7hc\" (UID: \"7940610a-a7bb-457a-ad86-e4d90d0bcee7\") " pod="kube-system/kube-proxy-9w7hc" + Nov 02 23:23:03 auto-999044 kubelet[2242]: I1102 23:23:03.864856 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/7940610a-a7bb-457a-ad86-e4d90d0bcee7-xtables-lock\") pod \"kube-proxy-9w7hc\" (UID: \"7940610a-a7bb-457a-ad86-e4d90d0bcee7\") " pod="kube-system/kube-proxy-9w7hc" + Nov 02 23:23:03 auto-999044 kubelet[2242]: I1102 23:23:03.864868 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wh4p6\" (UniqueName: \"kubernetes.io/projected/7940610a-a7bb-457a-ad86-e4d90d0bcee7-kube-api-access-wh4p6\") pod \"kube-proxy-9w7hc\" (UID: \"7940610a-a7bb-457a-ad86-e4d90d0bcee7\") " pod="kube-system/kube-proxy-9w7hc" + Nov 02 23:23:03 auto-999044 kubelet[2242]: I1102 23:23:03.864880 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/7940610a-a7bb-457a-ad86-e4d90d0bcee7-lib-modules\") pod \"kube-proxy-9w7hc\" (UID: \"7940610a-a7bb-457a-ad86-e4d90d0bcee7\") " pod="kube-system/kube-proxy-9w7hc" + Nov 02 23:23:03 auto-999044 kubelet[2242]: E1102 23:23:03.895309 2242 pod_workers.go:1324] "Error syncing pod, skipping" err="unmounted volumes=[config-volume kube-api-access-4vn2v], unattached volumes=[], failed to process volumes=[config-volume kube-api-access-4vn2v]: context canceled" pod="kube-system/coredns-66bc5c9577-8dgnz" podUID="610a8793-4eff-41ec-85b6-f04b51443320" + Nov 02 23:23:03 auto-999044 kubelet[2242]: I1102 23:23:03.965398 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1b992356-b933-4132-805f-e9125c636ce4-config-volume\") pod \"coredns-66bc5c9577-mxnbp\" (UID: \"1b992356-b933-4132-805f-e9125c636ce4\") " pod="kube-system/coredns-66bc5c9577-mxnbp" + Nov 02 23:23:03 auto-999044 kubelet[2242]: I1102 23:23:03.965416 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: 
\"kubernetes.io/host-path/bde74fcf-8ec2-4a30-9f76-bc7e298f7489-tmp\") pod \"storage-provisioner\" (UID: \"bde74fcf-8ec2-4a30-9f76-bc7e298f7489\") " pod="kube-system/storage-provisioner" + Nov 02 23:23:03 auto-999044 kubelet[2242]: I1102 23:23:03.965447 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/610a8793-4eff-41ec-85b6-f04b51443320-config-volume\") pod \"coredns-66bc5c9577-8dgnz\" (UID: \"610a8793-4eff-41ec-85b6-f04b51443320\") " pod="kube-system/coredns-66bc5c9577-8dgnz" + Nov 02 23:23:03 auto-999044 kubelet[2242]: I1102 23:23:03.965476 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xznqm\" (UniqueName: \"kubernetes.io/projected/bde74fcf-8ec2-4a30-9f76-bc7e298f7489-kube-api-access-xznqm\") pod \"storage-provisioner\" (UID: \"bde74fcf-8ec2-4a30-9f76-bc7e298f7489\") " pod="kube-system/storage-provisioner" + Nov 02 23:23:03 auto-999044 kubelet[2242]: I1102 23:23:03.965577 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjxnk\" (UniqueName: \"kubernetes.io/projected/1b992356-b933-4132-805f-e9125c636ce4-kube-api-access-qjxnk\") pod \"coredns-66bc5c9577-mxnbp\" (UID: \"1b992356-b933-4132-805f-e9125c636ce4\") " pod="kube-system/coredns-66bc5c9577-mxnbp" + Nov 02 23:23:03 auto-999044 kubelet[2242]: I1102 23:23:03.965611 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vn2v\" (UniqueName: \"kubernetes.io/projected/610a8793-4eff-41ec-85b6-f04b51443320-kube-api-access-4vn2v\") pod \"coredns-66bc5c9577-8dgnz\" (UID: \"610a8793-4eff-41ec-85b6-f04b51443320\") " pod="kube-system/coredns-66bc5c9577-8dgnz" + Nov 02 23:23:04 auto-999044 kubelet[2242]: I1102 23:23:04.793554 2242 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-9w7hc" podStartSLOduration=1.7935406550000002 podStartE2EDuration="1.793540655s" podCreationTimestamp="2025-11-02 23:23:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:23:04.793296181 +0000 UTC m=+7.111496957" watchObservedRunningTime="2025-11-02 23:23:04.793540655 +0000 UTC m=+7.111741433" + Nov 02 23:23:04 auto-999044 kubelet[2242]: I1102 23:23:04.798897 2242 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=1.798885292 podStartE2EDuration="1.798885292s" podCreationTimestamp="2025-11-02 23:23:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:23:04.798798055 +0000 UTC m=+7.116998840" watchObservedRunningTime="2025-11-02 23:23:04.798885292 +0000 UTC m=+7.117086075" + Nov 02 23:23:04 auto-999044 kubelet[2242]: I1102 23:23:04.808130 2242 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-mxnbp" podStartSLOduration=1.808119472 podStartE2EDuration="1.808119472s" podCreationTimestamp="2025-11-02 23:23:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:23:04.808057805 +0000 UTC m=+7.126258588" watchObservedRunningTime="2025-11-02 23:23:04.808119472 +0000 UTC m=+7.126320252" + Nov 02 23:23:04 
auto-999044 kubelet[2242]: I1102 23:23:04.873482 2242 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/610a8793-4eff-41ec-85b6-f04b51443320-config-volume\") pod \"610a8793-4eff-41ec-85b6-f04b51443320\" (UID: \"610a8793-4eff-41ec-85b6-f04b51443320\") " + Nov 02 23:23:04 auto-999044 kubelet[2242]: I1102 23:23:04.873507 2242 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vn2v\" (UniqueName: \"kubernetes.io/projected/610a8793-4eff-41ec-85b6-f04b51443320-kube-api-access-4vn2v\") pod \"610a8793-4eff-41ec-85b6-f04b51443320\" (UID: \"610a8793-4eff-41ec-85b6-f04b51443320\") " + Nov 02 23:23:04 auto-999044 kubelet[2242]: I1102 23:23:04.873781 2242 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/610a8793-4eff-41ec-85b6-f04b51443320-config-volume" (OuterVolumeSpecName: "config-volume") pod "610a8793-4eff-41ec-85b6-f04b51443320" (UID: "610a8793-4eff-41ec-85b6-f04b51443320"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGIDValue "" + Nov 02 23:23:04 auto-999044 kubelet[2242]: I1102 23:23:04.874890 2242 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/610a8793-4eff-41ec-85b6-f04b51443320-kube-api-access-4vn2v" (OuterVolumeSpecName: "kube-api-access-4vn2v") pod "610a8793-4eff-41ec-85b6-f04b51443320" (UID: "610a8793-4eff-41ec-85b6-f04b51443320"). InnerVolumeSpecName "kube-api-access-4vn2v". PluginName "kubernetes.io/projected", VolumeGIDValue "" + Nov 02 23:23:04 auto-999044 kubelet[2242]: I1102 23:23:04.974173 2242 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/610a8793-4eff-41ec-85b6-f04b51443320-config-volume\") on node \"auto-999044\" DevicePath \"\"" + Nov 02 23:23:04 auto-999044 kubelet[2242]: I1102 23:23:04.974188 2242 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-4vn2v\" (UniqueName: \"kubernetes.io/projected/610a8793-4eff-41ec-85b6-f04b51443320-kube-api-access-4vn2v\") on node \"auto-999044\" DevicePath \"\"" + Nov 02 23:23:06 auto-999044 kubelet[2242]: I1102 23:23:06.531146 2242 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" + Nov 02 23:23:07 auto-999044 kubelet[2242]: I1102 23:23:07.748102 2242 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="610a8793-4eff-41ec-85b6-f04b51443320" path="/var/lib/kubelet/pods/610a8793-4eff-41ec-85b6-f04b51443320/volumes" + Nov 02 23:23:08 auto-999044 kubelet[2242]: I1102 23:23:08.084642 2242 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24" + Nov 02 23:23:08 auto-999044 kubelet[2242]: I1102 23:23:08.085044 2242 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24" + Nov 02 23:23:36 auto-999044 kubelet[2242]: I1102 23:23:36.899461 2242 scope.go:117] "RemoveContainer" containerID="4f07a6c8f344edcafc4b1bd06974785d38344fce09e85f95d0d7748b4bde9d42" + Nov 02 23:23:40 auto-999044 kubelet[2242]: I1102 23:23:40.969031 2242 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkdnk\" (UniqueName: \"kubernetes.io/projected/a4a5ce94-fb38-422d-a463-338a39c9552d-kube-api-access-pkdnk\") pod \"netcat-cd4db9dbf-klkm2\" (UID: \"a4a5ce94-fb38-422d-a463-338a39c9552d\") " pod="default/netcat-cd4db9dbf-klkm2" + Nov 02 23:23:42 auto-999044 
kubelet[2242]: I1102 23:23:42.938956 2242 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/netcat-cd4db9dbf-klkm2" podStartSLOduration=2.524758339 podStartE2EDuration="3.938941421s" podCreationTimestamp="2025-11-02 23:23:39 +0000 UTC" firstStartedPulling="2025-11-02 23:23:41.314132427 +0000 UTC m=+43.632333203" lastFinishedPulling="2025-11-02 23:23:42.72831551 +0000 UTC m=+45.046516285" observedRunningTime="2025-11-02 23:23:42.938750835 +0000 UTC m=+45.256951619" watchObservedRunningTime="2025-11-02 23:23:42.938941421 +0000 UTC m=+45.257142202" + + + >>> host: /etc/kubernetes/kubelet.conf: + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://192.168.76.2:8443 + name: mk + contexts: + - context: + cluster: mk + user: system:node:auto-999044 + name: system:node:auto-999044@mk + current-context: system:node:auto-999044@mk + kind: Config + users: + - name: system:node:auto-999044 + user: + client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem + client-key: /var/lib/kubelet/pki/kubelet-client-current.pem + + + >>> host: /var/lib/kubelet/config.yaml: + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + 
infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + >>> k8s: kubectl config: + apiVersion: v1 + clusters: + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:23:03 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.76.2:8443 + name: auto-999044 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:23:58 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.103.2:8443 + name: calico-999044 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:23:57 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.94.2:8443 + name: kindnet-999044 + contexts: + - context: + cluster: auto-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:23:03 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: auto-999044 + name: auto-999044 + - context: + cluster: calico-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:23:58 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: calico-999044 + name: calico-999044 + - context: + cluster: kindnet-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:23:57 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: kindnet-999044 + name: kindnet-999044 + current-context: calico-999044 + kind: Config + users: + - name: auto-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/auto-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/auto-999044/client.key + - name: calico-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.key + - name: kindnet-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.key + + + >>> k8s: cms: + apiVersion: v1 + items: + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + 
AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:23:03Z" + name: kube-root-ca.crt + namespace: default + resourceVersion: "315" + uid: 350950e8-1d5d-4181-a0dc-6c88b594132a + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. 
+ creationTimestamp: "2025-11-02T23:23:03Z" + name: kube-root-ca.crt + namespace: kube-node-lease + resourceVersion: "316" + uid: 85798f8b-26e1-4b73-80d7-19db56ed1141 + - apiVersion: v1 + data: + jws-kubeconfig-htbhfe: eyJhbGciOiJIUzI1NiIsImtpZCI6Imh0YmhmZSJ9..s55lqNYYqHnG06Dk3B7Z3TrHA3jcBmOnv38pdOL9qh8 + kubeconfig: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://control-plane.minikube.internal:8443 + name: "" + contexts: null + current-context: "" + kind: Config + users: null + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:22:57Z" + name: cluster-info + namespace: kube-public + resourceVersion: "302" + uid: 6e4fd4db-a850-466e-8677-edccdd983251 + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal 
endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:23:03Z" + name: kube-root-ca.crt + namespace: kube-public + resourceVersion: "317" + uid: e8b8cf87-974a-4f11-9975-9da1d406035d + - apiVersion: v1 + data: + Corefile: | + .:53 { + log + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + hosts { + 192.168.76.1 host.minikube.internal + fallthrough + } + forward . /etc/resolv.conf { + max_concurrent 1000 + } + cache 30 { + disable success cluster.local + disable denial cluster.local + } + loop + reload + loadbalance + } + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:22:57Z" + name: coredns + namespace: kube-system + resourceVersion: "330" + uid: 249f02cf-06cd-4f29-afee-6f0cbf383f26 + - apiVersion: v1 + data: + client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + requestheader-allowed-names: '["front-proxy-client"]' + requestheader-client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDETCCAfmgAwIBAgIIT5GawXDNt4UwDQYJKoZIhvcNAQELBQAwGTEXMBUGA1UE + AxMOZnJvbnQtcHJveHktY2EwHhcNMjUxMTAyMjMxNzUxWhcNMzUxMDMxMjMyMjUx + WjAZMRcwFQYDVQQDEw5mcm9udC1wcm94eS1jYTCCASIwDQYJKoZIhvcNAQEBBQAD + ggEPADCCAQoCggEBAKONfbuAh3UI9sElyANTUWQ79A2k579Lt+gyu7+lVP+blcJM + z6ARnuY1GetTEYxHvAzu64rQ2xy0HY/aZElZ14XEgVJy8banj/gLthQYpSs9SSik + SBOd9QoknI+VwQoyT1BKnBg717ZhOTPoDEyHL7Bs1c790HJyUxjv2oG2ED/AiZzO + VtDbF7YUDSOX8tyto2XEZDbA2S6VXsTdIY8GhEBF/S6nCsqdSaGNiYXzYi8rhqHU + sKmAeirGRu0BTeVT5yhwLQoZHb5evd21kEFCFLMG8Lb/Fq86OdREPbBayTqP4vkM + PSOBD3ryYrocMf+4VoVVvgEQtVXcsIBCdE36qekCAwEAAaNdMFswDgYDVR0PAQH/ + BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFDrniGyckJV7fwsKrNtj + 9qE9jxIyMBkGA1UdEQQSMBCCDmZyb250LXByb3h5LWNhMA0GCSqGSIb3DQEBCwUA + A4IBAQA/eVdBJXt/3phr/NQhwovBkakGq+qFPnjKsXTOlY0NGp1wLTqGGnB82wFz + zvpC4R7gpv8/dxIymuIDjUpS452yuUVo7WEA04MGXcZyEN0s1KT+ojC6Z7gThXHE + 02hMiFrJg6bZv4gRVg5QSULA0z7oZAsk5LeItyM3Ez2IXegU8ZdO1XAOrBR2E1YH + 58nkAqhD1GMANMzI1n8ihY9QBKIRfk+QR7g2FTKId3Vd9CBNRSutvqzvBsyS/2k1 + +osc747uPb1aIc4+sndgLZYinX4Lg6g6n9H97kVnSGK3sSKGRgh2ygnN3EQmoS5o + k6VSzz/LyBuNoizDLVSZJ02IfAEV + -----END CERTIFICATE----- + requestheader-extra-headers-prefix: '["X-Remote-Extra-"]' + requestheader-group-headers: '["X-Remote-Group"]' + 
requestheader-username-headers: '["X-Remote-User"]' + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:22:55Z" + name: extension-apiserver-authentication + namespace: kube-system + resourceVersion: "33" + uid: 4bc05186-fd2f-4b92-9403-caca2d990487 + - apiVersion: v1 + data: + since: "2025-11-02" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:22:55Z" + name: kube-apiserver-legacy-service-account-token-tracking + namespace: kube-system + resourceVersion: "64" + uid: 57a7d417-190b-41df-8d3f-4636d966eef9 + - apiVersion: v1 + data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: 0.0.0.0 + bindAddressHardFail: false + clientConnection: + acceptContentTypes: "" + burst: 0 + contentType: "" + kubeconfig: /var/lib/kube-proxy/kubeconfig.conf + qps: 0 + clusterCIDR: 10.244.0.0/16 + configSyncPeriod: 0s + conntrack: + maxPerCore: 0 + min: null + tcpBeLiberal: false + tcpCloseWaitTimeout: 0s + tcpEstablishedTimeout: 0s + udpStreamTimeout: 0s + udpTimeout: 0s + detectLocal: + bridgeInterface: "" + interfaceNamePrefix: "" + detectLocalMode: "" + enableProfiling: false + healthzBindAddress: "" + hostnameOverride: "" + iptables: + localhostNodePorts: null + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + ipvs: + excludeCIDRs: null + minSyncPeriod: 0s + scheduler: "" + strictARP: false + syncPeriod: 0s + tcpFinTimeout: 0s + tcpTimeout: 0s + udpTimeout: 0s + kind: KubeProxyConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + metricsBindAddress: 0.0.0.0:10249 + mode: "" + nftables: + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + nodePortAddresses: null + oomScoreAdj: null + portRange: "" + showHiddenMetricsForVersion: "" + winkernel: + enableDSR: false + forwardHealthCheckVip: false + networkName: "" + rootHnsEndpointName: "" + sourceVip: "" + kubeconfig.conf: |- + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://control-plane.minikube.internal:8443 + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:22:57Z" + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system + resourceVersion: "251" + uid: ba46a61f-2f0a-4f6e-b742-ad1959cf2758 + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + 
C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:23:03Z" + name: kube-root-ca.crt + namespace: kube-system + resourceVersion: "318" + uid: a645cbc6-81d1-4e3a-8829-c5e32cc73fec + - apiVersion: v1 + data: + ClusterConfiguration: | + apiServer: + certSANs: + - 127.0.0.1 + - localhost + - 192.168.76.2 + extraArgs: + - name: enable-admission-plugins + value: NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + apiVersion: kubeadm.k8s.io/v1beta4 + caCertificateValidityPeriod: 87600h0m0s + certificateValidityPeriod: 8760h0m0s + certificatesDir: /var/lib/minikube/certs + clusterName: mk + controlPlaneEndpoint: control-plane.minikube.internal:8443 + controllerManager: + extraArgs: + - name: allocate-node-cidrs + value: "true" + - name: leader-elect + value: "false" + dns: {} + encryptionAlgorithm: RSA-2048 + etcd: + local: + dataDir: /var/lib/minikube/etcd + imageRepository: registry.k8s.io + kind: ClusterConfiguration + kubernetesVersion: v1.34.1 + networking: + dnsDomain: cluster.local + podSubnet: 10.244.0.0/16 + serviceSubnet: 10.96.0.0/12 + proxy: {} + scheduler: + extraArgs: + - name: leader-elect + value: "false" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:22:57Z" + name: kubeadm-config + namespace: kube-system + resourceVersion: "208" + uid: c01ce6d7-393c-4cec-a0b6-8aadabc5a4d1 + - apiVersion: v1 + data: + kubelet: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:22:57Z" + name: kubelet-config + namespace: kube-system + resourceVersion: 
"211" + uid: 78662d3f-9a98-49cb-adbd-5ad08717346e + kind: List + metadata: + resourceVersion: "" + + + >>> host: docker daemon status: + ● docker.service - Docker Application Container Engine + Loaded: loaded (]8;;file://auto-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;;; enabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:22:49 UTC; 1min 8s ago + TriggeredBy: ● docker.socket + Docs: ]8;;https://docs.docker.comhttps://docs.docker.com]8;; + Main PID: 1059 (dockerd) + Tasks: 14 + Memory: 170.4M + CPU: 2.490s + CGroup: /system.slice/docker.service + └─1059 /usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + + Nov 02 23:22:49 auto-999044 dockerd[1059]: time="2025-11-02T23:22:49.743001888Z" level=info msg="Loading containers: done." + Nov 02 23:22:49 auto-999044 dockerd[1059]: time="2025-11-02T23:22:49.748004494Z" level=info msg="Docker daemon" commit=f8215cc containerd-snapshotter=false storage-driver=overlay2 version=28.5.1 + Nov 02 23:22:49 auto-999044 dockerd[1059]: time="2025-11-02T23:22:49.748034658Z" level=info msg="Initializing buildkit" + Nov 02 23:22:49 auto-999044 dockerd[1059]: time="2025-11-02T23:22:49.759000104Z" level=info msg="Completed buildkit initialization" + Nov 02 23:22:49 auto-999044 dockerd[1059]: time="2025-11-02T23:22:49.761382626Z" level=info msg="Daemon has completed initialization" + Nov 02 23:22:49 auto-999044 dockerd[1059]: time="2025-11-02T23:22:49.761420591Z" level=info msg="API listen on /var/run/docker.sock" + Nov 02 23:22:49 auto-999044 dockerd[1059]: time="2025-11-02T23:22:49.761462439Z" level=info msg="API listen on /run/docker.sock" + Nov 02 23:22:49 auto-999044 dockerd[1059]: time="2025-11-02T23:22:49.761476068Z" level=info msg="API listen on [::]:2376" + Nov 02 23:22:49 auto-999044 systemd[1]: Started docker.service - Docker Application Container Engine. + Nov 02 23:23:34 auto-999044 dockerd[1059]: time="2025-11-02T23:23:34.413814451Z" level=info msg="ignoring event" container=4f07a6c8f344edcafc4b1bd06974785d38344fce09e85f95d0d7748b4bde9d42 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + + + >>> host: docker daemon config: + # ]8;;file://auto-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;; + [Unit] + Description=Docker Application Container Engine + Documentation=https://docs.docker.com + After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target + Wants=network-online.target containerd.service + Requires=docker.socket + StartLimitBurst=3 + StartLimitIntervalSec=60 + + [Service] + Type=notify + Restart=always + + + + # This file is a systemd drop-in unit that inherits from the base dockerd configuration. + # The base configuration already specifies an 'ExecStart=...' command. The first directive + # here is to clear out that command inherited from the base configuration. 
Without this, + # the command from the base configuration and the command specified here are treated as + # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd + # will catch this invalid input and refuse to start the service with an error like: + # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. + + # NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other + # container runtimes. If left unlimited, it may result in OOM issues with MySQL. + ExecStart= + ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + ExecReload=/bin/kill -s HUP $MAINPID + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Uncomment TasksMax if your systemd version supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + TimeoutStartSec=0 + + # set delegate yes so that systemd does not reset the cgroups of docker containers + Delegate=yes + + # kill only the docker process, not all processes in the cgroup + KillMode=process + OOMScoreAdjust=-500 + + [Install] + WantedBy=multi-user.target + + + >>> host: /etc/docker/daemon.json: + {"exec-opts":["native.cgroupdriver=systemd"],"log-driver":"json-file","log-opts":{"max-size":"100m"},"storage-driver":"overlay2"} + + >>> host: docker system info: + Client: Docker Engine - Community + Version: 28.5.1 + Context: default + Debug Mode: false + Plugins: + buildx: Docker Buildx (Docker Inc.) 
+ Version: v0.29.1 + Path: /usr/libexec/docker/cli-plugins/docker-buildx + + Server: + Containers: 17 + Running: 16 + Paused: 0 + Stopped: 1 + Images: 9 + Server Version: 28.5.1 + Storage Driver: overlay2 + Backing Filesystem: extfs + Supports d_type: true + Using metacopy: false + Native Overlay Diff: true + userxattr: false + Logging Driver: json-file + Cgroup Driver: systemd + Cgroup Version: 2 + Plugins: + Volume: local + Network: bridge host ipvlan macvlan null overlay + Log: awslogs fluentd gcplogs gelf journald json-file local splunk syslog + CDI spec directories: + /etc/cdi + /var/run/cdi + Swarm: inactive + Runtimes: io.containerd.runc.v2 runc + Default Runtime: runc + Init Binary: docker-init + containerd version: b98a3aace656320842a23f4a392a33f46af97866 + runc version: v1.3.0-0-g4ca628d1 + init version: de40ad0 + Security Options: + seccomp + Profile: builtin + cgroupns + Kernel Version: 6.6.97+ + Operating System: Debian GNU/Linux 12 (bookworm) + OSType: linux + Architecture: x86_64 + CPUs: 8 + Total Memory: 60.83GiB + Name: auto-999044 + ID: 825d3878-e53f-41a0-b29f-8b6b1596f815 + Docker Root Dir: /var/lib/docker + Debug Mode: false + No Proxy: control-plane.minikube.internal + Labels: + provider=docker + Experimental: false + Insecure Registries: + 10.96.0.0/12 + ::1/128 + 127.0.0.0/8 + Live Restore Enabled: false + + + + >>> host: cri-docker daemon status: + ● cri-docker.service - CRI Interface for Docker Application Container Engine + Loaded: loaded (/lib/systemd/system/cri-docker.service; disabled; preset: enabled) + Drop-In: /etc/systemd/system/cri-docker.service.d + └─10-cni.conf + Active: active (running) since Sun 2025-11-02 23:22:50 UTC; 1min 9s ago + TriggeredBy: ● cri-docker.socket + Docs: https://docs.mirantis.com + Main PID: 1368 (cri-dockerd) + Tasks: 13 + Memory: 16.9M + CPU: 659ms + CGroup: /system.slice/cri-docker.service + └─1368 /usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + Nov 02 23:22:54 auto-999044 cri-dockerd[1368]: time="2025-11-02T23:22:54Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/b2ecc333d57b3d3b593a14ce0d9844ffbeb64d2f5fb0ecba9a0f792841cb477e/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:22:54 auto-999044 cri-dockerd[1368]: time="2025-11-02T23:22:54Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/346d207f8fe183c2f3531fbf48f48d62273093c4a31bf4c4c79c45da485cd4fb/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:22:54 auto-999044 cri-dockerd[1368]: time="2025-11-02T23:22:54Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/3025f09650fa3ece406efd97d351340dd38c9c1d71055d2d1ae3de5eee776c04/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:22:54 auto-999044 cri-dockerd[1368]: time="2025-11-02T23:22:54Z" level=info msg="Will attempt to re-write config file 
/var/lib/docker/containers/07a95d7d60e4839e93166efff417cc200b20d89a1c348bdbf5a94adec75b4c55/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:23:04 auto-999044 cri-dockerd[1368]: time="2025-11-02T23:23:04Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/33071dea37c65d75e6e9dd416c56b44bb454cadba926178fe7a35554728cfc70/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:23:04 auto-999044 cri-dockerd[1368]: time="2025-11-02T23:23:04Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/28d64c4d85acec648924025d9b0a977bfe671d58430857f1d5f9cf13f288e956/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:23:04 auto-999044 cri-dockerd[1368]: time="2025-11-02T23:23:04Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/c924501b56abb8a2de06831a9f0f30a2361d6e6a5900d894e7d614d115d27286/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:23:08 auto-999044 cri-dockerd[1368]: time="2025-11-02T23:23:08Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}" + Nov 02 23:23:41 auto-999044 cri-dockerd[1368]: time="2025-11-02T23:23:41Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/42fd62c06fc88d6660fb4ffee19373915c59ee6e593016a5e9a6eb6d158cd7e6/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local test-pods.svc.cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:23:42 auto-999044 cri-dockerd[1368]: time="2025-11-02T23:23:42Z" level=info msg="Stop pulling image registry.k8s.io/e2e-test-images/agnhost:2.40: Status: Downloaded newer image for registry.k8s.io/e2e-test-images/agnhost:2.40" + + + >>> host: cri-docker daemon config: + # ]8;;file://auto-999044/lib/systemd/system/cri-docker.service/lib/systemd/system/cri-docker.service]8;; + [Unit] + Description=CRI Interface for Docker Application Container Engine + Documentation=https://docs.mirantis.com + After=network-online.target firewalld.service docker.service + Wants=network-online.target + Requires=cri-docker.socket + + [Service] + Type=notify + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// + ExecReload=/bin/kill -s HUP $MAINPID + TimeoutSec=0 + RestartSec=2 + Restart=always + + # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. + # Both the old, and new location are accepted by systemd 229 and up, so using the old location + # to make them work for either version of systemd. + StartLimitBurst=3 + + # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. + # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make + # this option work for either version of systemd. + StartLimitInterval=60s + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. 
We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Comment TasksMax if your systemd version does not support it. + # Only systemd 226 and above support this option. + TasksMax=infinity + Delegate=yes + KillMode=process + + [Install] + WantedBy=multi-user.target + + # /etc/systemd/system/cri-docker.service.d/10-cni.conf + [Service] + ExecStart= + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + + >>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf: + [Service] + ExecStart= + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + >>> host: /usr/lib/systemd/system/cri-docker.service: + [Unit] + Description=CRI Interface for Docker Application Container Engine + Documentation=https://docs.mirantis.com + After=network-online.target firewalld.service docker.service + Wants=network-online.target + Requires=cri-docker.socket + + [Service] + Type=notify + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// + ExecReload=/bin/kill -s HUP $MAINPID + TimeoutSec=0 + RestartSec=2 + Restart=always + + # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. + # Both the old, and new location are accepted by systemd 229 and up, so using the old location + # to make them work for either version of systemd. + StartLimitBurst=3 + + # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. + # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make + # this option work for either version of systemd. + StartLimitInterval=60s + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Comment TasksMax if your systemd version does not support it. + # Only systemd 226 and above support this option.
+ TasksMax=infinity + Delegate=yes + KillMode=process + + [Install] + WantedBy=multi-user.target + + + >>> host: cri-dockerd version: + cri-dockerd dev (HEAD) + + + >>> host: containerd daemon status: + ● containerd.service - containerd container runtime + Loaded: loaded (]8;;file://auto-999044/lib/systemd/system/containerd.service/lib/systemd/system/containerd.service]8;;; disabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:22:49 UTC; 1min 10s ago + Docs: ]8;;https://containerd.iohttps://containerd.io]8;; + Main PID: 1044 (containerd) + Tasks: 188 + Memory: 93.7M + CPU: 1.030s + CGroup: /system.slice/containerd.service + ├─1044 /usr/bin/containerd + ├─1836 /usr/bin/containerd-shim-runc-v2 -namespace moby -id b2ecc333d57b3d3b593a14ce0d9844ffbeb64d2f5fb0ecba9a0f792841cb477e -address /run/containerd/containerd.sock + ├─1837 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 3025f09650fa3ece406efd97d351340dd38c9c1d71055d2d1ae3de5eee776c04 -address /run/containerd/containerd.sock + ├─1898 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 346d207f8fe183c2f3531fbf48f48d62273093c4a31bf4c4c79c45da485cd4fb -address /run/containerd/containerd.sock + ├─1900 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 07a95d7d60e4839e93166efff417cc200b20d89a1c348bdbf5a94adec75b4c55 -address /run/containerd/containerd.sock + ├─2024 /usr/bin/containerd-shim-runc-v2 -namespace moby -id b676e439e2a8544e821428dd21240c25086ad24ad5f6b1743d47392acc5aae25 -address /run/containerd/containerd.sock + ├─2026 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 63337d019d0961a1fadd4ecfdbb502635ba89dbb21f803570e4747f732e3c71b -address /run/containerd/containerd.sock + ├─2028 /usr/bin/containerd-shim-runc-v2 -namespace moby -id b81bf8c1821ccee7bc60019e6192de5b3463557fe9f49bb7c5c17fa96da051f9 -address /run/containerd/containerd.sock + ├─2078 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 00d3e061ae4b0694347a44049217016b774686c335d2038bbb15bd11906f735e -address /run/containerd/containerd.sock + ├─2553 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 33071dea37c65d75e6e9dd416c56b44bb454cadba926178fe7a35554728cfc70 -address /run/containerd/containerd.sock + ├─2597 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 28d64c4d85acec648924025d9b0a977bfe671d58430857f1d5f9cf13f288e956 -address /run/containerd/containerd.sock + ├─2635 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 9c0ad8c63852773a0a6d2a289c1aec537e55c3ebae43bbf8ad5485df21993a6b -address /run/containerd/containerd.sock + ├─2700 /usr/bin/containerd-shim-runc-v2 -namespace moby -id c924501b56abb8a2de06831a9f0f30a2361d6e6a5900d894e7d614d115d27286 -address /run/containerd/containerd.sock + ├─2778 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 1c82279a9cb5eef9af9728cf00efc3297102cdc50b2d00f3d6f438c05e7b1fd0 -address /run/containerd/containerd.sock + ├─3186 /usr/bin/containerd-shim-runc-v2 -namespace moby -id b2556765f738367b5d6abc570d45bb5cffb7bbbfcdbfb805c4875cd8bf8fc4f8 -address /run/containerd/containerd.sock + ├─3267 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 42fd62c06fc88d6660fb4ffee19373915c59ee6e593016a5e9a6eb6d158cd7e6 -address /run/containerd/containerd.sock + └─3403 /usr/bin/containerd-shim-runc-v2 -namespace moby -id d26403f5e5773f80d7cc8fa96e8ec45de94311b4717f5b8bcd2665df32960524 -address /run/containerd/containerd.sock + + Nov 02 23:22:49 auto-999044 containerd[1044]: time="2025-11-02T23:22:49.577513627Z" level=info msg="Start event monitor" + Nov 02 23:22:49 auto-999044 containerd[1044]: 
time="2025-11-02T23:22:49.577522493Z" level=info msg=serving... address=/run/containerd/containerd.sock + Nov 02 23:22:49 auto-999044 containerd[1044]: time="2025-11-02T23:22:49.577525795Z" level=info msg="Start snapshots syncer" + Nov 02 23:22:49 auto-999044 containerd[1044]: time="2025-11-02T23:22:49.577533501Z" level=info msg="Start cni network conf syncer for default" + Nov 02 23:22:49 auto-999044 containerd[1044]: time="2025-11-02T23:22:49.577540885Z" level=info msg="Start streaming server" + Nov 02 23:22:49 auto-999044 containerd[1044]: time="2025-11-02T23:22:49.577579277Z" level=info msg="containerd successfully booted in 0.012670s" + Nov 02 23:22:49 auto-999044 systemd[1]: Started containerd.service - containerd container runtime. + Nov 02 23:23:34 auto-999044 containerd[1044]: time="2025-11-02T23:23:34.413710288Z" level=info msg="shim disconnected" id=4f07a6c8f344edcafc4b1bd06974785d38344fce09e85f95d0d7748b4bde9d42 namespace=moby + Nov 02 23:23:34 auto-999044 containerd[1044]: time="2025-11-02T23:23:34.413749628Z" level=warning msg="cleaning up after shim disconnected" id=4f07a6c8f344edcafc4b1bd06974785d38344fce09e85f95d0d7748b4bde9d42 namespace=moby + Nov 02 23:23:34 auto-999044 containerd[1044]: time="2025-11-02T23:23:34.413757216Z" level=info msg="cleaning up dead shim" namespace=moby + + + >>> host: containerd daemon config: + # ]8;;file://auto-999044/lib/systemd/system/containerd.service/lib/systemd/system/containerd.service]8;; + # Copyright The containerd Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /lib/systemd/system/containerd.service: + # Copyright The containerd Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ # See the License for the specific language governing permissions and + # limitations under the License. + + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /etc/containerd/config.toml: + version = 2 + root = "/var/lib/containerd" + state = "/run/containerd" + oom_score = 0 + # imports + + [grpc] + address = "/run/containerd/containerd.sock" + uid = 0 + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + + [debug] + address = "" + uid = 0 + gid = 0 + level = "" + + [metrics] + address = "" + grpc_histogram = false + + [cgroup] + path = "" + + [plugins] + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + [plugins."io.containerd.grpc.v1.cri"] + enable_unprivileged_ports = true + stream_server_address = "" + stream_server_port = "10010" + enable_selinux = false + sandbox_image = "registry.k8s.io/pause:3.10.1" + stats_collect_period = 10 + enable_tls_streaming = false + max_container_log_line_size = 16384 + restrict_oom_score_adj = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = true + snapshotter = "overlayfs" + default_runtime_name = "runc" + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + runtime_type = "" + runtime_engine = "" + runtime_root = "" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + [plugins."io.containerd.gc.v1.scheduler"] + pause_threshold = 0.02 + deletion_threshold = 0 + mutation_threshold = 100 + schedule_delay = "0s" + startup_delay = "100ms" + + + >>> host: containerd config dump: + disabled_plugins = [] + imports = ["/etc/containerd/config.toml"] + oom_score = 0 + plugin_dir = "" + required_plugins = [] + root = "/var/lib/containerd" + state = "/run/containerd" + temp = "" + version = 2 + + [cgroup] + path = "" + + [debug] + address = "" + format = "" + gid = 0 + level = "" + uid = 0 + + [grpc] + address = "/run/containerd/containerd.sock" + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + tcp_address = "" + tcp_tls_ca = "" + tcp_tls_cert = "" + tcp_tls_key = "" + uid = 0 + + [metrics] + address = "" + grpc_histogram = false + + [plugins] + + [plugins."io.containerd.gc.v1.scheduler"] 
+ deletion_threshold = 0 + mutation_threshold = 100 + pause_threshold = 0.02 + schedule_delay = "0s" + startup_delay = "100ms" + + [plugins."io.containerd.grpc.v1.cri"] + cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"] + device_ownership_from_security_context = false + disable_apparmor = false + disable_cgroup = false + disable_hugetlb_controller = true + disable_proc_mount = false + disable_tcp_service = true + drain_exec_sync_io_timeout = "0s" + enable_cdi = false + enable_selinux = false + enable_tls_streaming = false + enable_unprivileged_icmp = false + enable_unprivileged_ports = true + ignore_deprecation_warnings = [] + ignore_image_defined_volumes = false + image_pull_progress_timeout = "5m0s" + image_pull_with_sync_fs = false + max_concurrent_downloads = 3 + max_container_log_line_size = 16384 + netns_mounts_under_state_dir = false + restrict_oom_score_adj = false + sandbox_image = "registry.k8s.io/pause:3.10.1" + selinux_category_range = 1024 + stats_collect_period = 10 + stream_idle_timeout = "4h0m0s" + stream_server_address = "" + stream_server_port = "10010" + systemd_cgroup = false + tolerate_missing_hugetlb_controller = true + unset_seccomp_profile = "" + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + ip_pref = "" + max_conf_num = 1 + setup_serially = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + default_runtime_name = "runc" + disable_snapshot_annotations = true + discard_unpacked_layers = true + ignore_blockio_not_enabled_errors = false + ignore_rdt_not_enabled_errors = false + no_pivot = false + snapshotter = "overlayfs" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "io.containerd.runc.v2" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".image_decryption] + key_model = "node" + + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.grpc.v1.cri".registry.auths] + + [plugins."io.containerd.grpc.v1.cri".registry.configs] + + 
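Aside (not part of the captured log): the dump above shows SystemdCgroup = true under the runc runtime options, which matches the systemd cgroup driver configured for dockerd (native.cgroupdriver=systemd in /etc/docker/daemon.json and "Cgroup Driver: systemd" in docker system info). A minimal sketch of how that setting could be read programmatically, assuming the github.com/BurntSushi/toml package and the default /etc/containerd/config.toml path:

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml" // assumed dependency for TOML parsing
)

func main() {
	// Decode the containerd config into nested maps (tables become map[string]interface{}).
	var cfg map[string]interface{}
	if _, err := toml.DecodeFile("/etc/containerd/config.toml", &cfg); err != nil {
		log.Fatalf("decode containerd config: %v", err)
	}

	// Walk plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options.
	node := cfg
	for _, key := range []string{
		"plugins", "io.containerd.grpc.v1.cri", "containerd", "runtimes", "runc", "options",
	} {
		next, ok := node[key].(map[string]interface{})
		if !ok {
			log.Fatalf("table %q not found", key)
		}
		node = next
	}

	// Expected to print true when the systemd cgroup driver is in use, as in the dump above.
	fmt.Println("SystemdCgroup:", node["SystemdCgroup"])
}

The same map walk would apply to the merged output of `containerd config dump`, which layers defaults on top of the imported /etc/containerd/config.toml.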
[plugins."io.containerd.grpc.v1.cri".registry.headers] + + [plugins."io.containerd.grpc.v1.cri".registry.mirrors] + + [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming] + tls_cert_file = "" + tls_key_file = "" + + [plugins."io.containerd.internal.v1.opt"] + path = "/opt/containerd" + + [plugins."io.containerd.internal.v1.restart"] + interval = "10s" + + [plugins."io.containerd.internal.v1.tracing"] + + [plugins."io.containerd.metadata.v1.bolt"] + content_sharing_policy = "shared" + + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + + [plugins."io.containerd.nri.v1.nri"] + disable = true + disable_connections = false + plugin_config_path = "/etc/nri/conf.d" + plugin_path = "/opt/nri/plugins" + plugin_registration_timeout = "5s" + plugin_request_timeout = "2s" + socket_path = "/var/run/nri/nri.sock" + + [plugins."io.containerd.runtime.v1.linux"] + no_shim = false + runtime = "runc" + runtime_root = "" + shim = "containerd-shim" + shim_debug = false + + [plugins."io.containerd.runtime.v2.task"] + platforms = ["linux/amd64"] + sched_core = false + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + sync_fs = false + + [plugins."io.containerd.service.v1.tasks-service"] + blockio_config_file = "" + rdt_config_file = "" + + [plugins."io.containerd.snapshotter.v1.aufs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.blockfile"] + fs_type = "" + mount_options = [] + root_path = "" + scratch_file = "" + + [plugins."io.containerd.snapshotter.v1.btrfs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.devmapper"] + async_remove = false + base_image_size = "" + discard_blocks = false + fs_options = "" + fs_type = "" + pool_name = "" + root_path = "" + + [plugins."io.containerd.snapshotter.v1.native"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.overlayfs"] + mount_options = [] + root_path = "" + sync_remove = false + upperdir_label = false + + [plugins."io.containerd.snapshotter.v1.zfs"] + root_path = "" + + [plugins."io.containerd.tracing.processor.v1.otlp"] + + [plugins."io.containerd.transfer.v1.local"] + config_path = "" + max_concurrent_downloads = 3 + max_concurrent_uploaded_layers = 3 + + [[plugins."io.containerd.transfer.v1.local".unpack_config]] + differ = "walking" + platform = "linux/amd64" + snapshotter = "overlayfs" + + [proxy_plugins] + + [stream_processors] + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"] + accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"] + args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = "application/vnd.oci.image.layer.v1.tar" + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"] + accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"] + args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = "application/vnd.oci.image.layer.v1.tar+gzip" + + [timeouts] + "io.containerd.timeout.bolt.open" = "0s" + "io.containerd.timeout.metrics.shimstats" = "2s" + "io.containerd.timeout.shim.cleanup" = "5s" + "io.containerd.timeout.shim.load" = "5s" + "io.containerd.timeout.shim.shutdown" = "3s" + "io.containerd.timeout.task.state" = "2s" + + [ttrpc] + address = "" + gid = 0 + uid = 0 + + + >>> host: crio daemon status: + ○ crio.service - Container Runtime 
Interface for OCI (CRI-O) + Loaded: loaded (]8;;file://auto-999044/lib/systemd/system/crio.service/lib/systemd/system/crio.service]8;;; disabled; preset: enabled) + Active: inactive (dead) + Docs: ]8;;https://github.com/cri-o/cri-ohttps://github.com/cri-o/cri-o]8;; + ssh: Process exited with status 3 + + + >>> host: crio daemon config: + # ]8;;file://auto-999044/lib/systemd/system/crio.service/lib/systemd/system/crio.service]8;; + [Unit] + Description=Container Runtime Interface for OCI (CRI-O) + Documentation=https://github.com/cri-o/cri-o + Wants=network-online.target + Before=kubelet.service + After=network-online.target + + [Service] + Type=notify + EnvironmentFile=-/etc/default/crio + Environment=GOTRACEBACK=crash + ExecStart=/usr/bin/crio \ + $CRIO_CONFIG_OPTIONS \ + $CRIO_RUNTIME_OPTIONS \ + $CRIO_STORAGE_OPTIONS \ + $CRIO_NETWORK_OPTIONS \ + $CRIO_METRICS_OPTIONS + ExecReload=/bin/kill -s HUP $MAINPID + TasksMax=infinity + LimitNOFILE=1048576 + LimitNPROC=1048576 + LimitCORE=infinity + OOMScoreAdjust=-999 + TimeoutStartSec=0 + Restart=on-failure + RestartSec=10 + + [Install] + WantedBy=multi-user.target + Alias=cri-o.service + + + >>> host: /etc/crio: + /etc/crio/crio.conf.d/10-crio.conf + [crio.image] + signature_policy = "/etc/crio/policy.json" + + [crio.runtime] + default_runtime = "crun" + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_root = "/run/crun" + monitor_path = "/usr/libexec/crio/conmon" + allowed_annotations = [ + "io.containers.trace-syscall", + ] + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_root = "/run/runc" + monitor_path = "/usr/libexec/crio/conmon" + /etc/crio/crio.conf.d/02-crio.conf + [crio.image] + # pause_image = "" + + [crio.network] + # cni_default_network = "" + + [crio.runtime] + # cgroup_manager = "" + /etc/crio/policy.json + { "default": [{ "type": "insecureAcceptAnything" }] } + + + >>> host: crio config: + INFO[2025-11-02T23:24:01.904028627Z] Updating config from single file: /etc/crio/crio.conf + INFO[2025-11-02T23:24:01.904045047Z] Updating config from drop-in file: /etc/crio/crio.conf + INFO[2025-11-02T23:24:01.90406609Z] Skipping not-existing config file "/etc/crio/crio.conf" + INFO[2025-11-02T23:24:01.904079971Z] Updating config from path: /etc/crio/crio.conf.d + INFO[2025-11-02T23:24:01.904111057Z] Updating config from drop-in file: /etc/crio/crio.conf.d/02-crio.conf + INFO[2025-11-02T23:24:01.904193674Z] Updating config from drop-in file: /etc/crio/crio.conf.d/10-crio.conf + INFO Using default capabilities: CAP_CHOWN, CAP_DAC_OVERRIDE, CAP_FSETID, CAP_FOWNER, CAP_SETGID, CAP_SETUID, CAP_SETPCAP, CAP_NET_BIND_SERVICE, CAP_KILL + # The CRI-O configuration file specifies all of the available configuration + # options and command-line flags for the crio(8) OCI Kubernetes Container Runtime + # daemon, but in a TOML format that can be more easily modified and versioned. + # + # Please refer to crio.conf(5) for details of all configuration options. + + # CRI-O supports partial configuration reload during runtime, which can be + # done by sending SIGHUP to the running process. Currently supported options + # are explicitly mentioned with: 'This option supports live configuration + # reload'. + + # CRI-O reads its storage defaults from the containers-storage.conf(5) file + # located at /etc/containers/storage.conf. Modify this storage configuration if + # you want to change the system's defaults. 
If you want to modify storage just + # for CRI-O, you can change the storage configuration options here. + [crio] + + # Path to the "root directory". CRI-O stores all of its data, including + # containers images, in this directory. + # root = "/var/lib/containers/storage" + + # Path to the "run directory". CRI-O stores all of its state in this directory. + # runroot = "/run/containers/storage" + + # Path to the "imagestore". If CRI-O stores all of its images in this directory differently than Root. + # imagestore = "" + + # Storage driver used to manage the storage of images and containers. Please + # refer to containers-storage.conf(5) to see all available storage drivers. + # storage_driver = "" + + # List to pass options to the storage driver. Please refer to + # containers-storage.conf(5) to see all available storage options. + # storage_option = [ + # ] + + # The default log directory where all logs will go unless directly specified by + # the kubelet. The log directory specified must be an absolute directory. + # log_dir = "/var/log/crio/pods" + + # Location for CRI-O to lay down the temporary version file. + # It is used to check if crio wipe should wipe containers, which should + # always happen on a node reboot + # version_file = "/var/run/crio/version" + + # Location for CRI-O to lay down the persistent version file. + # It is used to check if crio wipe should wipe images, which should + # only happen when CRI-O has been upgraded + # version_file_persist = "" + + # InternalWipe is whether CRI-O should wipe containers and images after a reboot when the server starts. + # If set to false, one must use the external command 'crio wipe' to wipe the containers and images in these situations. + # internal_wipe = true + + # InternalRepair is whether CRI-O should check if the container and image storage was corrupted after a sudden restart. + # If it was, CRI-O also attempts to repair the storage. + # internal_repair = true + + # Location for CRI-O to lay down the clean shutdown file. + # It is used to check whether crio had time to sync before shutting down. + # If not found, crio wipe will clear the storage directory. + # clean_shutdown_file = "/var/lib/crio/clean.shutdown" + + # The crio.api table contains settings for the kubelet/gRPC interface. + [crio.api] + + # Path to AF_LOCAL socket on which CRI-O will listen. + # listen = "/var/run/crio/crio.sock" + + # IP address on which the stream server will listen. + # stream_address = "127.0.0.1" + + # The port on which the stream server will listen. If the port is set to "0", then + # CRI-O will allocate a random free port number. + # stream_port = "0" + + # Enable encrypted TLS transport of the stream server. + # stream_enable_tls = false + + # Length of time until open streams terminate due to lack of activity + # stream_idle_timeout = "" + + # Path to the x509 certificate file used to serve the encrypted stream. This + # file can change, and CRI-O will automatically pick up the changes. + # stream_tls_cert = "" + + # Path to the key file used to serve the encrypted stream. This file can + # change and CRI-O will automatically pick up the changes. + # stream_tls_key = "" + + # Path to the x509 CA(s) file used to verify and authenticate client + # communication with the encrypted stream. This file can change and CRI-O will + # automatically pick up the changes. + # stream_tls_ca = "" + + # Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 80 * 1024 * 1024. 
+ # grpc_max_send_msg_size = 83886080 + + # Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 80 * 1024 * 1024. + # grpc_max_recv_msg_size = 83886080 + + # The crio.runtime table contains settings pertaining to the OCI runtime used + # and options for how to set up and manage the OCI runtime. + [crio.runtime] + + # A list of ulimits to be set in containers by default, specified as + # "=:", for example: + # "nofile=1024:2048" + # If nothing is set here, settings will be inherited from the CRI-O daemon + # default_ulimits = [ + # ] + + # If true, the runtime will not use pivot_root, but instead use MS_MOVE. + # no_pivot = false + + # decryption_keys_path is the path where the keys required for + # image decryption are stored. This option supports live configuration reload. + # decryption_keys_path = "/etc/crio/keys/" + + # Path to the conmon binary, used for monitoring the OCI runtime. + # Will be searched for using $PATH if empty. + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon = "" + + # Cgroup setting for conmon + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorCgroup. + # conmon_cgroup = "" + + # Environment variable list for the conmon process, used for passing necessary + # environment variables to conmon or the runtime. + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon_env = [ + # ] + + # Additional environment variables to set for all the + # containers. These are overridden if set in the + # container image spec or in the container runtime configuration. + # default_env = [ + # ] + + # If true, SELinux will be used for pod separation on the host. + # This option is deprecated, and be interpreted from whether SELinux is enabled on the host in the future. + # selinux = false + + # Path to the seccomp.json profile which is used as the default seccomp profile + # for the runtime. If not specified or set to "", then the internal default seccomp profile will be used. + # This option supports live configuration reload. + # seccomp_profile = "" + + # Enable a seccomp profile for privileged containers from the local path. + # This option supports live configuration reload. + # privileged_seccomp_profile = "" + + # Used to change the name of the default AppArmor profile of CRI-O. The default + # profile name is "crio-default". This profile only takes effect if the user + # does not specify a profile via the Kubernetes Pod's metadata annotation. If + # the profile is set to "unconfined", then this equals to disabling AppArmor. + # This option supports live configuration reload. + # apparmor_profile = "crio-default" + + # Path to the blockio class configuration file for configuring + # the cgroup blockio controller. + # blockio_config_file = "" + + # Reload blockio-config-file and rescan blockio devices in the system before applying + # blockio parameters. + # blockio_reload = false + + # Used to change irqbalance service config file path which is used for configuring + # irqbalance daemon. + # irqbalance_config_file = "/etc/sysconfig/irqbalance" + + # irqbalance_config_restore_file allows to set a cpu mask CRI-O should + # restore as irqbalance config at startup. Set to empty string to disable this flow entirely. + # By default, CRI-O manages the irqbalance configuration to enable dynamic IRQ pinning. 
+ # irqbalance_config_restore_file = "/etc/sysconfig/orig_irq_banned_cpus" + + # Path to the RDT configuration file for configuring the resctrl pseudo-filesystem. + # This option supports live configuration reload. + # rdt_config_file = "" + + # Cgroup management implementation used for the runtime. + # cgroup_manager = "systemd" + + # Specify whether the image pull must be performed in a separate cgroup. + # separate_pull_cgroup = "" + + # List of default capabilities for containers. If it is empty or commented out, + # only the capabilities defined in the containers json file by the user/kube + # will be added. + # default_capabilities = [ + # "CHOWN", + # "DAC_OVERRIDE", + # "FSETID", + # "FOWNER", + # "SETGID", + # "SETUID", + # "SETPCAP", + # "NET_BIND_SERVICE", + # "KILL", + # ] + + # Add capabilities to the inheritable set, as well as the default group of permitted, bounding and effective. + # If capabilities are expected to work for non-root users, this option should be set. + # add_inheritable_capabilities = false + + # List of default sysctls. If it is empty or commented out, only the sysctls + # defined in the container json file by the user/kube will be added. + # default_sysctls = [ + # ] + + # List of devices on the host that a + # user can specify with the "io.kubernetes.cri-o.Devices" allowed annotation. + # allowed_devices = [ + # "/dev/fuse", + # "/dev/net/tun", + # ] + + # List of additional devices. specified as + # "::", for example: "--device=/dev/sdc:/dev/xvdc:rwm". + # If it is empty or commented out, only the devices + # defined in the container json file by the user/kube will be added. + # additional_devices = [ + # ] + + # List of directories to scan for CDI Spec files. + # cdi_spec_dirs = [ + # "/etc/cdi", + # "/var/run/cdi", + # ] + + # Change the default behavior of setting container devices uid/gid from CRI's + # SecurityContext (RunAsUser/RunAsGroup) instead of taking host's uid/gid. + # Defaults to false. + # device_ownership_from_security_context = false + + # Path to OCI hooks directories for automatically executed hooks. If one of the + # directories does not exist, then CRI-O will automatically skip them. + # hooks_dir = [ + # "/usr/share/containers/oci/hooks.d", + # ] + + # Path to the file specifying the defaults mounts for each container. The + # format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads + # its default mounts from the following two files: + # + # 1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the + # override file, where users can either add in their own default mounts, or + # override the default mounts shipped with the package. + # + # 2) /usr/share/containers/mounts.conf: This is the default file read for + # mounts. If you want CRI-O to read from a different, specific mounts file, + # you can change the default_mounts_file. Note, if this is done, CRI-O will + # only add mounts it finds in this file. + # + # default_mounts_file = "" + + # Maximum number of processes allowed in a container. + # This option is deprecated. The Kubelet flag '--pod-pids-limit' should be used instead. + # pids_limit = -1 + + # Maximum sized allowed for the container log file. Negative numbers indicate + # that no size limit is imposed. If it is positive, it must be >= 8192 to + # match/exceed conmon's read buffer. The file is truncated and re-opened so the + # limit is never exceeded. This option is deprecated. The Kubelet flag '--container-log-max-size' should be used instead. 
+ # log_size_max = -1 + + # Whether container output should be logged to journald in addition to the kubernetes log file + # log_to_journald = false + + # Path to directory in which container exit files are written to by conmon. + # container_exits_dir = "/var/run/crio/exits" + + # Path to directory for container attach sockets. + # container_attach_socket_dir = "/var/run/crio" + + # The prefix to use for the source of the bind mounts. + # bind_mount_prefix = "" + + # If set to true, all containers will run in read-only mode. + # read_only = false + + # Changes the verbosity of the logs based on the level it is set to. Options + # are fatal, panic, error, warn, info, debug and trace. This option supports + # live configuration reload. + # log_level = "info" + + # Filter the log messages by the provided regular expression. + # This option supports live configuration reload. + # log_filter = "" + + # The UID mappings for the user namespace of each container. A range is + # specified in the form containerUID:HostUID:Size. Multiple ranges must be + # separated by comma. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # uid_mappings = "" + + # The GID mappings for the user namespace of each container. A range is + # specified in the form containerGID:HostGID:Size. Multiple ranges must be + # separated by comma. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # gid_mappings = "" + + # If set, CRI-O will reject any attempt to map host UIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_uid = -1 + + # If set, CRI-O will reject any attempt to map host GIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_gid = -1 + + # The minimal amount of time in seconds to wait before issuing a timeout + # regarding the proper termination of the container. The lowest possible + # value is 30s, whereas lower values are not considered by CRI-O. + # ctr_stop_timeout = 30 + + # drop_infra_ctr determines whether CRI-O drops the infra container + # when a pod does not have a private PID namespace, and does not use + # a kernel separating runtime (like kata). + # It requires manage_ns_lifecycle to be true. + # drop_infra_ctr = true + + # infra_ctr_cpuset determines what CPUs will be used to run infra containers. + # You can use linux CPU list format to specify desired CPUs. + # To get better isolation for guaranteed pods, set this parameter to be equal to kubelet reserved-cpus. + # infra_ctr_cpuset = "" + + # shared_cpuset determines the CPU set which is allowed to be shared between guaranteed containers, + # regardless of, and in addition to, the exclusiveness of their CPUs. + # This field is optional and would not be used if not specified. + # You can specify CPUs in the Linux CPU list format. + # shared_cpuset = "" + + # The directory where the state of the managed namespaces gets tracked. + # Only used when manage_ns_lifecycle is true. 
+ # namespaces_dir = "/var/run" + + # pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle + # pinns_path = "" + + # Globally enable/disable CRIU support which is necessary to + # checkpoint and restore container or pods (even if CRIU is found in $PATH). + # enable_criu_support = true + + # Enable/disable the generation of the container, + # sandbox lifecycle events to be sent to the Kubelet to optimize the PLEG + # enable_pod_events = false + + # default_runtime is the _name_ of the OCI runtime to be used as the default. + # The name is matched against the runtimes map below. + # default_runtime = "crun" + + # A list of paths that, when absent from the host, + # will cause a container creation to fail (as opposed to the current behavior being created as a directory). + # This option is to protect from source locations whose existence as a directory could jeopardize the health of the node, and whose + # creation as a file is not desired either. + # An example is /etc/hostname, which will cause failures on reboot if it's created as a directory, but often doesn't exist because + # the hostname is being managed dynamically. + # absent_mount_sources_to_reject = [ + # ] + + # The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes. + # The runtime to use is picked based on the runtime handler provided by the CRI. + # If no runtime handler is provided, the "default_runtime" will be used. + # Each entry in the table should follow the format: + # + # [crio.runtime.runtimes.runtime-handler] + # runtime_path = "/path/to/the/executable" + # runtime_type = "oci" + # runtime_root = "/path/to/the/root" + # inherit_default_runtime = false + # monitor_path = "/path/to/container/monitor" + # monitor_cgroup = "/cgroup/path" + # monitor_exec_cgroup = "/cgroup/path" + # monitor_env = [] + # privileged_without_host_devices = false + # allowed_annotations = [] + # platform_runtime_paths = { "os/arch" = "/path/to/binary" } + # no_sync_log = false + # default_annotations = {} + # stream_websockets = false + # seccomp_profile = "" + # Where: + # - runtime-handler: Name used to identify the runtime. + # - runtime_path (optional, string): Absolute path to the runtime executable in + # the host filesystem. If omitted, the runtime-handler identifier should match + # the runtime executable name, and the runtime executable should be placed + # in $PATH. + # - runtime_type (optional, string): Type of runtime, one of: "oci", "vm". If + # omitted, an "oci" runtime is assumed. + # - runtime_root (optional, string): Root directory for storage of containers + # state. + # - runtime_config_path (optional, string): the path for the runtime configuration + # file. This can only be used with when using the VM runtime_type. + # - inherit_default_runtime (optional, bool): when true the runtime_path, + # runtime_type, runtime_root and runtime_config_path will be replaced by + # the values from the default runtime on load time. + # - privileged_without_host_devices (optional, bool): an option for restricting + # host devices from being passed to privileged containers. + # - allowed_annotations (optional, array of strings): an option for specifying + # a list of experimental annotations that this runtime handler is allowed to process. + # The currently recognized values are: + # "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod. + # "io.kubernetes.cri-o.cgroup2-mount-hierarchy-rw" for mounting cgroups writably when set to "true". 
+ # "io.kubernetes.cri-o.Devices" for configuring devices for the pod. + # "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm. + # "io.kubernetes.cri-o.UnifiedCgroup.$CTR_NAME" for configuring the cgroup v2 unified block for a container. + # "io.containers.trace-syscall" for tracing syscalls via the OCI seccomp BPF hook. + # "io.kubernetes.cri-o.seccompNotifierAction" for enabling the seccomp notifier feature. + # "io.kubernetes.cri-o.umask" for setting the umask for container init process. + # "io.kubernetes.cri.rdt-class" for setting the RDT class of a container + # "seccomp-profile.kubernetes.cri-o.io" for setting the seccomp profile for: + # - a specific container by using: "seccomp-profile.kubernetes.cri-o.io/" + # - a whole pod by using: "seccomp-profile.kubernetes.cri-o.io/POD" + # Note that the annotation works on containers as well as on images. + # For images, the plain annotation "seccomp-profile.kubernetes.cri-o.io" + # can be used without the required "/POD" suffix or a container name. + # "io.kubernetes.cri-o.DisableFIPS" for disabling FIPS mode in a Kubernetes pod within a FIPS-enabled cluster. + # - monitor_path (optional, string): The path of the monitor binary. Replaces + # deprecated option "conmon". + # - monitor_cgroup (optional, string): The cgroup the container monitor process will be put in. + # Replaces deprecated option "conmon_cgroup". + # - monitor_exec_cgroup (optional, string): If set to "container", indicates exec probes + # should be moved to the container's cgroup + # - monitor_env (optional, array of strings): Environment variables to pass to the monitor. + # Replaces deprecated option "conmon_env". + # When using the pod runtime and conmon-rs, then the monitor_env can be used to further configure + # conmon-rs by using: + # - LOG_DRIVER=[none,systemd,stdout] - Enable logging to the configured target, defaults to none. + # - HEAPTRACK_OUTPUT_PATH=/path/to/dir - Enable heaptrack profiling and save the files to the set directory. + # - HEAPTRACK_BINARY_PATH=/path/to/heaptrack - Enable heaptrack profiling and use set heaptrack binary. + # - platform_runtime_paths (optional, map): A mapping of platforms to the corresponding + # runtime executable paths for the runtime handler. + # - container_min_memory (optional, string): The minimum memory that must be set for a container. + # This value can be used to override the currently set global value for a specific runtime. If not set, + # a global default value of "12 MiB" will be used. + # - no_sync_log (optional, bool): If set to true, the runtime will not sync the log file on rotate or container exit. + # This option is only valid for the 'oci' runtime type. Setting this option to true can cause data loss, e.g. + # when a machine crash happens. + # - default_annotations (optional, map): Default annotations if not overridden by the pod spec. + # - stream_websockets (optional, bool): Enable the WebSocket protocol for container exec, attach and port forward. + # - seccomp_profile (optional, string): The absolute path of the seccomp.json profile which is used as the default + # seccomp profile for the runtime. + # If not specified or set to "", the runtime seccomp_profile will be used. + # If that is also not specified or set to "", the internal default seccomp profile will be applied. + # + # Using the seccomp notifier feature: + # + # This feature can help you to debug seccomp related issues, for example if + # blocked syscalls (permission denied errors) have negative impact on the workload. 
+ # + # To be able to use this feature, configure a runtime which has the annotation + # "io.kubernetes.cri-o.seccompNotifierAction" in the allowed_annotations array. + # + # It also requires at least runc 1.1.0 or crun 0.19 which support the notifier + # feature. + # + # If everything is setup, CRI-O will modify chosen seccomp profiles for + # containers if the annotation "io.kubernetes.cri-o.seccompNotifierAction" is + # set on the Pod sandbox. CRI-O will then get notified if a container is using + # a blocked syscall and then terminate the workload after a timeout of 5 + # seconds if the value of "io.kubernetes.cri-o.seccompNotifierAction=stop". + # + # This also means that multiple syscalls can be captured during that period, + # while the timeout will get reset once a new syscall has been discovered. + # + # This also means that the Pods "restartPolicy" has to be set to "Never", + # otherwise the kubelet will restart the container immediately. + # + # Please be aware that CRI-O is not able to get notified if a syscall gets + # blocked based on the seccomp defaultAction, which is a general runtime + # limitation. + + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_type = "" + runtime_root = "/run/crun" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + allowed_annotations = [ + "io.containers.trace-syscall", + ] + privileged_without_host_devices = false + + + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_type = "" + runtime_root = "/run/runc" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + + privileged_without_host_devices = false + + + + # The workloads table defines ways to customize containers with different resources + # that work based on annotations, rather than the CRI. + # Note, the behavior of this table is EXPERIMENTAL and may change at any time. + # Each workload, has a name, activation_annotation, annotation_prefix and set of resources it supports mutating. + # The currently supported resources are "cpuperiod" "cpuquota", "cpushares", "cpulimit" and "cpuset". The values for "cpuperiod" and "cpuquota" are denoted in microseconds. + # The value for "cpulimit" is denoted in millicores, this value is used to calculate the "cpuquota" with the supplied "cpuperiod" or the default "cpuperiod". + # Note that the "cpulimit" field overrides the "cpuquota" value supplied in this configuration. + # Each resource can have a default value specified, or be empty. + # For a container to opt-into this workload, the pod should be configured with the annotation $activation_annotation (key only, value is ignored). + # To customize per-container, an annotation of the form $annotation_prefix.$resource/$ctrName = "value" can be specified + # signifying for that resource type to override the default value. + # If the annotation_prefix is not present, every container in the pod will be given the default values. 
+ # Example: + # [crio.runtime.workloads.workload-type] + # activation_annotation = "io.crio/workload" + # annotation_prefix = "io.crio.workload-type" + # [crio.runtime.workloads.workload-type.resources] + # cpuset = "0-1" + # cpushares = "5" + # cpuquota = "1000" + # cpuperiod = "100000" + # cpulimit = "35" + # Where: + # The workload name is workload-type. + # To specify, the pod must have the "io.crio.workload" annotation (this is a precise string match). + # This workload supports setting cpuset and cpu resources. + # annotation_prefix is used to customize the different resources. + # To configure the cpu shares a container gets in the example above, the pod would have to have the following annotation: + # "io.crio.workload-type/$container_name = {"cpushares": "value"}" + + # hostnetwork_disable_selinux determines whether + # SELinux should be disabled within a pod when it is running in the host network namespace + # Default value is set to true + # hostnetwork_disable_selinux = true + + # disable_hostport_mapping determines whether to enable/disable + # the container hostport mapping in CRI-O. + # Default value is set to 'false' + # disable_hostport_mapping = false + + # timezone To set the timezone for a container in CRI-O. + # If an empty string is provided, CRI-O retains its default behavior. Use 'Local' to match the timezone of the host machine. + # timezone = "" + + # The crio.image table contains settings pertaining to the management of OCI images. + # + # CRI-O reads its configured registries defaults from the system wide + # containers-registries.conf(5) located in /etc/containers/registries.conf. + [crio.image] + + # Default transport for pulling images from a remote container storage. + # default_transport = "docker://" + + # The path to a file containing credentials necessary for pulling images from + # secure registries. The file is similar to that of /var/lib/kubelet/config.json + # global_auth_file = "" + + # The image used to instantiate infra containers. + # This option supports live configuration reload. + # pause_image = "registry.k8s.io/pause:3.10.1" + + # The path to a file containing credentials specific for pulling the pause_image from + # above. The file is similar to that of /var/lib/kubelet/config.json + # This option supports live configuration reload. + # pause_image_auth_file = "" + + # The command to run to have a container stay in the paused state. + # When explicitly set to "", it will fallback to the entrypoint and command + # specified in the pause image. When commented out, it will fallback to the + # default: "/pause". This option supports live configuration reload. + # pause_command = "/pause" + + # List of images to be excluded from the kubelet's garbage collection. + # It allows specifying image names using either exact, glob, or keyword + # patterns. Exact matches must match the entire name, glob matches can + # have a wildcard * at the end, and keyword matches can have wildcards + # on both ends. By default, this list includes the "pause" image if + # configured by the user, which is used as a placeholder in Kubernetes pods. + # pinned_images = [ + # ] + + # Path to the file which decides what sort of policy we use when deciding + # whether or not to trust an image that we've pulled. It is not recommended that + # this option be used, as the default behavior of using the system-wide default + # policy (i.e., /etc/containers/policy.json) is most often preferred. Please + # refer to containers-policy.json(5) for more details. 
+ signature_policy = "/etc/crio/policy.json" + + # Root path for pod namespace-separated signature policies. + # The final policy to be used on image pull will be /.json. + # If no pod namespace is being provided on image pull (via the sandbox config), + # or the concatenated path is non existent, then the signature_policy or system + # wide policy will be used as fallback. Must be an absolute path. + # signature_policy_dir = "/etc/crio/policies" + + # List of registries to skip TLS verification for pulling images. Please + # consider configuring the registries via /etc/containers/registries.conf before + # changing them here. + # This option is deprecated. Use registries.conf file instead. + # insecure_registries = [ + # ] + + # Controls how image volumes are handled. The valid values are mkdir, bind and + # ignore; the latter will ignore volumes entirely. + # image_volumes = "mkdir" + + # Temporary directory to use for storing big files + # big_files_temporary_dir = "" + + # If true, CRI-O will automatically reload the mirror registry when + # there is an update to the 'registries.conf.d' directory. Default value is set to 'false'. + # auto_reload_registries = false + + # The timeout for an image pull to make progress until the pull operation + # gets canceled. This value will be also used for calculating the pull progress interval to pull_progress_timeout / 10. + # Can be set to 0 to disable the timeout as well as the progress output. + # pull_progress_timeout = "0s" + + # The mode of short name resolution. + # The valid values are "enforcing" and "disabled", and the default is "enforcing". + # If "enforcing", an image pull will fail if a short name is used, but the results are ambiguous. + # If "disabled", the first result will be chosen. + # short_name_mode = "enforcing" + + # OCIArtifactMountSupport is whether CRI-O should support OCI artifacts. + # If set to false, mounting OCI Artifacts will result in an error. + # oci_artifact_mount_support = true + # The crio.network table containers settings pertaining to the management of + # CNI plugins. + [crio.network] + + # The default CNI network name to be selected. If not set or "", then + # CRI-O will pick-up the first one found in network_dir. + # cni_default_network = "" + + # Path to the directory where CNI configuration files are located. + # network_dir = "/etc/cni/net.d/" + + # Paths to directories where CNI plugin binaries are located. + # plugin_dirs = [ + # "/opt/cni/bin/", + # ] + + # List of included pod metrics. + # included_pod_metrics = [ + # ] + + # A necessary configuration for Prometheus based metrics retrieval + [crio.metrics] + + # Globally enable or disable metrics support. + # enable_metrics = false + + # Specify enabled metrics collectors. + # Per default all metrics are enabled. + # It is possible, to prefix the metrics with "container_runtime_" and "crio_". + # For example, the metrics collector "operations" would be treated in the same + # way as "crio_operations" and "container_runtime_crio_operations". 
+ # metrics_collectors = [ + # "image_pulls_layer_size", + # "containers_events_dropped_total", + # "containers_oom_total", + # "processes_defunct", + # "operations_total", + # "operations_latency_seconds", + # "operations_latency_seconds_total", + # "operations_errors_total", + # "image_pulls_bytes_total", + # "image_pulls_skipped_bytes_total", + # "image_pulls_failure_total", + # "image_pulls_success_total", + # "image_layer_reuse_total", + # "containers_oom_count_total", + # "containers_seccomp_notifier_count_total", + # "resources_stalled_at_stage", + # "containers_stopped_monitor_count", + # ] + # The IP address or hostname on which the metrics server will listen. + # metrics_host = "127.0.0.1" + + # The port on which the metrics server will listen. + # metrics_port = 9090 + + # Local socket path to bind the metrics server to + # metrics_socket = "" + + # The certificate for the secure metrics server. + # If the certificate is not available on disk, then CRI-O will generate a + # self-signed one. CRI-O also watches for changes of this path and reloads the + # certificate on any modification event. + # metrics_cert = "" + + # The certificate key for the secure metrics server. + # Behaves in the same way as the metrics_cert. + # metrics_key = "" + + # A necessary configuration for OpenTelemetry trace data exporting + [crio.tracing] + + # Globally enable or disable exporting OpenTelemetry traces. + # enable_tracing = false + + # Address on which the gRPC trace collector listens on. + # tracing_endpoint = "127.0.0.1:4317" + + # Number of samples to collect per million spans. Set to 1000000 to always sample. + # tracing_sampling_rate_per_million = 0 + + # CRI-O NRI configuration. + [crio.nri] + + # Globally enable or disable NRI. + # enable_nri = true + + # NRI socket to listen on. + # nri_listen = "/var/run/nri/nri.sock" + + # NRI plugin directory to use. + # nri_plugin_dir = "/opt/nri/plugins" + + # NRI plugin configuration directory to use. + # nri_plugin_config_dir = "/etc/nri/conf.d" + + # Disable connections from externally launched NRI plugins. + # nri_disable_connections = false + + # Timeout for a plugin to register itself with NRI. + # nri_plugin_registration_timeout = "5s" + + # Timeout for a plugin to handle an NRI request. + # nri_plugin_request_timeout = "2s" + + # NRI default validator configuration. + # If enabled, the builtin default validator can be used to reject a container if some + # NRI plugin requested a restricted adjustment. Currently the following adjustments + # can be restricted/rejected: + # - OCI hook injection + # - adjustment of runtime default seccomp profile + # - adjustment of unconfied seccomp profile + # - adjustment of a custom seccomp profile + # - adjustment of linux namespaces + # Additionally, the default validator can be used to reject container creation if any + # of a required set of plugins has not processed a container creation request, unless + # the container has been annotated to tolerate a missing plugin. 
+ # + # [crio.nri.default_validator] + # nri_enable_default_validator = false + # nri_validator_reject_oci_hook_adjustment = false + # nri_validator_reject_runtime_default_seccomp_adjustment = false + # nri_validator_reject_unconfined_seccomp_adjustment = false + # nri_validator_reject_custom_seccomp_adjustment = false + # nri_validator_reject_namespace_adjustment = false + # nri_validator_required_plugins = [ + # ] + # nri_validator_tolerate_missing_plugins_annotation = "" + + # Necessary information pertaining to container and pod stats reporting. + [crio.stats] + + # The number of seconds between collecting pod and container stats. + # If set to 0, the stats are collected on-demand instead. + # stats_collection_period = 0 + + # The number of seconds between collecting pod/container stats and pod + # sandbox metrics. If set to 0, the metrics/stats are collected on-demand instead. + # collection_period = 0 + + + ----------------------- debugLogs end: auto-999044 [took: 12.685565378s] -------------------------------- + helpers_test.go:175: Cleaning up "auto-999044" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p auto-999044 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p auto-999044: (2.240657381s) +=== CONT TestNetworkPlugins/group/false +=== RUN TestNetworkPlugins/group/false/Start + net_test.go:112: (dbg) Run: out/minikube-linux-amd64 start -p false-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=false --driver=docker --container-runtime=docker +E1102 23:24:05.677076 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:24:05.683390 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:24:05.694693 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:24:05.715956 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:24:05.757263 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:24:05.838685 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:24:06.000038 37869 cert_rotation.go:172] "Loading client cert failed" err="open 
/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:24:06.321577 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:24:06.962816 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:24:08.245291 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:24:10.807098 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:24:15.929366 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p calico-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=calico --driver=docker --container-runtime=docker: (42.528342209s) +=== RUN TestNetworkPlugins/group/calico/ControllerPod + net_test.go:120: (dbg) TestNetworkPlugins/group/calico/ControllerPod: waiting 10m0s for pods matching "k8s-app=calico-node" in namespace "kube-system" ... + helpers_test.go:352: "calico-node-qcvqc" [fbd84e65-cb83-485b-862f-96e56d2177e0] Running / Ready:ContainersNotReady (containers with unready status: [calico-node]) / ContainersReady:ContainersNotReady (containers with unready status: [calico-node]) + net_test.go:120: (dbg) TestNetworkPlugins/group/calico/ControllerPod: k8s-app=calico-node healthy within 6.002879045s +=== RUN TestNetworkPlugins/group/calico/KubeletFlags + net_test.go:133: (dbg) Run: out/minikube-linux-amd64 ssh -p calico-999044 "pgrep -a kubelet" +I1102 23:24:25.751383 37869 config.go:182] Loaded profile config "calico-999044": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 +=== RUN TestNetworkPlugins/group/calico/NetCatPod + net_test.go:149: (dbg) Run: kubectl --context calico-999044 replace --force -f testdata/netcat-deployment.yaml + net_test.go:163: (dbg) TestNetworkPlugins/group/calico/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ... 
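For orientation while reading the waits above: the ControllerPod and NetCatPod steps are simply polling pods by label. A minimal sketch of the equivalent manual checks, assuming the calico-999044 context from this run is still available; the label selectors are the ones quoted in the log:

# Pods the ControllerPod step waits on (kube-system, label taken from the log above).
kubectl --context calico-999044 get pods -n kube-system -l k8s-app=calico-node -o wide

# Pods the NetCatPod step waits on (default namespace).
kubectl --context calico-999044 get pods -n default -l app=netcat -o wide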
+ helpers_test.go:352: "netcat-cd4db9dbf-7nq9q" [8fb21457-8ecf-460b-be19-61ea84636cc0] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils]) +E1102 23:24:26.171223 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p kindnet-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=kindnet --driver=docker --container-runtime=docker: (53.771856117s) +=== RUN TestNetworkPlugins/group/kindnet/ControllerPod + net_test.go:120: (dbg) TestNetworkPlugins/group/kindnet/ControllerPod: waiting 10m0s for pods matching "app=kindnet" in namespace "kube-system" ... + helpers_test.go:352: "kindnet-kt8qf" [d44690e1-ff4e-4dcd-a772-c4f53bf4b188] Running + helpers_test.go:352: "netcat-cd4db9dbf-7nq9q" [8fb21457-8ecf-460b-be19-61ea84636cc0] Running + net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p custom-flannel-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=testdata/kube-flannel.yaml --driver=docker --container-runtime=docker: (46.156898436s) +=== RUN TestNetworkPlugins/group/custom-flannel/KubeletFlags + net_test.go:133: (dbg) Run: out/minikube-linux-amd64 ssh -p custom-flannel-999044 "pgrep -a kubelet" +I1102 23:24:30.191501 37869 config.go:182] Loaded profile config "custom-flannel-999044": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 +=== RUN TestNetworkPlugins/group/custom-flannel/NetCatPod + net_test.go:149: (dbg) Run: kubectl --context custom-flannel-999044 replace --force -f testdata/netcat-deployment.yaml + net_test.go:163: (dbg) TestNetworkPlugins/group/custom-flannel/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ... + helpers_test.go:352: "netcat-cd4db9dbf-ntqvl" [4e902ac7-c590-4309-bd14-7af633f026b5] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils]) + net_test.go:120: (dbg) TestNetworkPlugins/group/kindnet/ControllerPod: app=kindnet healthy within 6.00277468s +=== RUN TestNetworkPlugins/group/kindnet/KubeletFlags + net_test.go:133: (dbg) Run: out/minikube-linux-amd64 ssh -p kindnet-999044 "pgrep -a kubelet" + helpers_test.go:352: "netcat-cd4db9dbf-ntqvl" [4e902ac7-c590-4309-bd14-7af633f026b5] Running +I1102 23:24:32.373087 37869 config.go:182] Loaded profile config "kindnet-999044": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 +=== RUN TestNetworkPlugins/group/kindnet/NetCatPod + net_test.go:149: (dbg) Run: kubectl --context kindnet-999044 replace --force -f testdata/netcat-deployment.yaml + net_test.go:163: (dbg) TestNetworkPlugins/group/kindnet/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ... 
+ helpers_test.go:352: "netcat-cd4db9dbf-mssqr" [5dd1b35f-9c84-46a3-b6d8-563d845b737c] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils]) + net_test.go:163: (dbg) TestNetworkPlugins/group/calico/NetCatPod: app=netcat healthy within 9.00195188s +=== RUN TestNetworkPlugins/group/calico/DNS + net_test.go:175: (dbg) Run: kubectl --context calico-999044 exec deployment/netcat -- nslookup kubernetes.default +=== RUN TestNetworkPlugins/group/calico/Localhost + net_test.go:194: (dbg) Run: kubectl --context calico-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080" +=== RUN TestNetworkPlugins/group/calico/HairPin + net_test.go:264: (dbg) Run: kubectl --context calico-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080" + net_test.go:210: "calico" test finished in 4m35.563962262s, failed=false + helpers_test.go:352: "netcat-cd4db9dbf-mssqr" [5dd1b35f-9c84-46a3-b6d8-563d845b737c] Running + net_test.go:163: (dbg) TestNetworkPlugins/group/custom-flannel/NetCatPod: app=netcat healthy within 8.001802852s +=== RUN TestNetworkPlugins/group/custom-flannel/DNS + net_test.go:175: (dbg) Run: kubectl --context custom-flannel-999044 exec deployment/netcat -- nslookup kubernetes.default +=== RUN TestNetworkPlugins/group/custom-flannel/Localhost + net_test.go:194: (dbg) Run: kubectl --context custom-flannel-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080" +=== RUN TestNetworkPlugins/group/custom-flannel/HairPin + net_test.go:264: (dbg) Run: kubectl --context custom-flannel-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080" + net_test.go:210: "custom-flannel" test finished in 4m39.025627434s, failed=false + net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p false-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=false --driver=docker --container-runtime=docker: (35.387688021s) +=== RUN TestNetworkPlugins/group/false/KubeletFlags + net_test.go:133: (dbg) Run: out/minikube-linux-amd64 ssh -p false-999044 "pgrep -a kubelet" +I1102 23:24:39.755636 37869 config.go:182] Loaded profile config "false-999044": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 +=== RUN TestNetworkPlugins/group/false/NetCatPod + net_test.go:149: (dbg) Run: kubectl --context false-999044 replace --force -f testdata/netcat-deployment.yaml + net_test.go:163: (dbg) TestNetworkPlugins/group/false/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ... 
+ helpers_test.go:352: "netcat-cd4db9dbf-lrsv6" [631c7650-d88b-413d-af32-b6dfc3454f24] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils]) + net_test.go:163: (dbg) TestNetworkPlugins/group/kindnet/NetCatPod: app=netcat healthy within 8.003153295s +=== RUN TestNetworkPlugins/group/kindnet/DNS + net_test.go:175: (dbg) Run: kubectl --context kindnet-999044 exec deployment/netcat -- nslookup kubernetes.default +=== RUN TestNetworkPlugins/group/kindnet/Localhost + net_test.go:194: (dbg) Run: kubectl --context kindnet-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080" +=== RUN TestNetworkPlugins/group/kindnet/HairPin + net_test.go:264: (dbg) Run: kubectl --context kindnet-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080" + net_test.go:210: "kindnet" test finished in 4m41.226023834s, failed=false +E1102 23:24:41.961642 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + helpers_test.go:352: "netcat-cd4db9dbf-lrsv6" [631c7650-d88b-413d-af32-b6dfc3454f24] Running +E1102 23:24:46.653064 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + net_test.go:163: (dbg) TestNetworkPlugins/group/false/NetCatPod: app=netcat healthy within 8.00373912s +=== RUN TestNetworkPlugins/group/false/DNS + net_test.go:175: (dbg) Run: kubectl --context false-999044 exec deployment/netcat -- nslookup kubernetes.default +=== RUN TestNetworkPlugins/group/false/Localhost + net_test.go:194: (dbg) Run: kubectl --context false-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080" + net_test.go:211: + ----------------------- debugLogs start: calico-999044 [pass: true] -------------------------------- + >>> netcat: nslookup kubernetes.default: + Server: 10.96.0.10 + Address: 10.96.0.10#53 + + Name: kubernetes.default.svc.cluster.local + Address: 10.96.0.1 + + + + >>> netcat: nc 10.96.0.10 udp/53: + Connection to 10.96.0.10 53 port [udp/*] succeeded! + + + >>> netcat: nc 10.96.0.10 tcp/53: + Connection to 10.96.0.10 53 port [tcp/*] succeeded! + + + >>> netcat: /etc/nsswitch.conf: + cat: can't open '/etc/nsswitch.conf': No such file or directory + command terminated with exit code 1 + + + >>> netcat: /etc/hosts: + # Kubernetes-managed hosts file. + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + fe00::0 ip6-localnet + fe00::0 ip6-mcastprefix + fe00::1 ip6-allnodes + fe00::2 ip6-allrouters + 10.244.176.3 netcat-cd4db9dbf-7nq9q + + + >>> netcat: /etc/resolv.conf: + nameserver 10.96.0.10 + search default.svc.cluster.local svc.cluster.local cluster.local test-pods.svc.cluster.local c.k8s-infra-prow-build.internal google.internal + options ndots:5 + + + >>> host: /etc/nsswitch.conf: + # /etc/nsswitch.conf + # + # Example configuration of GNU Name Service Switch functionality. + # If you have the `glibc-doc-reference' and `info' packages installed, try: + # `info libc "Name Service Switch"' for information about this file. 
+ + passwd: files + group: files + shadow: files + gshadow: files + + hosts: files dns + networks: files + + protocols: db files + services: db files + ethers: db files + rpc: db files + + netgroup: nis + + + >>> host: /etc/hosts: + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + fe00:: ip6-localnet + ff00:: ip6-mcastprefix + ff02::1 ip6-allnodes + ff02::2 ip6-allrouters + 192.168.103.2 calico-999044 + 192.168.103.1 host.minikube.internal + 192.168.103.2 control-plane.minikube.internal + + + >>> host: /etc/resolv.conf: + # Generated by Docker Engine. + # This file can be edited; Docker Engine will not make further changes once it + # has been modified. + + nameserver 192.168.103.1 + search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal + options ndots:5 + + # Based on host file: '/etc/resolv.conf' (internal resolver) + # ExtServers: [host(10.35.240.10)] + # Overrides: [] + # Option ndots from: host + + + >>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, : + Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice + NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME + node/calico-999044 Ready control-plane 52s v1.34.1 192.168.103.2 Debian GNU/Linux 12 (bookworm) 6.6.97+ docker://28.5.1 + + NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR + default service/kubernetes ClusterIP 10.96.0.1 443/TCP 50s + default service/netcat ClusterIP 10.104.166.171 8080/TCP 16s app=netcat + kube-system service/kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP,9153/TCP 50s k8s-app=kube-dns + + NAMESPACE NAME ENDPOINTS AGE + default endpoints/kubernetes 192.168.103.2:8443 50s + default endpoints/netcat 10.244.176.3:8080 16s + kube-system endpoints/k8s.io-minikube-hostpath 43s + kube-system endpoints/kube-dns 10.244.176.1:53,10.244.176.1:53,10.244.176.1:9153 44s + + NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR + kube-system daemonset.apps/calico-node 1 1 1 1 1 kubernetes.io/os=linux 49s calico-node docker.io/calico/node:v3.30.3 k8s-app=calico-node + kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 50s kube-proxy registry.k8s.io/kube-proxy:v1.34.1 k8s-app=kube-proxy + + NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR + default deployment.apps/netcat 1/1 1 1 16s dnsutils registry.k8s.io/e2e-test-images/agnhost:2.40 app=netcat + kube-system deployment.apps/calico-kube-controllers 1/1 1 1 49s calico-kube-controllers docker.io/calico/kube-controllers:v3.30.3 k8s-app=calico-kube-controllers + kube-system deployment.apps/coredns 1/1 1 1 50s coredns registry.k8s.io/coredns/coredns:v1.12.1 k8s-app=kube-dns + + NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES + default pod/netcat-cd4db9dbf-7nq9q 1/1 Running 0 16s 10.244.176.3 calico-999044 + kube-system pod/calico-kube-controllers-59556d9b4c-rgpgr 1/1 Running 0 43s 10.244.176.2 calico-999044 + kube-system pod/calico-node-qcvqc 1/1 Running 0 44s 192.168.103.2 calico-999044 + kube-system pod/coredns-66bc5c9577-9rf5v 1/1 Running 0 43s 10.244.176.1 calico-999044 + kube-system pod/etcd-calico-999044 1/1 Running 0 50s 192.168.103.2 calico-999044 + kube-system pod/kube-apiserver-calico-999044 1/1 Running 0 50s 192.168.103.2 calico-999044 + kube-system pod/kube-controller-manager-calico-999044 1/1 Running 0 50s 192.168.103.2 calico-999044 + kube-system 
pod/kube-proxy-74pjj 1/1 Running 0 44s 192.168.103.2 calico-999044 + kube-system pod/kube-scheduler-calico-999044 1/1 Running 0 50s 192.168.103.2 calico-999044 + kube-system pod/storage-provisioner 1/1 Running 0 43s 192.168.103.2 calico-999044 + + + >>> host: crictl pods: + POD ID CREATED STATE NAME NAMESPACE ATTEMPT RUNTIME + 5b0a41ed8d2df 15 seconds ago Ready netcat-cd4db9dbf-7nq9q default 0 (default) + 57adce76f9bb2 26 seconds ago Ready coredns-66bc5c9577-9rf5v kube-system 6 (default) + 0cd87fa4d54f7 26 seconds ago Ready calico-kube-controllers-59556d9b4c-rgpgr kube-system 6 (default) + 7749886edb8c6 27 seconds ago NotReady coredns-66bc5c9577-9rf5v kube-system 5 (default) + b0cd42e55b60b 27 seconds ago NotReady calico-kube-controllers-59556d9b4c-rgpgr kube-system 5 (default) + e3ef798be033b 28 seconds ago NotReady coredns-66bc5c9577-9rf5v kube-system 4 (default) + e9d2d32159bdc 28 seconds ago NotReady calico-kube-controllers-59556d9b4c-rgpgr kube-system 4 (default) + e5a211e29e4b8 29 seconds ago NotReady coredns-66bc5c9577-9rf5v kube-system 3 (default) + 03c8da80a5d19 29 seconds ago NotReady calico-kube-controllers-59556d9b4c-rgpgr kube-system 3 (default) + bd9b6b0d25fa8 30 seconds ago NotReady coredns-66bc5c9577-9rf5v kube-system 2 (default) + 104d5bebead52 30 seconds ago NotReady calico-kube-controllers-59556d9b4c-rgpgr kube-system 2 (default) + fac0c45ed698d 31 seconds ago NotReady calico-kube-controllers-59556d9b4c-rgpgr kube-system 1 (default) + b9fc4e3f4f533 31 seconds ago NotReady coredns-66bc5c9577-9rf5v kube-system 1 (default) + 8dac9ede76929 32 seconds ago Ready storage-provisioner kube-system 0 (default) + fc729db3bd1aa 32 seconds ago NotReady calico-kube-controllers-59556d9b4c-rgpgr kube-system 0 (default) + 4e1e45bf7bd32 32 seconds ago NotReady coredns-66bc5c9577-9rf5v kube-system 0 (default) + 2603a408e2c6b 43 seconds ago Ready calico-node-qcvqc kube-system 0 (default) + dde9b4da154f8 43 seconds ago Ready kube-proxy-74pjj kube-system 0 (default) + 2e5c3272146af 53 seconds ago Ready kube-controller-manager-calico-999044 kube-system 0 (default) + c5a68c7cdf9c4 53 seconds ago Ready kube-apiserver-calico-999044 kube-system 0 (default) + ae8ec048213c0 53 seconds ago Ready etcd-calico-999044 kube-system 0 (default) + be24cc3df1e5b 53 seconds ago Ready kube-scheduler-calico-999044 kube-system 0 (default) + + + >>> host: crictl containers: + CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE + fe56587fb359a registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 14 seconds ago Running dnsutils 0 5b0a41ed8d2df netcat-cd4db9dbf-7nq9q default + 4ef632533934d calico/kube-controllers@sha256:b9df43a10ec4cc40ab95779f646d1f9c1675259b3baacf883f5247333bb6385d 21 seconds ago Running calico-kube-controllers 0 0cd87fa4d54f7 calico-kube-controllers-59556d9b4c-rgpgr kube-system + 8ffa310205890 52546a367cc9e 25 seconds ago Running coredns 0 57adce76f9bb2 coredns-66bc5c9577-9rf5v kube-system + 95b421e96489a ce9c4ac0f175f 27 seconds ago Running calico-node 0 2603a408e2c6b calico-node-qcvqc kube-system + c6bf997abba43 calico/node@sha256:92d8bcca3280cd27b9c98cb6e70c3af10ad6ff8accd288919b04ae0cd6021c2e 27 seconds ago Exited mount-bpffs 0 2603a408e2c6b calico-node-qcvqc kube-system + 982b268669915 6e38f40d628db 32 seconds ago Running storage-provisioner 0 8dac9ede76929 storage-provisioner kube-system + d24bc8dcd5c32 034822460c2f6 36 seconds ago Exited install-cni 0 2603a408e2c6b calico-node-qcvqc kube-system + b8ccf290767be 
calico/cni@sha256:b32ac832411b188a8adc9e31b3e23cbbecd6d63c182a3802e947303f97c2f700 37 seconds ago Exited upgrade-ipam 0 2603a408e2c6b calico-node-qcvqc kube-system + 7e284a7fa8255 fc25172553d79 43 seconds ago Running kube-proxy 0 dde9b4da154f8 kube-proxy-74pjj kube-system + eccae0c56fef6 c80c8dbafe7dd 53 seconds ago Running kube-controller-manager 0 2e5c3272146af kube-controller-manager-calico-999044 kube-system + 0a35af3123a44 7dd6aaa1717ab 53 seconds ago Running kube-scheduler 0 be24cc3df1e5b kube-scheduler-calico-999044 kube-system + 57fa4d22fc492 c3994bc696102 53 seconds ago Running kube-apiserver 0 c5a68c7cdf9c4 kube-apiserver-calico-999044 kube-system + 76dbbd7263050 5f1f5298c888d 53 seconds ago Running etcd 0 ae8ec048213c0 etcd-calico-999044 kube-system + + + >>> k8s: describe netcat deployment: + Name: netcat + Namespace: default + CreationTimestamp: Sun, 02 Nov 2025 23:24:25 +0000 + Labels: app=netcat + Annotations: deployment.kubernetes.io/revision: 1 + Selector: app=netcat + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 25% max unavailable, 25% max surge + Pod Template: + Labels: app=netcat + Containers: + dnsutils: + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + Environment: + Mounts: + Volumes: + Node-Selectors: + Tolerations: + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: netcat-cd4db9dbf (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 16s deployment-controller Scaled up replica set netcat-cd4db9dbf from 0 to 1 + + + >>> k8s: describe netcat pod(s): + Name: netcat-cd4db9dbf-7nq9q + Namespace: default + Priority: 0 + Service Account: default + Node: calico-999044/192.168.103.2 + Start Time: Sun, 02 Nov 2025 23:24:25 +0000 + Labels: app=netcat + pod-template-hash=cd4db9dbf + Annotations: cni.projectcalico.org/containerID: 5b0a41ed8d2df07f6cb8986c9ef4b73d7b1d94071c83579d63cbed3f93f40fb5 + cni.projectcalico.org/podIP: 10.244.176.3/32 + cni.projectcalico.org/podIPs: 10.244.176.3/32 + Status: Running + IP: 10.244.176.3 + IPs: + IP: 10.244.176.3 + Controlled By: ReplicaSet/netcat-cd4db9dbf + Containers: + dnsutils: + Container ID: docker://fe56587fb359a7beae2c679f568b02dde0f6b58401cf3269ec68d0694437ad63 + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Image ID: docker-pullable://registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + State: Running + Started: Sun, 02 Nov 2025 23:24:27 +0000 + Ready: True + Restart Count: 0 + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-sntjb (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-api-access-sntjb: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: + Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + 
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 16s default-scheduler Successfully assigned default/netcat-cd4db9dbf-7nq9q to calico-999044 + Normal Pulling 15s kubelet Pulling image "registry.k8s.io/e2e-test-images/agnhost:2.40" + Normal Pulled 14s kubelet Successfully pulled image "registry.k8s.io/e2e-test-images/agnhost:2.40" in 1.208s (1.208s including waiting). Image size: 127004766 bytes. + Normal Created 14s kubelet Created container: dnsutils + Normal Started 14s kubelet Started container dnsutils + + + >>> k8s: netcat logs: + + + >>> k8s: describe coredns deployment: + Name: coredns + Namespace: kube-system + CreationTimestamp: Sun, 02 Nov 2025 23:23:51 +0000 + Labels: k8s-app=kube-dns + Annotations: deployment.kubernetes.io/revision: 1 + Selector: k8s-app=kube-dns + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 1 max unavailable, 25% max surge + Pod Template: + Labels: k8s-app=kube-dns + Service Account: coredns + Containers: + coredns: + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Ports: 53/UDP (dns), 53/TCP (dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from config-volume (ro) + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + Priority Class Name: system-cluster-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: coredns-66bc5c9577 (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 44s deployment-controller Scaled up replica set coredns-66bc5c9577 from 0 to 2 + Normal ScalingReplicaSet 43s deployment-controller Scaled down replica set coredns-66bc5c9577 from 2 to 1 + + + >>> k8s: describe coredns pods: + Name: coredns-66bc5c9577-9rf5v + Namespace: kube-system + Priority: 2000000000 + Priority Class Name: system-cluster-critical + Service Account: coredns + Node: calico-999044/192.168.103.2 + Start Time: Sun, 02 Nov 2025 23:24:09 +0000 + Labels: k8s-app=kube-dns + pod-template-hash=66bc5c9577 + Annotations: cni.projectcalico.org/containerID: 57adce76f9bb2442b05cffd4fb7e88936876d0f5b63af1966598fb88fa6526c4 + cni.projectcalico.org/podIP: 10.244.176.1/32 + cni.projectcalico.org/podIPs: 10.244.176.1/32 + Status: Running + IP: 10.244.176.1 + IPs: + IP: 10.244.176.1 + Controlled By: ReplicaSet/coredns-66bc5c9577 + Containers: + coredns: + Container ID: docker://8ffa31020589082ced7d4efe0b34145d92606dc0db8bea723b067a65046df579 + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Image ID: 
docker-pullable://registry.k8s.io/coredns/coredns@sha256:e8c262566636e6bc340ece6473b0eed193cad045384401529721ddbe6463d31c + Ports: 53/UDP (dns), 53/TCP (dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + State: Running + Started: Sun, 02 Nov 2025 23:24:16 +0000 + Ready: True + Restart Count: 0 + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from config-volume (ro) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-lfscb (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + kube-api-access-lfscb: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: Burstable + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning FailedScheduling 44s default-scheduler 0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. no new claims to deallocate, preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling. 
+ Normal Scheduled 33s default-scheduler Successfully assigned kube-system/coredns-66bc5c9577-9rf5v to calico-999044 + Warning FailedCreatePodSandBox 32s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "4e1e45bf7bd3230039dd866ea3b8b95ebf4ff48c9a880191409b0aed4464662c" network for pod "coredns-66bc5c9577-9rf5v": networkPlugin cni failed to set up pod "coredns-66bc5c9577-9rf5v_kube-system" network: plugin type="calico" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ + Warning FailedCreatePodSandBox 31s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "b9fc4e3f4f53354f33273b7dd75c7ddf149ee2d7c71d1dc4e6631962eb3783ce" network for pod "coredns-66bc5c9577-9rf5v": networkPlugin cni failed to set up pod "coredns-66bc5c9577-9rf5v_kube-system" network: plugin type="calico" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ + Warning FailedCreatePodSandBox 30s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "bd9b6b0d25fa888f29021ed8f79dac5b7f751fd9ef95f80fa6bfab8d371fb7da" network for pod "coredns-66bc5c9577-9rf5v": networkPlugin cni failed to set up pod "coredns-66bc5c9577-9rf5v_kube-system" network: plugin type="calico" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ + Warning FailedCreatePodSandBox 29s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "e5a211e29e4b83215dce5111f61cc05a70d29326947d7b0e29438dba8149e6cd" network for pod "coredns-66bc5c9577-9rf5v": networkPlugin cni failed to set up pod "coredns-66bc5c9577-9rf5v_kube-system" network: plugin type="calico" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ + Warning FailedCreatePodSandBox 28s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "e3ef798be033bbd7b1229da33bc6926aed958630402a0dc391bbe3050ef0242f" network for pod "coredns-66bc5c9577-9rf5v": networkPlugin cni failed to set up pod "coredns-66bc5c9577-9rf5v_kube-system" network: plugin type="calico" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ + Normal SandboxChanged 27s (x6 over 32s) kubelet Pod sandbox changed, it will be killed and re-created. 
+ Warning FailedCreatePodSandBox 27s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "7749886edb8c6937a6b4da9f9796c5c93702493a735fbed26e6891c4ac436e09" network for pod "coredns-66bc5c9577-9rf5v": networkPlugin cni failed to set up pod "coredns-66bc5c9577-9rf5v_kube-system" network: plugin type="calico" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ + Normal Pulled 26s kubelet Container image "registry.k8s.io/coredns/coredns:v1.12.1" already present on machine + Normal Created 26s kubelet Created container: coredns + Normal Started 26s kubelet Started container coredns + + + >>> k8s: coredns logs: + maxprocs: Leaving GOMAXPROCS=8: CPU quota undefined + .:53 + [INFO] plugin/reload: Running configuration SHA512 = 66f0a748f44f6317a6b122af3f457c9dd0ecaed8718ffbf95a69434523efd9ec4992e71f54c7edd5753646fe9af89ac2138b9c3ce14d4a0ba9d2372a55f120bb + CoreDNS-1.12.1 + linux/amd64, go1.24.1, 707c7c1 + [INFO] 127.0.0.1:42754 - 60889 "HINFO IN 3416534385991327796.4235200582284313048. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.035787504s + [INFO] 10.244.176.3:38352 - 37629 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000178694s + [INFO] 10.244.176.3:41455 - 29071 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000132247s + [INFO] 10.244.176.3:44406 - 59774 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000103514s + [INFO] 10.244.176.3:40403 - 63121 "AAAA IN netcat.default.svc.cluster.local. udp 50 false 512" NOERROR qr,aa,rd 143 0.000131197s + [INFO] 10.244.176.3:40403 - 62831 "A IN netcat.default.svc.cluster.local. udp 50 false 512" NOERROR qr,aa,rd 98 0.000163986s + [INFO] 10.244.176.3:34875 - 8445 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000109072s + [INFO] 10.244.176.3:37538 - 41736 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000103365s + [INFO] 10.244.176.3:54974 - 51008 "AAAA IN kubernetes.default.svc.cluster.local. 
udp 54 false 512" NOERROR qr,aa,rd 147 0.00009017s + + + >>> k8s: describe api server pod(s): + Name: kube-apiserver-calico-999044 + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Node: calico-999044/192.168.103.2 + Start Time: Sun, 02 Nov 2025 23:23:51 +0000 + Labels: component=kube-apiserver + tier=control-plane + Annotations: kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 192.168.103.2:8443 + kubernetes.io/config.hash: ec21345c134f0d5025a8e0738ee8e64e + kubernetes.io/config.mirror: ec21345c134f0d5025a8e0738ee8e64e + kubernetes.io/config.seen: 2025-11-02T23:23:51.634009883Z + kubernetes.io/config.source: file + Status: Running + SeccompProfile: RuntimeDefault + IP: 192.168.103.2 + IPs: + IP: 192.168.103.2 + Controlled By: Node/calico-999044 + Containers: + kube-apiserver: + Container ID: docker://57fa4d22fc4927ce2b3f47cf2cc089d7d2fed0a77c90b25b32249cfff256a11c + Image: registry.k8s.io/kube-apiserver:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-apiserver@sha256:b9d7c117f8ac52bed4b13aeed973dc5198f9d93a926e6fe9e0b384f155baa902 + Port: 8443/TCP (probe-port) + Host Port: 8443/TCP (probe-port) + Command: + kube-apiserver + --advertise-address=192.168.103.2 + --allow-privileged=true + --authorization-mode=Node,RBAC + --client-ca-file=/var/lib/minikube/certs/ca.crt + --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + --enable-bootstrap-token-auth=true + --etcd-cafile=/var/lib/minikube/certs/etcd/ca.crt + --etcd-certfile=/var/lib/minikube/certs/apiserver-etcd-client.crt + --etcd-keyfile=/var/lib/minikube/certs/apiserver-etcd-client.key + --etcd-servers=https://127.0.0.1:2379 + --kubelet-client-certificate=/var/lib/minikube/certs/apiserver-kubelet-client.crt + --kubelet-client-key=/var/lib/minikube/certs/apiserver-kubelet-client.key + --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + --proxy-client-cert-file=/var/lib/minikube/certs/front-proxy-client.crt + --proxy-client-key-file=/var/lib/minikube/certs/front-proxy-client.key + --requestheader-allowed-names=front-proxy-client + --requestheader-client-ca-file=/var/lib/minikube/certs/front-proxy-ca.crt + --requestheader-extra-headers-prefix=X-Remote-Extra- + --requestheader-group-headers=X-Remote-Group + --requestheader-username-headers=X-Remote-User + --secure-port=8443 + --service-account-issuer=https://kubernetes.default.svc.cluster.local + --service-account-key-file=/var/lib/minikube/certs/sa.pub + --service-account-signing-key-file=/var/lib/minikube/certs/sa.key + --service-cluster-ip-range=10.96.0.0/12 + --tls-cert-file=/var/lib/minikube/certs/apiserver.crt + --tls-private-key-file=/var/lib/minikube/certs/apiserver.key + State: Running + Started: Sun, 02 Nov 2025 23:23:48 +0000 + Ready: True + Restart Count: 0 + Requests: + cpu: 250m + Liveness: http-get https://192.168.103.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=8 + Readiness: http-get https://192.168.103.2:probe-port/readyz delay=0s timeout=15s period=1s #success=1 #failure=3 + Startup: http-get https://192.168.103.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=24 + Environment: + Mounts: + /etc/ca-certificates from etc-ca-certificates (ro) + /etc/ssl/certs from ca-certs (ro) + /usr/local/share/ca-certificates from usr-local-share-ca-certificates (ro) + /usr/share/ca-certificates from 
usr-share-ca-certificates (ro) + /var/lib/minikube/certs from k8s-certs (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + ca-certs: + Type: HostPath (bare host directory volume) + Path: /etc/ssl/certs + HostPathType: DirectoryOrCreate + etc-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /etc/ca-certificates + HostPathType: DirectoryOrCreate + k8s-certs: + Type: HostPath (bare host directory volume) + Path: /var/lib/minikube/certs + HostPathType: DirectoryOrCreate + usr-local-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/local/share/ca-certificates + HostPathType: DirectoryOrCreate + usr-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/share/ca-certificates + HostPathType: DirectoryOrCreate + QoS Class: Burstable + Node-Selectors: + Tolerations: :NoExecute op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Pulled 54s kubelet Container image "registry.k8s.io/kube-apiserver:v1.34.1" already present on machine + Normal Created 54s kubelet Created container: kube-apiserver + Normal Started 54s kubelet Started container kube-apiserver + + + >>> k8s: api server logs: + I1102 23:23:48.558276 1 options.go:263] external host was not specified, using 192.168.103.2 + I1102 23:23:48.559662 1 server.go:150] Version: v1.34.1 + I1102 23:23:48.559680 1 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + W1102 23:23:48.941240 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=admissionregistration.k8s.io/v1alpha1 + W1102 23:23:48.941374 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=coordination.k8s.io/v1alpha2 + W1102 23:23:48.941383 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=resource.k8s.io/v1alpha3 + W1102 23:23:48.941386 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=storagemigration.k8s.io/v1alpha1 + W1102 23:23:48.941389 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=internal.apiserver.k8s.io/v1alpha1 + W1102 23:23:48.941392 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=imagepolicy.k8s.io/v1alpha1 + W1102 23:23:48.941395 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=storage.k8s.io/v1alpha1 + W1102 23:23:48.941397 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=authentication.k8s.io/v1alpha1 + W1102 23:23:48.941400 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=certificates.k8s.io/v1alpha1 + W1102 23:23:48.941419 1 api_enablement.go:112] 
alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=rbac.authorization.k8s.io/v1alpha1 + W1102 23:23:48.941424 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=scheduling.k8s.io/v1alpha1 + W1102 23:23:48.941427 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=node.k8s.io/v1alpha1 + W1102 23:23:48.955765 1 logging.go:55] [core] [Channel #1 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:48.955890 1 logging.go:55] [core] [Channel #2 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:23:48.955965 1 shared_informer.go:349] "Waiting for caches to sync" controller="node_authorizer" + I1102 23:23:48.959052 1 shared_informer.go:349] "Waiting for caches to sync" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:23:48.963057 1 plugins.go:157] Loaded 14 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,RuntimeClass,DefaultIngressClass,PodTopologyLabels,MutatingAdmissionPolicy,MutatingAdmissionWebhook. + I1102 23:23:48.963108 1 plugins.go:160] Loaded 13 validating admission controller(s) successfully in the following order: LimitRanger,ServiceAccount,PodSecurity,Priority,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,ClusterTrustBundleAttest,CertificateSubjectRestriction,ValidatingAdmissionPolicy,ValidatingAdmissionWebhook,ResourceQuota. + I1102 23:23:48.963257 1 instance.go:239] Using reconciler: lease + W1102 23:23:48.964182 1 logging.go:55] [core] [Channel #8 SubChannel #9]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:48.969265 1 logging.go:55] [core] [Channel #12 SubChannel #13]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:48.991479 1 logging.go:55] [core] [Channel #22 SubChannel #23]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:23:48.999199 1 handler.go:285] Adding GroupVersion apiextensions.k8s.io v1 to ResourceManager + W1102 23:23:48.999220 1 genericapiserver.go:784] Skipping API apiextensions.k8s.io/v1beta1 because it has no resources. + I1102 23:23:49.003074 1 cidrallocator.go:197] starting ServiceCIDR Allocator Controller + W1102 23:23:49.003878 1 logging.go:55] [core] [Channel #27 SubChannel #28]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.009562 1 logging.go:55] [core] [Channel #31 SubChannel #32]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.015478 1 logging.go:55] [core] [Channel #35 SubChannel #36]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.023512 1 logging.go:55] [core] [Channel #39 SubChannel #40]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.032244 1 logging.go:55] [core] [Channel #43 SubChannel #44]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.039353 1 logging.go:55] [core] [Channel #47 SubChannel #48]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.046790 1 logging.go:55] [core] [Channel #51 SubChannel #52]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.054728 1 logging.go:55] [core] [Channel #55 SubChannel #56]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.060535 1 logging.go:55] [core] [Channel #59 SubChannel #60]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.067440 1 logging.go:55] [core] [Channel #63 SubChannel #64]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.073094 1 logging.go:55] [core] [Channel #67 SubChannel #68]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.079755 1 logging.go:55] [core] [Channel #71 SubChannel #72]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.087799 1 logging.go:55] [core] [Channel #75 SubChannel #76]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.094787 1 logging.go:55] [core] [Channel #79 SubChannel #80]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.102205 1 logging.go:55] [core] [Channel #83 SubChannel #84]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.107420 1 logging.go:55] [core] [Channel #87 SubChannel #88]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.117180 1 logging.go:55] [core] [Channel #91 SubChannel #92]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:23:49.133509 1 handler.go:285] Adding GroupVersion v1 to ResourceManager + I1102 23:23:49.133696 1 apis.go:112] API group "internal.apiserver.k8s.io" is not enabled, skipping. + W1102 23:23:49.134394 1 logging.go:55] [core] [Channel #95 SubChannel #96]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.138443 1 logging.go:55] [core] [Channel #99 SubChannel #100]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.142758 1 logging.go:55] [core] [Channel #103 SubChannel #104]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.148278 1 logging.go:55] [core] [Channel #107 SubChannel #108]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.153022 1 logging.go:55] [core] [Channel #111 SubChannel #112]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.159266 1 logging.go:55] [core] [Channel #115 SubChannel #116]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.164628 1 logging.go:55] [core] [Channel #119 SubChannel #120]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.170204 1 logging.go:55] [core] [Channel #123 SubChannel #124]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.188399 1 logging.go:55] [core] [Channel #127 SubChannel #128]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.210325 1 logging.go:55] [core] [Channel #131 SubChannel #132]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.216896 1 logging.go:55] [core] [Channel #135 SubChannel #136]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.222828 1 logging.go:55] [core] [Channel #139 SubChannel #140]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.227797 1 logging.go:55] [core] [Channel #143 SubChannel #144]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.234379 1 logging.go:55] [core] [Channel #147 SubChannel #148]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.241754 1 logging.go:55] [core] [Channel #151 SubChannel #152]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.249173 1 logging.go:55] [core] [Channel #155 SubChannel #156]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.254402 1 logging.go:55] [core] [Channel #159 SubChannel #160]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.260508 1 logging.go:55] [core] [Channel #163 SubChannel #164]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.267753 1 logging.go:55] [core] [Channel #167 SubChannel #168]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.273474 1 logging.go:55] [core] [Channel #171 SubChannel #172]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.279195 1 logging.go:55] [core] [Channel #175 SubChannel #176]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.283880 1 logging.go:55] [core] [Channel #179 SubChannel #180]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.288651 1 logging.go:55] [core] [Channel #183 SubChannel #184]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.295258 1 logging.go:55] [core] [Channel #187 SubChannel #188]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:23:49.301869 1 apis.go:112] API group "storagemigration.k8s.io" is not enabled, skipping. 
+ W1102 23:23:49.303179 1 logging.go:55] [core] [Channel #191 SubChannel #192]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.316300 1 logging.go:55] [core] [Channel #195 SubChannel #196]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.328449 1 logging.go:55] [core] [Channel #199 SubChannel #200]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.336725 1 logging.go:55] [core] [Channel #203 SubChannel #204]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.344369 1 logging.go:55] [core] [Channel #207 SubChannel #208]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.350088 1 logging.go:55] [core] [Channel #211 SubChannel #212]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.360371 1 logging.go:55] [core] [Channel #215 SubChannel #216]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.368137 1 logging.go:55] [core] [Channel #219 SubChannel #220]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.373170 1 logging.go:55] [core] [Channel #223 SubChannel #224]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.379768 1 logging.go:55] [core] [Channel #227 SubChannel #228]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.387051 1 logging.go:55] [core] [Channel #231 SubChannel #232]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.393452 1 logging.go:55] [core] [Channel #235 SubChannel #236]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.398452 1 logging.go:55] [core] [Channel #239 SubChannel #240]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.411298 1 logging.go:55] [core] [Channel #243 SubChannel #244]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.419886 1 logging.go:55] [core] [Channel #247 SubChannel #248]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.426762 1 logging.go:55] [core] [Channel #251 SubChannel #252]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:23:49.450027 1 handler.go:285] Adding GroupVersion authentication.k8s.io v1 to ResourceManager + W1102 23:23:49.450041 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.450045 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:49.450306 1 handler.go:285] Adding GroupVersion authorization.k8s.io v1 to ResourceManager + W1102 23:23:49.450315 1 genericapiserver.go:784] Skipping API authorization.k8s.io/v1beta1 because it has no resources. 
+ I1102 23:23:49.450897 1 handler.go:285] Adding GroupVersion autoscaling v2 to ResourceManager + I1102 23:23:49.451443 1 handler.go:285] Adding GroupVersion autoscaling v1 to ResourceManager + W1102 23:23:49.451478 1 genericapiserver.go:784] Skipping API autoscaling/v2beta1 because it has no resources. + W1102 23:23:49.451490 1 genericapiserver.go:784] Skipping API autoscaling/v2beta2 because it has no resources. + I1102 23:23:49.452240 1 handler.go:285] Adding GroupVersion batch v1 to ResourceManager + W1102 23:23:49.452268 1 genericapiserver.go:784] Skipping API batch/v1beta1 because it has no resources. + I1102 23:23:49.452751 1 handler.go:285] Adding GroupVersion certificates.k8s.io v1 to ResourceManager + W1102 23:23:49.452815 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.452831 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:49.453215 1 handler.go:285] Adding GroupVersion coordination.k8s.io v1 to ResourceManager + W1102 23:23:49.453238 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.453248 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1alpha2 because it has no resources. + I1102 23:23:49.453556 1 handler.go:285] Adding GroupVersion discovery.k8s.io v1 to ResourceManager + W1102 23:23:49.453578 1 genericapiserver.go:784] Skipping API discovery.k8s.io/v1beta1 because it has no resources. + I1102 23:23:49.454742 1 handler.go:285] Adding GroupVersion networking.k8s.io v1 to ResourceManager + W1102 23:23:49.454782 1 genericapiserver.go:784] Skipping API networking.k8s.io/v1beta1 because it has no resources. + I1102 23:23:49.455051 1 handler.go:285] Adding GroupVersion node.k8s.io v1 to ResourceManager + W1102 23:23:49.455069 1 genericapiserver.go:784] Skipping API node.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.455078 1 genericapiserver.go:784] Skipping API node.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:49.455456 1 handler.go:285] Adding GroupVersion policy v1 to ResourceManager + W1102 23:23:49.455472 1 genericapiserver.go:784] Skipping API policy/v1beta1 because it has no resources. + I1102 23:23:49.456364 1 handler.go:285] Adding GroupVersion rbac.authorization.k8s.io v1 to ResourceManager + W1102 23:23:49.456392 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.456403 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:49.456625 1 handler.go:285] Adding GroupVersion scheduling.k8s.io v1 to ResourceManager + W1102 23:23:49.456646 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.456655 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:49.457955 1 handler.go:285] Adding GroupVersion storage.k8s.io v1 to ResourceManager + W1102 23:23:49.457990 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.458005 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:49.458623 1 handler.go:285] Adding GroupVersion flowcontrol.apiserver.k8s.io v1 to ResourceManager + W1102 23:23:49.458649 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta3 because it has no resources. 
+ W1102 23:23:49.458652 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta2 because it has no resources. + W1102 23:23:49.458655 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta1 because it has no resources. + I1102 23:23:49.460973 1 handler.go:285] Adding GroupVersion apps v1 to ResourceManager + W1102 23:23:49.461020 1 genericapiserver.go:784] Skipping API apps/v1beta2 because it has no resources. + W1102 23:23:49.461034 1 genericapiserver.go:784] Skipping API apps/v1beta1 because it has no resources. + I1102 23:23:49.462096 1 handler.go:285] Adding GroupVersion admissionregistration.k8s.io v1 to ResourceManager + W1102 23:23:49.462180 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.462199 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:49.462488 1 handler.go:285] Adding GroupVersion events.k8s.io v1 to ResourceManager + W1102 23:23:49.462515 1 genericapiserver.go:784] Skipping API events.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.462550 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta2 because it has no resources. + I1102 23:23:49.463480 1 handler.go:285] Adding GroupVersion resource.k8s.io v1 to ResourceManager + W1102 23:23:49.463504 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.463512 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1alpha3 because it has no resources. + W1102 23:23:49.465007 1 logging.go:55] [core] [Channel #255 SubChannel #256]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:23:49.469805 1 handler.go:285] Adding GroupVersion apiregistration.k8s.io v1 to ResourceManager + W1102 23:23:49.469822 1 genericapiserver.go:784] Skipping API apiregistration.k8s.io/v1beta1 because it has no resources. + I1102 23:23:49.727649 1 secure_serving.go:211] Serving securely on [::]:8443 + I1102 23:23:49.727692 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/lib/minikube/certs/apiserver.crt::/var/lib/minikube/certs/apiserver.key" + I1102 23:23:49.727870 1 controller.go:80] Starting OpenAPI V3 AggregationController + I1102 23:23:49.727901 1 aggregator.go:169] waiting for initial CRD sync... 
+ I1102 23:23:49.727733 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" + I1102 23:23:49.727704 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:23:49.728368 1 local_available_controller.go:156] Starting LocalAvailability controller + I1102 23:23:49.728387 1 cache.go:32] Waiting for caches to sync for LocalAvailability controller + I1102 23:23:49.728399 1 remote_available_controller.go:425] Starting RemoteAvailability controller + I1102 23:23:49.728406 1 cache.go:32] Waiting for caches to sync for RemoteAvailability controller + I1102 23:23:49.727703 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:23:49.728471 1 crdregistration_controller.go:114] Starting crd-autoregister controller + I1102 23:23:49.728479 1 shared_informer.go:349] "Waiting for caches to sync" controller="crd-autoregister" + I1102 23:23:49.728603 1 default_servicecidr_controller.go:111] Starting kubernetes-service-cidr-controller + I1102 23:23:49.728624 1 shared_informer.go:349] "Waiting for caches to sync" controller="kubernetes-service-cidr-controller" + I1102 23:23:49.728661 1 repairip.go:210] Starting ipallocator-repair-controller + I1102 23:23:49.728670 1 shared_informer.go:349] "Waiting for caches to sync" controller="ipallocator-repair-controller" + I1102 23:23:49.729005 1 system_namespaces_controller.go:66] Starting system namespaces controller + I1102 23:23:49.729088 1 gc_controller.go:78] Starting apiserver lease garbage collector + I1102 23:23:49.729190 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:23:49.729128 1 cluster_authentication_trust_controller.go:459] Starting cluster_authentication_trust_controller controller + I1102 23:23:49.731153 1 shared_informer.go:349] "Waiting for caches to sync" controller="cluster_authentication_trust_controller" + I1102 23:23:49.729197 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:23:49.730005 1 dynamic_serving_content.go:135] "Starting controller" name="aggregator-proxy-cert::/var/lib/minikube/certs/front-proxy-client.crt::/var/lib/minikube/certs/front-proxy-client.key" + I1102 23:23:49.729172 1 controller.go:119] Starting legacy_token_tracking_controller + I1102 23:23:49.731442 1 shared_informer.go:349] "Waiting for caches to sync" controller="configmaps" + I1102 23:23:49.729219 1 apf_controller.go:377] Starting API Priority and Fairness config controller + I1102 23:23:49.729992 1 customresource_discovery_controller.go:294] Starting DiscoveryController + I1102 23:23:49.730017 1 controller.go:78] Starting OpenAPI AggregationController + I1102 23:23:49.730038 1 controller.go:142] Starting OpenAPI controller + I1102 23:23:49.730051 1 controller.go:90] Starting OpenAPI V3 controller + I1102 23:23:49.730059 1 naming_controller.go:299] Starting NamingConditionController + I1102 23:23:49.730065 1 establishing_controller.go:81] Starting EstablishingController + I1102 23:23:49.730073 1 nonstructuralschema_controller.go:195] Starting NonStructuralSchemaConditionController + I1102 23:23:49.730079 1 apiapproval_controller.go:189] Starting KubernetesAPIApprovalPolicyConformantConditionController + I1102 23:23:49.730087 1 crd_finalizer.go:269] Starting CRDFinalizer + I1102 23:23:49.733196 1 apiservice_controller.go:100] Starting 
APIServiceRegistrationController + I1102 23:23:49.733210 1 cache.go:32] Waiting for caches to sync for APIServiceRegistrationController controller + I1102 23:23:49.758854 1 shared_informer.go:356] "Caches are synced" controller="node_authorizer" + I1102 23:23:49.760094 1 shared_informer.go:356] "Caches are synced" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:23:49.760115 1 policy_source.go:240] refreshing policies + E1102 23:23:49.798953 1 controller.go:145] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms" + I1102 23:23:49.828417 1 cache.go:39] Caches are synced for LocalAvailability controller + I1102 23:23:49.828461 1 cache.go:39] Caches are synced for RemoteAvailability controller + I1102 23:23:49.828550 1 shared_informer.go:356] "Caches are synced" controller="crd-autoregister" + I1102 23:23:49.828570 1 aggregator.go:171] initial CRD sync complete... + I1102 23:23:49.828575 1 autoregister_controller.go:144] Starting autoregister controller + I1102 23:23:49.828578 1 cache.go:32] Waiting for caches to sync for autoregister controller + I1102 23:23:49.828581 1 cache.go:39] Caches are synced for autoregister controller + I1102 23:23:49.828663 1 shared_informer.go:356] "Caches are synced" controller="kubernetes-service-cidr-controller" + I1102 23:23:49.828691 1 shared_informer.go:356] "Caches are synced" controller="ipallocator-repair-controller" + I1102 23:23:49.828699 1 default_servicecidr_controller.go:166] Creating default ServiceCIDR with CIDRs: [10.96.0.0/12] + I1102 23:23:49.829794 1 controller.go:667] quota admission added evaluator for: namespaces + I1102 23:23:49.830715 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True + I1102 23:23:49.830753 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:23:49.831352 1 shared_informer.go:356] "Caches are synced" controller="cluster_authentication_trust_controller" + I1102 23:23:49.831628 1 shared_informer.go:356] "Caches are synced" controller="configmaps" + I1102 23:23:49.832093 1 apf_controller.go:382] Running API Priority and Fairness config worker + I1102 23:23:49.832121 1 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process + I1102 23:23:49.833298 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller + I1102 23:23:49.834081 1 handler_discovery.go:451] Starting ResourceDiscoveryManager + I1102 23:23:49.834590 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:23:49.834815 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller + I1102 23:23:50.000972 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io + I1102 23:23:50.732521 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000 + I1102 23:23:50.735797 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000 + I1102 23:23:50.735808 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist. 
+ I1102 23:23:50.992085 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io + I1102 23:23:51.022965 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io + I1102 23:23:51.134359 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"} + W1102 23:23:51.137890 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.103.2] + I1102 23:23:51.138537 1 controller.go:667] quota admission added evaluator for: endpoints + I1102 23:23:51.140870 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io + I1102 23:23:51.745195 1 controller.go:667] quota admission added evaluator for: serviceaccounts + I1102 23:23:51.868516 1 controller.go:667] quota admission added evaluator for: deployments.apps + I1102 23:23:51.872319 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"} + I1102 23:23:51.876274 1 controller.go:667] quota admission added evaluator for: daemonsets.apps + I1102 23:23:52.653534 1 controller.go:667] quota admission added evaluator for: poddisruptionbudgets.policy + I1102 23:23:52.682187 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.688978 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.693082 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.695814 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.716505 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.720624 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.762697 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.785091 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.789215 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.794803 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.798587 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.800760 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.803143 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.806352 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.809068 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.813799 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.828607 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.835062 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.860175 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.868998 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.879752 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 23:23:52.882879 1 handler.go:285] Adding GroupVersion crd.projectcalico.org v1 to ResourceManager + I1102 
23:23:52.934512 1 handler.go:285] Adding GroupVersion policy.networking.k8s.io v1alpha1 to ResourceManager + I1102 23:23:52.944561 1 handler.go:285] Adding GroupVersion policy.networking.k8s.io v1alpha1 to ResourceManager + W1102 23:23:56.658044 1 logging.go:55] [core] [Channel #259 SubChannel #260]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.666564 1 logging.go:55] [core] [Channel #263 SubChannel #264]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:56.681642 1 logging.go:55] [core] [Channel #267 SubChannel #268]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.688348 1 logging.go:55] [core] [Channel #271 SubChannel #272]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.693178 1 logging.go:55] [core] [Channel #275 SubChannel #276]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.698728 1 logging.go:55] [core] [Channel #279 SubChannel #280]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.703635 1 logging.go:55] [core] [Channel #283 SubChannel #284]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.711257 1 logging.go:55] [core] [Channel #287 SubChannel #288]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.717219 1 logging.go:55] [core] [Channel #291 SubChannel #292]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.724485 1 logging.go:55] [core] [Channel #295 SubChannel #296]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:56.730178 1 logging.go:55] [core] [Channel #299 SubChannel #300]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.735967 1 logging.go:55] [core] [Channel #303 SubChannel #304]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.741696 1 logging.go:55] [core] [Channel #307 SubChannel #308]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.748677 1 logging.go:55] [core] [Channel #311 SubChannel #312]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.753830 1 logging.go:55] [core] [Channel #315 SubChannel #316]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.759877 1 logging.go:55] [core] [Channel #319 SubChannel #320]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.765635 1 logging.go:55] [core] [Channel #323 SubChannel #324]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.770563 1 logging.go:55] [core] [Channel #327 SubChannel #328]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.778870 1 logging.go:55] [core] [Channel #331 SubChannel #332]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.784720 1 logging.go:55] [core] [Channel #335 SubChannel #336]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.789975 1 logging.go:55] [core] [Channel #339 SubChannel #340]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.796392 1 logging.go:55] [core] [Channel #343 SubChannel #344]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.801461 1 logging.go:55] [core] [Channel #347 SubChannel #348]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:56.805900 1 logging.go:55] [core] [Channel #351 SubChannel #352]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:23:56.847420 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:23:56.849602 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:23:57.548269 1 controller.go:667] quota admission added evaluator for: replicasets.apps + I1102 23:23:57.946188 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps + I1102 23:24:25.848291 1 alloc.go:328] "allocated clusterIPs" service="default/netcat" clusterIPs={"IPv4":"10.104.166.171"} + E1102 23:24:34.941172 1 conn.go:339] Error on socket receive: read tcp 192.168.103.2:8443->192.168.103.1:54250: use of closed network connection + E1102 23:24:35.005130 1 conn.go:339] Error on socket receive: read tcp 192.168.103.2:8443->192.168.103.1:54274: use of closed network connection + E1102 23:24:35.072526 1 conn.go:339] Error on socket receive: read tcp 192.168.103.2:8443->192.168.103.1:54296: use of closed network connection + E1102 23:24:35.152532 1 conn.go:339] Error on socket receive: read tcp 192.168.103.2:8443->192.168.103.1:54314: use of closed network connection + E1102 23:24:40.288186 1 conn.go:339] Error on socket receive: read tcp 192.168.103.2:8443->192.168.103.1:40634: use of closed network connection + E1102 23:24:40.352961 1 conn.go:339] Error on socket receive: read tcp 192.168.103.2:8443->192.168.103.1:40664: use of closed network connection + E1102 23:24:40.422050 1 conn.go:339] Error on socket receive: read tcp 192.168.103.2:8443->192.168.103.1:40690: use of closed network connection + + + >>> host: /etc/cni: + /etc/cni/net.d/cni.lock + /etc/cni/net.d/10-calico.conflist + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "log_file_path": "/var/log/calico/cni/cni.log", + "datastore_type": "kubernetes", + "nodename": "calico-999044", + "mtu": 0, + "ipam": { + "type": "calico-ipam" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "/etc/cni/net.d/calico-kubeconfig" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + } + ] + }/etc/cni/net.d/87-podman-bridge.conflist.mk_disabled + { + "cniVersion": "0.4.0", + "name": "podman", + "plugins": [ + { + "type": "bridge", + "bridge": "cni-podman0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [{ "dst": "0.0.0.0/0" }], + "ranges": [ + [ + { + "subnet": "10.88.0.0/16", + "gateway": "10.88.0.1" + } + ] + ] + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type": "firewall" + }, + { + "type": "tuning" + } + ] + } + /etc/cni/net.d/calico-kubeconfig + # Kubeconfig file for Calico CNI plugin. Installed by calico/node. 
+ apiVersion: v1 + kind: Config + clusters: + - name: local + cluster: + server: https://10.96.0.1:443 + certificate-authority-data: "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==" + users: + - name: calico + user: + token: eyJhbGciOiJSUzI1NiIsImtpZCI6InJtaUwwTFM5UjdTalVBTmduUTJSZjFWMzhieEF2QTh3OU9SMXdCbUQxQXMifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNzYyMjEyMjU2LCJpYXQiOjE3NjIxMjU4NTYsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwianRpIjoiNTA0NTUxNTctNDU3NS00MDFjLThjMGItY2NhYmQyYjhjMWYxIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJjYWxpY28tY25pLXBsdWdpbiIsInVpZCI6ImQ0ODk5NWEwLTA5OTItNDRlYy1iOTYxLWIyNjViMjFiMjA4ZSJ9fSwibmJmIjoxNzYyMTI1ODU2LCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06Y2FsaWNvLWNuaS1wbHVnaW4ifQ.YHZextWHbDmsp8w29roQC4D7wAsxgaYP9rqlnujFs-3eYjjr91sSn3NVxo5GUnaZDCYlPImMAPU4AcDWPzccUe49dYBJmiAYF8YU0ozf_aCM5tvzJWS0igSyLERVOL2EeOMD773KNEur7R3HZjgehZdfgPwhpDnnt6XjY5nh9AiXZejSj590bY71jfbrTXIEarDguSrL16Zw12LUfDa5RvwmyvJ_FyeFxEAAqesGc5bqdFzUPGzdsWe8RXqo0N_PfpL1Kqv768D3RupjgPpbwYfEL3ss41e-AMSVqffkvRS1Bgxqe2dq0EGnIbn9IxoZareLixasVVLlMN9Re5sm1A + contexts: + - name: calico-context + context: + cluster: local + user: calico + current-context: calico-context/etc/cni/net.d/10-crio-bridge.conflist.disabled.mk_disabled + { + "cniVersion": "1.0.0", + "name": "crio", + "plugins": [ + { + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [ + { "dst": "0.0.0.0/0" }, + { "dst": "::/0" } + ], + "ranges": [ + [{ "subnet": "10.85.0.0/16" }], + [{ "subnet": "1100:200::/24" }] + ] + } + } + ] + } + + + >>> host: ip a s: + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: eth0@if332: mtu 1500 qdisc noqueue state UP group default + link/ether 
a6:c2:72:2c:a3:86 brd ff:ff:ff:ff:ff:ff link-netnsid 0 + inet 192.168.103.2/24 brd 192.168.103.255 scope global eth0 + valid_lft forever preferred_lft forever + 3: docker0: mtu 1500 qdisc noqueue state DOWN group default + link/ether 96:e8:98:91:df:40 brd ff:ff:ff:ff:ff:ff + inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0 + valid_lft forever preferred_lft forever + 4: caliaea9c4fe068@if2: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether ee:ee:ee:ee:ee:ee brd ff:ff:ff:ff:ff:ff link-netnsid 1 + inet6 fe80::ecee:eeff:feee:eeee/64 scope link + valid_lft forever preferred_lft forever + 5: caliea2fc223683@if2: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether ee:ee:ee:ee:ee:ee brd ff:ff:ff:ff:ff:ff link-netnsid 2 + inet6 fe80::ecee:eeff:feee:eeee/64 scope link + valid_lft forever preferred_lft forever + 6: tunl0@NONE: mtu 1480 qdisc noqueue state UNKNOWN group default qlen 1000 + link/ipip 0.0.0.0 brd 0.0.0.0 + inet 10.244.176.0/32 scope global tunl0 + valid_lft forever preferred_lft forever + 9: cali1466d0b4737@if3: mtu 1480 qdisc noqueue state UP group default qlen 1000 + link/ether ee:ee:ee:ee:ee:ee brd ff:ff:ff:ff:ff:ff link-netnsid 3 + inet6 fe80::ecee:eeff:feee:eeee/64 scope link + valid_lft forever preferred_lft forever + + + >>> host: ip r s: + default via 192.168.103.1 dev eth0 + blackhole 10.244.176.0/26 proto bird + 10.244.176.1 dev caliaea9c4fe068 scope link + 10.244.176.2 dev caliea2fc223683 scope link + 10.244.176.3 dev cali1466d0b4737 scope link + 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown + 192.168.103.0/24 dev eth0 proto kernel scope link src 192.168.103.2 + + + >>> host: iptables-save: + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:24:42 2025 + *raw + :PREROUTING ACCEPT [27127:106903056] + :OUTPUT ACCEPT [18061:6562181] + :cali-OUTPUT - [0:0] + :cali-PREROUTING - [0:0] + :cali-from-host-endpoint - [0:0] + :cali-rpf-skip - [0:0] + :cali-to-host-endpoint - [0:0] + -A PREROUTING -m comment --comment "cali:6gwbT8clXdHdC1b1" -j cali-PREROUTING + -A OUTPUT -m comment --comment "cali:tVnHkvAo15HuiPy0" -j cali-OUTPUT + -A cali-OUTPUT -m comment --comment "cali:clI8WObfCl4yhr60" -j MARK --set-xmark 0x0/0x1b0000 + -A cali-OUTPUT -m comment --comment "cali:ZbkN6P8OdRcBRPpU" -j cali-to-host-endpoint + -A cali-OUTPUT -m comment --comment "cali:tTi_3jFgvxUFsObH" -m mark --mark 0x10000/0x10000 -j ACCEPT + -A cali-PREROUTING -m comment --comment "cali:DQ6LK2guiBRMyrLK" -j MARK --set-xmark 0x0/0x1b0000 + -A cali-PREROUTING -i cali+ -m comment --comment "cali:o0awFTLnSamXVVNW" -j MARK --set-xmark 0x80000/0x80000 + -A cali-PREROUTING -m comment --comment "cali:aJ7XamFJS43rOYyJ" -m mark --mark 0x80000/0x80000 -j cali-rpf-skip + -A cali-PREROUTING -m comment --comment "cali:hOMHbY_OXb_BF9pw" -m mark --mark 0x80000/0x80000 -m rpfilter --validmark --invert -j DROP + -A cali-PREROUTING -m comment --comment "cali:N7S4OAqlgBJWcynn" -m mark --mark 0x0/0x80000 -j cali-from-host-endpoint + -A cali-PREROUTING -m comment --comment "cali:8A061i5zP9KV-opa" -m mark --mark 0x10000/0x10000 -j ACCEPT + COMMIT + # Completed on Sun Nov 2 23:24:42 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:24:42 2025 + *mangle + :PREROUTING ACCEPT [543:46618] + :INPUT ACCEPT [27065:106897199] + :FORWARD ACCEPT [35:2798] + :OUTPUT ACCEPT [18037:6560494] + :POSTROUTING ACCEPT [18071:6562772] + :KUBE-IPTABLES-HINT - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :cali-POSTROUTING - [0:0] + :cali-PREROUTING - [0:0] 
+ :cali-from-host-endpoint - [0:0] + :cali-to-host-endpoint - [0:0] + -A PREROUTING -m comment --comment "cali:6gwbT8clXdHdC1b1" -j cali-PREROUTING + -A POSTROUTING -m comment --comment "cali:O3lYWMrLQYEMJtB5" -j cali-POSTROUTING + -A cali-POSTROUTING -m comment --comment "cali:NX-7roTexQ3fGRfU" -m mark --mark 0x10000/0x10000 -j RETURN + -A cali-POSTROUTING -m comment --comment "cali:JkYr4aB8O4_N8NBS" -j MARK --set-xmark 0x0/0x1b0000 + -A cali-POSTROUTING -m comment --comment "cali:nO0nbHA3Or7V6l7t" -m conntrack --ctstate DNAT -j cali-to-host-endpoint + -A cali-POSTROUTING -m comment --comment "cali:mgxNjIoKySH7TEGH" -m comment --comment "Host endpoint policy accepted packet." -m mark --mark 0x10000/0x10000 -j RETURN + -A cali-PREROUTING -m comment --comment "cali:6BJqBjBC7crtA-7-" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A cali-PREROUTING -m comment --comment "cali:KX7AGNd6rMcDUai6" -m mark --mark 0x10000/0x10000 -j ACCEPT + -A cali-PREROUTING -m comment --comment "cali:wNH7KsA3ILKJBsY9" -j cali-from-host-endpoint + -A cali-PREROUTING -m comment --comment "cali:Cg96MgVuoPm7UMRo" -m comment --comment "Host endpoint policy accepted packet." -m mark --mark 0x10000/0x10000 -j ACCEPT + COMMIT + # Completed on Sun Nov 2 23:24:42 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:24:42 2025 + *filter + :INPUT ACCEPT [5233:1291340] + :FORWARD ACCEPT [0:0] + :OUTPUT ACCEPT [5226:1673609] + :DOCKER - [0:0] + :DOCKER-BRIDGE - [0:0] + :DOCKER-CT - [0:0] + :DOCKER-FORWARD - [0:0] + :DOCKER-ISOLATION-STAGE-1 - [0:0] + :DOCKER-ISOLATION-STAGE-2 - [0:0] + :DOCKER-USER - [0:0] + :KUBE-EXTERNAL-SERVICES - [0:0] + :KUBE-FIREWALL - [0:0] + :KUBE-FORWARD - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-PROXY-FIREWALL - [0:0] + :KUBE-SERVICES - [0:0] + :cali-FORWARD - [0:0] + :cali-INPUT - [0:0] + :cali-OUTPUT - [0:0] + :cali-cidr-block - [0:0] + :cali-from-hep-forward - [0:0] + :cali-from-host-endpoint - [0:0] + :cali-from-wl-dispatch - [0:0] + :cali-fw-cali1466d0b4737 - [0:0] + :cali-fw-caliaea9c4fe068 - [0:0] + :cali-fw-caliea2fc223683 - [0:0] + :cali-pri-_PTRGc0U-L5Kz7V6ERW - [0:0] + :cali-pri-_u2Tn2rSoAPffvE7JO6 - [0:0] + :cali-pri-kns.default - [0:0] + :cali-pri-kns.kube-system - [0:0] + :cali-pri-ksa.default.default - [0:0] + :cali-pro-_PTRGc0U-L5Kz7V6ERW - [0:0] + :cali-pro-_u2Tn2rSoAPffvE7JO6 - [0:0] + :cali-pro-kns.default - [0:0] + :cali-pro-kns.kube-system - [0:0] + :cali-pro-ksa.default.default - [0:0] + :cali-to-hep-forward - [0:0] + :cali-to-host-endpoint - [0:0] + :cali-to-wl-dispatch - [0:0] + :cali-tw-cali1466d0b4737 - [0:0] + :cali-tw-caliaea9c4fe068 - [0:0] + :cali-tw-caliea2fc223683 - [0:0] + :cali-wl-to-host - [0:0] + -A INPUT -m comment --comment "cali:Cz_u1IQiXIMmKD4c" -j cali-INPUT + -A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A INPUT -m comment --comment "kubernetes health check service ports" -j KUBE-NODEPORTS + -A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A INPUT -j KUBE-FIREWALL + -A FORWARD -m comment --comment "cali:wUHhoiAYhphO9Mso" -j cali-FORWARD + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j 
KUBE-SERVICES + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A FORWARD -j DOCKER-USER + -A FORWARD -j DOCKER-FORWARD + -A FORWARD -m comment --comment "cali:S93hcgKJrXEqnTfs" -m comment --comment "Policy explicitly accepted packet." -m mark --mark 0x10000/0x10000 -j ACCEPT + -A FORWARD -m comment --comment "cali:mp77cMpurHhyjLrM" -j MARK --set-xmark 0x10000/0x10000 + -A OUTPUT -m comment --comment "cali:tVnHkvAo15HuiPy0" -j cali-OUTPUT + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -j KUBE-FIREWALL + -A DOCKER ! -i docker0 -o docker0 -j DROP + -A DOCKER-BRIDGE -o docker0 -j DOCKER + -A DOCKER-CT -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A DOCKER-FORWARD -j DOCKER-CT + -A DOCKER-FORWARD -j DOCKER-ISOLATION-STAGE-1 + -A DOCKER-FORWARD -j DOCKER-BRIDGE + -A DOCKER-FORWARD -i docker0 -j ACCEPT + -A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2 + -A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP + -A KUBE-FIREWALL ! -s 127.0.0.0/8 -d 127.0.0.0/8 -m comment --comment "block incoming localnet connections" -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP + -A KUBE-FORWARD -m conntrack --ctstate INVALID -m nfacct --nfacct-name ct_state_invalid_dropped_pkts -j DROP + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A cali-FORWARD -m comment --comment "cali:W_vvds1Nw3n9QE2f" -j MARK --set-xmark 0x0/0x1a0000 + -A cali-FORWARD -m comment --comment "cali:ZfgmjuiLaA8Pg0kp" -m mark --mark 0x0/0x10000 -j cali-from-hep-forward + -A cali-FORWARD -i cali+ -m comment --comment "cali:tAzwBLPaV-j53OOZ" -j cali-from-wl-dispatch + -A cali-FORWARD -o cali+ -m comment --comment "cali:4Z0Pf0byo05NFe-P" -j cali-to-wl-dispatch + -A cali-FORWARD -m comment --comment "cali:hQ7Oc16wmUtLuneJ" -j cali-to-hep-forward + -A cali-FORWARD -m comment --comment "cali:rnKNH2WxGcRQcIlD" -j cali-cidr-block + -A cali-INPUT -p ipencap -m comment --comment "cali:PajejrV4aFdkZojI" -m comment --comment "Allow IPIP packets from Calico hosts" -m set --match-set cali40all-hosts-net src -m addrtype --dst-type LOCAL -j ACCEPT + -A cali-INPUT -p ipencap -m comment --comment "cali:_wjq-Yrma8Ly1Svo" -m comment --comment "Drop IPIP packets from non-Calico hosts" -j DROP + -A cali-INPUT -i cali+ -m comment --comment "cali:8TZGxLWh_Eiz66wc" -g cali-wl-to-host + -A cali-INPUT -m comment --comment "cali:6McIeIDvPdL6PE1T" -m mark --mark 0x10000/0x10000 -j ACCEPT + -A cali-INPUT -m comment --comment "cali:FHoqwXIibih3FkAm" -j MARK --set-xmark 0x0/0x1b0000 + -A cali-INPUT -m comment --comment "cali:qZ1_fX7-OsFCh64q" -j cali-from-host-endpoint + -A cali-INPUT -m comment --comment "cali:64so3UXgS7MtBy-m" -m comment --comment "Host endpoint policy accepted packet." 
-m mark --mark 0x10000/0x10000 -j ACCEPT + -A cali-OUTPUT -m comment --comment "cali:Mq1_rAdXXH3YkrzW" -m mark --mark 0x10000/0x10000 -j ACCEPT + -A cali-OUTPUT -o cali+ -m comment --comment "cali:69FkRTJDvD5Vu6Vl" -j RETURN + -A cali-OUTPUT -p ipencap -m comment --comment "cali:AnEsmO6bDZbQntWW" -m comment --comment "Allow IPIP packets to other Calico hosts" -m set --match-set cali40all-hosts-net dst -m addrtype --src-type LOCAL -j ACCEPT + -A cali-OUTPUT -m comment --comment "cali:KMCPT0oQKE3eYLxL" -j MARK --set-xmark 0x0/0x1b0000 + -A cali-OUTPUT -m comment --comment "cali:cMHr2aCzyAyBKiqV" -m conntrack ! --ctstate DNAT -j cali-to-host-endpoint + -A cali-OUTPUT -m comment --comment "cali:EI7GEITASEcJBTEO" -m comment --comment "Host endpoint policy accepted packet." -m mark --mark 0x10000/0x10000 -j ACCEPT + -A cali-from-wl-dispatch -i cali1466d0b4737 -m comment --comment "cali:lqE2sQzorYnWPZGg" -g cali-fw-cali1466d0b4737 + -A cali-from-wl-dispatch -i caliaea9c4fe068 -m comment --comment "cali:Fz6a1PwB5lyEGHhW" -g cali-fw-caliaea9c4fe068 + -A cali-from-wl-dispatch -i caliea2fc223683 -m comment --comment "cali:O1ksW5BkGTkXh01V" -g cali-fw-caliea2fc223683 + -A cali-from-wl-dispatch -m comment --comment "cali:gU7B_tZD2wDENpq4" -m comment --comment "Unknown interface" -j DROP + -A cali-fw-cali1466d0b4737 -m comment --comment "cali:Q46HJ1FBV_BsitRH" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A cali-fw-cali1466d0b4737 -m comment --comment "cali:T57JBvuqntMMsHTj" -m conntrack --ctstate INVALID -j DROP + -A cali-fw-cali1466d0b4737 -m comment --comment "cali:XXHmsFNLAndgo3CW" -j MARK --set-xmark 0x0/0x30000 + -A cali-fw-cali1466d0b4737 -p udp -m comment --comment "cali:IErKu1XxGhbUg7UU" -m comment --comment "Drop VXLAN encapped packets originating in workloads" -m multiport --dports 4789 -j DROP + -A cali-fw-cali1466d0b4737 -p ipencap -m comment --comment "cali:kqFqbgPkH2T5_ly9" -m comment --comment "Drop IPinIP encapped packets originating in workloads" -j DROP + -A cali-fw-cali1466d0b4737 -m comment --comment "cali:Zxcmxh3QHmuDTi3a" -j cali-pro-kns.default + -A cali-fw-cali1466d0b4737 -m comment --comment "cali:7t85yC1x0MBiQO1T" -m comment --comment "Return if profile accepted" -m mark --mark 0x10000/0x10000 -j RETURN + -A cali-fw-cali1466d0b4737 -m comment --comment "cali:deJ_IhbHT-bke76E" -j cali-pro-ksa.default.default + -A cali-fw-cali1466d0b4737 -m comment --comment "cali:MpfTUAONfMkNwlEi" -m comment --comment "Return if profile accepted" -m mark --mark 0x10000/0x10000 -j RETURN + -A cali-fw-cali1466d0b4737 -m comment --comment "cali:oClgrlI3aiTqY_l1" -m comment --comment "Drop if no profiles matched" -j DROP + -A cali-fw-caliaea9c4fe068 -m comment --comment "cali:HJvkp_ttMKi9pJsE" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A cali-fw-caliaea9c4fe068 -m comment --comment "cali:IZBgcn3_pGLgb2yT" -m conntrack --ctstate INVALID -j DROP + -A cali-fw-caliaea9c4fe068 -m comment --comment "cali:SiUmr1DdlW1fv2CD" -j MARK --set-xmark 0x0/0x30000 + -A cali-fw-caliaea9c4fe068 -p udp -m comment --comment "cali:XSDoUAab9ZSL8QYh" -m comment --comment "Drop VXLAN encapped packets originating in workloads" -m multiport --dports 4789 -j DROP + -A cali-fw-caliaea9c4fe068 -p ipencap -m comment --comment "cali:_jqc8Oz6uuU-isHr" -m comment --comment "Drop IPinIP encapped packets originating in workloads" -j DROP + -A cali-fw-caliaea9c4fe068 -m comment --comment "cali:LZk-G9THbnN3R5wQ" -j cali-pro-kns.kube-system + -A cali-fw-caliaea9c4fe068 -m comment --comment "cali:yXkUoLBHjERgpT9j" 
-m comment --comment "Return if profile accepted" -m mark --mark 0x10000/0x10000 -j RETURN + -A cali-fw-caliaea9c4fe068 -m comment --comment "cali:NhCln1USWxMQIQrQ" -j cali-pro-_u2Tn2rSoAPffvE7JO6 + -A cali-fw-caliaea9c4fe068 -m comment --comment "cali:0Mk-yDjv7HpMQqn8" -m comment --comment "Return if profile accepted" -m mark --mark 0x10000/0x10000 -j RETURN + -A cali-fw-caliaea9c4fe068 -m comment --comment "cali:pvog0RG6PtatIgl8" -m comment --comment "Drop if no profiles matched" -j DROP + -A cali-fw-caliea2fc223683 -m comment --comment "cali:O6iIBgGc4JML6_EX" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A cali-fw-caliea2fc223683 -m comment --comment "cali:6Sz_e67ofMkq_Y7c" -m conntrack --ctstate INVALID -j DROP + -A cali-fw-caliea2fc223683 -m comment --comment "cali:hnX4C7fAdZXya-vx" -j MARK --set-xmark 0x0/0x30000 + -A cali-fw-caliea2fc223683 -p udp -m comment --comment "cali:g-9uWoJcIgjw3G2v" -m comment --comment "Drop VXLAN encapped packets originating in workloads" -m multiport --dports 4789 -j DROP + -A cali-fw-caliea2fc223683 -p ipencap -m comment --comment "cali:bEG-3_FBTZjdru_C" -m comment --comment "Drop IPinIP encapped packets originating in workloads" -j DROP + -A cali-fw-caliea2fc223683 -m comment --comment "cali:XQVqjOJQOzyQeLiq" -j cali-pro-kns.kube-system + -A cali-fw-caliea2fc223683 -m comment --comment "cali:_OsvYtstfQWW8QU_" -m comment --comment "Return if profile accepted" -m mark --mark 0x10000/0x10000 -j RETURN + -A cali-fw-caliea2fc223683 -m comment --comment "cali:AFKXElOym7ZfG3cG" -j cali-pro-_PTRGc0U-L5Kz7V6ERW + -A cali-fw-caliea2fc223683 -m comment --comment "cali:XNAyHH_CE9yF-iCK" -m comment --comment "Return if profile accepted" -m mark --mark 0x10000/0x10000 -j RETURN + -A cali-fw-caliea2fc223683 -m comment --comment "cali:ydvDwD5emySz8uLv" -m comment --comment "Drop if no profiles matched" -j DROP + -A cali-pri-_PTRGc0U-L5Kz7V6ERW -m comment --comment "cali:g4z4yZxg6IEqYbOs" -m comment --comment "Profile ksa.kube-system.calico-kube-controllers ingress" + -A cali-pri-_u2Tn2rSoAPffvE7JO6 -m comment --comment "cali:WqgznqAQ-uYV0oBx" -m comment --comment "Profile ksa.kube-system.coredns ingress" + -A cali-pri-kns.default -m comment --comment "cali:WMSw8BmYOknRHfsz" -m comment --comment "Profile kns.default ingress" -j MARK --set-xmark 0x10000/0x10000 + -A cali-pri-kns.kube-system -m comment --comment "cali:J1TyxtHWd0qaBGK-" -m comment --comment "Profile kns.kube-system ingress" -j MARK --set-xmark 0x10000/0x10000 + -A cali-pri-ksa.default.default -m comment --comment "cali:PrckJA84jX_kGp99" -m comment --comment "Profile ksa.default.default ingress" + -A cali-pro-_PTRGc0U-L5Kz7V6ERW -m comment --comment "cali:DR9-t6YJRvFY-IdZ" -m comment --comment "Profile ksa.kube-system.calico-kube-controllers egress" + -A cali-pro-_u2Tn2rSoAPffvE7JO6 -m comment --comment "cali:0-_UPh39dt5XfhmJ" -m comment --comment "Profile ksa.kube-system.coredns egress" + -A cali-pro-kns.default -m comment --comment "cali:Vr81boRqq4V77Sg8" -m comment --comment "Profile kns.default egress" -j MARK --set-xmark 0x10000/0x10000 + -A cali-pro-kns.kube-system -m comment --comment "cali:tgOR2S8DVHZW3F1M" -m comment --comment "Profile kns.kube-system egress" -j MARK --set-xmark 0x10000/0x10000 + -A cali-pro-ksa.default.default -m comment --comment "cali:bUZzZcietq9v5Ybq" -m comment --comment "Profile ksa.default.default egress" + -A cali-to-wl-dispatch -o cali1466d0b4737 -m comment --comment "cali:l2MtPh1s6lhjoMjE" -g cali-tw-cali1466d0b4737 + -A cali-to-wl-dispatch -o caliaea9c4fe068 
-m comment --comment "cali:H5Z3p1WYSuQ0zKAL" -g cali-tw-caliaea9c4fe068 + -A cali-to-wl-dispatch -o caliea2fc223683 -m comment --comment "cali:etHr7dKcHDSihcu8" -g cali-tw-caliea2fc223683 + -A cali-to-wl-dispatch -m comment --comment "cali:VgcSa4HwQbvJzHNY" -m comment --comment "Unknown interface" -j DROP + -A cali-tw-cali1466d0b4737 -m comment --comment "cali:VUMFkLbuBXxnKOxq" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A cali-tw-cali1466d0b4737 -m comment --comment "cali:z1NV07jWRhTOYM7E" -m conntrack --ctstate INVALID -j DROP + -A cali-tw-cali1466d0b4737 -m comment --comment "cali:cHh21CTAntjwx4UN" -j MARK --set-xmark 0x0/0x30000 + -A cali-tw-cali1466d0b4737 -m comment --comment "cali:J0JRFqXuE9u3fm-I" -j cali-pri-kns.default + -A cali-tw-cali1466d0b4737 -m comment --comment "cali:q8JVBeHQsyQpDhZS" -m comment --comment "Return if profile accepted" -m mark --mark 0x10000/0x10000 -j RETURN + -A cali-tw-cali1466d0b4737 -m comment --comment "cali:IxtxX0Paend6fDc4" -j cali-pri-ksa.default.default + -A cali-tw-cali1466d0b4737 -m comment --comment "cali:qmexr7WFGVEIIkPt" -m comment --comment "Return if profile accepted" -m mark --mark 0x10000/0x10000 -j RETURN + -A cali-tw-cali1466d0b4737 -m comment --comment "cali:MApakOyc32loGFvO" -m comment --comment "Drop if no profiles matched" -j DROP + -A cali-tw-caliaea9c4fe068 -m comment --comment "cali:ROH5oXn1bnjgKEWM" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A cali-tw-caliaea9c4fe068 -m comment --comment "cali:1LNUCGL8CE8OhNrh" -m conntrack --ctstate INVALID -j DROP + -A cali-tw-caliaea9c4fe068 -m comment --comment "cali:O5Qq3tbZCAkLe8EO" -j MARK --set-xmark 0x0/0x30000 + -A cali-tw-caliaea9c4fe068 -m comment --comment "cali:prAxKZQFlMlhkXez" -j cali-pri-kns.kube-system + -A cali-tw-caliaea9c4fe068 -m comment --comment "cali:yGRTxIIZvwrxoPch" -m comment --comment "Return if profile accepted" -m mark --mark 0x10000/0x10000 -j RETURN + -A cali-tw-caliaea9c4fe068 -m comment --comment "cali:aJn-a33bCw_oGXa3" -j cali-pri-_u2Tn2rSoAPffvE7JO6 + -A cali-tw-caliaea9c4fe068 -m comment --comment "cali:PI4ullPxuijG2qNH" -m comment --comment "Return if profile accepted" -m mark --mark 0x10000/0x10000 -j RETURN + -A cali-tw-caliaea9c4fe068 -m comment --comment "cali:0dPu8hTvbtGV7s9M" -m comment --comment "Drop if no profiles matched" -j DROP + -A cali-tw-caliea2fc223683 -m comment --comment "cali:mW_yiM_TxbyEOYnZ" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A cali-tw-caliea2fc223683 -m comment --comment "cali:3cSYP6mG-Zo0EMsV" -m conntrack --ctstate INVALID -j DROP + -A cali-tw-caliea2fc223683 -m comment --comment "cali:M-LZosochRltifug" -j MARK --set-xmark 0x0/0x30000 + -A cali-tw-caliea2fc223683 -m comment --comment "cali:LgvBcD7_FgIZWspx" -j cali-pri-kns.kube-system + -A cali-tw-caliea2fc223683 -m comment --comment "cali:OkZy5JoXHPCGuPWP" -m comment --comment "Return if profile accepted" -m mark --mark 0x10000/0x10000 -j RETURN + -A cali-tw-caliea2fc223683 -m comment --comment "cali:TVnsfROVBQWKjH6_" -j cali-pri-_PTRGc0U-L5Kz7V6ERW + -A cali-tw-caliea2fc223683 -m comment --comment "cali:5HBPgD_LESQe3aOJ" -m comment --comment "Return if profile accepted" -m mark --mark 0x10000/0x10000 -j RETURN + -A cali-tw-caliea2fc223683 -m comment --comment "cali:jdtKwOnblj20t-P0" -m comment --comment "Drop if no profiles matched" -j DROP + -A cali-wl-to-host -m comment --comment "cali:Ee9Sbo10IpVujdIY" -j cali-from-wl-dispatch + -A cali-wl-to-host -m comment --comment "cali:nSZbcOoG1xPONxb8" -m comment --comment "Configured 
DefaultEndpointToHostAction" -j ACCEPT + COMMIT + # Completed on Sun Nov 2 23:24:42 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:24:42 2025 + *nat + :PREROUTING ACCEPT [36:2160] + :INPUT ACCEPT [36:2160] + :OUTPUT ACCEPT [65:3900] + :POSTROUTING ACCEPT [74:4575] + :DOCKER - [0:0] + :DOCKER_OUTPUT - [0:0] + :DOCKER_POSTROUTING - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-MARK-MASQ - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-POSTROUTING - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-SEP-2R3HE55LAXVWD4VV - [0:0] + :KUBE-SEP-IRP3JNLC7JEK2KSX - [0:0] + :KUBE-SEP-P5PFLD5SRGLMSC36 - [0:0] + :KUBE-SEP-QPAQX3CSKXOU5VQU - [0:0] + :KUBE-SEP-SHAXBLZK6MQ4UNVP - [0:0] + :KUBE-SERVICES - [0:0] + :KUBE-SVC-ERIFXISQEP7F7OF4 - [0:0] + :KUBE-SVC-JD5MR3NA4I4DYORP - [0:0] + :KUBE-SVC-NPX46M4PTMTKRN6Y - [0:0] + :KUBE-SVC-TCOU7JCQXEZGVUNU - [0:0] + :KUBE-SVC-WDP22YZC5S6MZWYX - [0:0] + :cali-OUTPUT - [0:0] + :cali-POSTROUTING - [0:0] + :cali-PREROUTING - [0:0] + :cali-fip-dnat - [0:0] + :cali-fip-snat - [0:0] + :cali-nat-outgoing - [0:0] + -A PREROUTING -m comment --comment "cali:6gwbT8clXdHdC1b1" -j cali-PREROUTING + -A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A PREROUTING -d 192.168.103.1/32 -j DOCKER_OUTPUT + -A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER + -A OUTPUT -m comment --comment "cali:tVnHkvAo15HuiPy0" -j cali-OUTPUT + -A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -d 192.168.103.1/32 -j DOCKER_OUTPUT + -A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER + -A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING + -A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE + -A POSTROUTING -d 192.168.103.1/32 -j DOCKER_POSTROUTING + -A POSTROUTING -m comment --comment "cali:0i8pjzKKPyA34aQD" -j cali-POSTROUTING + -A DOCKER -i docker0 -j RETURN + -A DOCKER_OUTPUT -d 192.168.103.1/32 -p tcp -m tcp --dport 53 -j DNAT --to-destination 127.0.0.11:43995 + -A DOCKER_OUTPUT -d 192.168.103.1/32 -p udp -m udp --dport 53 -j DNAT --to-destination 127.0.0.11:56602 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p tcp -m tcp --sport 43995 -j SNAT --to-source 192.168.103.1:53 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p udp -m udp --sport 56602 -j SNAT --to-source 192.168.103.1:53 + -A KUBE-MARK-MASQ -j MARK --set-xmark 0x4000/0x4000 + -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN + -A KUBE-POSTROUTING -j MARK --set-xmark 0x4000/0x0 + -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE --random-fully + -A KUBE-SEP-2R3HE55LAXVWD4VV -s 10.244.176.1/32 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-MARK-MASQ + -A KUBE-SEP-2R3HE55LAXVWD4VV -p udp -m comment --comment "kube-system/kube-dns:dns" -m udp -j DNAT --to-destination 10.244.176.1:53 + -A KUBE-SEP-IRP3JNLC7JEK2KSX -s 10.244.176.1/32 -m comment --comment "kube-system/kube-dns:metrics" -j KUBE-MARK-MASQ + -A KUBE-SEP-IRP3JNLC7JEK2KSX -p tcp -m comment --comment "kube-system/kube-dns:metrics" -m tcp -j DNAT --to-destination 10.244.176.1:9153 + -A KUBE-SEP-P5PFLD5SRGLMSC36 -s 10.244.176.3/32 -m comment --comment "default/netcat" -j KUBE-MARK-MASQ + -A KUBE-SEP-P5PFLD5SRGLMSC36 -p tcp -m comment --comment "default/netcat" -m tcp -j DNAT --to-destination 10.244.176.3:8080 + -A KUBE-SEP-QPAQX3CSKXOU5VQU -s 192.168.103.2/32 -m comment --comment "default/kubernetes:https" -j KUBE-MARK-MASQ + -A KUBE-SEP-QPAQX3CSKXOU5VQU -p tcp -m comment --comment "default/kubernetes:https" -m tcp -j DNAT --to-destination 192.168.103.2:8443 + -A KUBE-SEP-SHAXBLZK6MQ4UNVP -s 10.244.176.1/32 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-MARK-MASQ + -A KUBE-SEP-SHAXBLZK6MQ4UNVP -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m tcp -j DNAT --to-destination 10.244.176.1:53 + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-SVC-JD5MR3NA4I4DYORP + -A KUBE-SERVICES -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-SVC-TCOU7JCQXEZGVUNU + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-SVC-ERIFXISQEP7F7OF4 + -A KUBE-SERVICES -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-NPX46M4PTMTKRN6Y + -A KUBE-SERVICES -d 10.104.166.171/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-SVC-WDP22YZC5S6MZWYX + -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS + -A KUBE-SVC-ERIFXISQEP7F7OF4 ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-ERIFXISQEP7F7OF4 -m comment --comment "kube-system/kube-dns:dns-tcp -> 10.244.176.1:53" -j KUBE-SEP-SHAXBLZK6MQ4UNVP + -A KUBE-SVC-JD5MR3NA4I4DYORP ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-MARK-MASQ + -A KUBE-SVC-JD5MR3NA4I4DYORP -m comment --comment "kube-system/kube-dns:metrics -> 10.244.176.1:9153" -j KUBE-SEP-IRP3JNLC7JEK2KSX + -A KUBE-SVC-NPX46M4PTMTKRN6Y ! -s 10.244.0.0/16 -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-MARK-MASQ + -A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https -> 192.168.103.2:8443" -j KUBE-SEP-QPAQX3CSKXOU5VQU + -A KUBE-SVC-TCOU7JCQXEZGVUNU ! 
-s 10.244.0.0/16 -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-TCOU7JCQXEZGVUNU -m comment --comment "kube-system/kube-dns:dns -> 10.244.176.1:53" -j KUBE-SEP-2R3HE55LAXVWD4VV + -A KUBE-SVC-WDP22YZC5S6MZWYX ! -s 10.244.0.0/16 -d 10.104.166.171/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-MARK-MASQ + -A KUBE-SVC-WDP22YZC5S6MZWYX -m comment --comment "default/netcat -> 10.244.176.3:8080" -j KUBE-SEP-P5PFLD5SRGLMSC36 + -A cali-OUTPUT -m comment --comment "cali:GBTAv2p5CwevEyJm" -j cali-fip-dnat + -A cali-POSTROUTING -m comment --comment "cali:Z-c7XtVd2Bq7s_hA" -j cali-fip-snat + -A cali-POSTROUTING -m comment --comment "cali:nYKhEzDlr11Jccal" -j cali-nat-outgoing + -A cali-POSTROUTING -o tunl0 -m comment --comment "cali:SXWvdsbh4Mw7wOln" -m addrtype ! --src-type LOCAL --limit-iface-out -m addrtype --src-type LOCAL -j MASQUERADE --random-fully + -A cali-PREROUTING -m comment --comment "cali:r6XmIziWUJsdOK6Z" -j cali-fip-dnat + -A cali-nat-outgoing -m comment --comment "cali:flqWnvo8yq4ULQLa" -m set --match-set cali40masq-ipam-pools src -m set ! --match-set cali40all-ipam-pools dst -j MASQUERADE --random-fully + COMMIT + # Completed on Sun Nov 2 23:24:42 2025 + + + >>> host: iptables table nat: + Chain PREROUTING (policy ACCEPT 37 packets, 2220 bytes) + pkts bytes target prot opt in out source destination + 51 3195 cali-PREROUTING 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* cali:6gwbT8clXdHdC1b1 */ + 56 3520 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 1 85 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.103.1 + 45 2700 DOCKER 0 -- * * 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type LOCAL + + Chain INPUT (policy ACCEPT 37 packets, 2220 bytes) + pkts bytes target prot opt in out source destination + + Chain OUTPUT (policy ACCEPT 65 packets, 3900 bytes) + pkts bytes target prot opt in out source destination + 540 46891 cali-OUTPUT 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* cali:tVnHkvAo15HuiPy0 */ + 821 70759 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 625 60301 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.103.1 + 88 5280 DOCKER 0 -- * * 0.0.0.0/0 !127.0.0.0/8 ADDRTYPE match dst-type LOCAL + + Chain POSTROUTING (policy ACCEPT 74 packets, 4575 bytes) + pkts bytes target prot opt in out source destination + 831 71494 KUBE-POSTROUTING 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */ + 0 0 MASQUERADE 0 -- * !docker0 172.17.0.0/16 0.0.0.0/0 + 0 0 DOCKER_POSTROUTING 0 -- * * 0.0.0.0/0 192.168.103.1 + 547 47446 cali-POSTROUTING 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* cali:0i8pjzKKPyA34aQD */ + + Chain DOCKER (2 references) + pkts bytes target prot opt in out source destination + 0 0 RETURN 0 -- docker0 * 0.0.0.0/0 0.0.0.0/0 + + Chain DOCKER_OUTPUT (2 references) + pkts bytes target prot opt in out source destination + 0 0 DNAT 6 -- * * 0.0.0.0/0 192.168.103.1 tcp dpt:53 to:127.0.0.11:43995 + 626 60386 DNAT 17 -- * * 0.0.0.0/0 192.168.103.1 udp dpt:53 to:127.0.0.11:56602 + + Chain DOCKER_POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 0 0 SNAT 6 -- * * 127.0.0.11 0.0.0.0/0 tcp spt:43995 to:192.168.103.1:53 + 0 0 SNAT 17 -- * * 127.0.0.11 0.0.0.0/0 udp spt:56602 to:192.168.103.1:53 + + Chain KUBE-KUBELET-CANARY (0 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-MARK-MASQ (10 references) + pkts bytes target prot opt in out source destination + 1 60 MARK 0 -- * * 0.0.0.0/0 
0.0.0.0/0 MARK or 0x4000 + + Chain KUBE-NODEPORTS (1 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 74 4575 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0 mark match ! 0x4000/0x4000 + 1 60 MARK 0 -- * * 0.0.0.0/0 0.0.0.0/0 MARK xor 0x4000 + 1 60 MASQUERADE 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ random-fully + + Chain KUBE-PROXY-CANARY (0 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-SEP-2R3HE55LAXVWD4VV (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.176.1 0.0.0.0/0 /* kube-system/kube-dns:dns */ + 8 615 DNAT 17 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns */ udp to:10.244.176.1:53 + + Chain KUBE-SEP-IRP3JNLC7JEK2KSX (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.176.1 0.0.0.0/0 /* kube-system/kube-dns:metrics */ + 0 0 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:metrics */ tcp to:10.244.176.1:9153 + + Chain KUBE-SEP-P5PFLD5SRGLMSC36 (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 0 -- * * 10.244.176.3 0.0.0.0/0 /* default/netcat */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat */ tcp to:10.244.176.3:8080 + + Chain KUBE-SEP-QPAQX3CSKXOU5VQU (1 references) + pkts bytes target prot opt in out source destination + 20 1200 KUBE-MARK-MASQ 0 -- * * 192.168.103.2 0.0.0.0/0 /* default/kubernetes:https */ + 24 1440 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https */ tcp to:192.168.103.2:8443 + + Chain KUBE-SEP-SHAXBLZK6MQ4UNVP (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.176.1 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ tcp to:10.244.176.1:53 + + Chain KUBE-SERVICES (2 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-SVC-JD5MR3NA4I4DYORP 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 8 615 KUBE-SVC-TCOU7JCQXEZGVUNU 17 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 1 60 KUBE-SVC-ERIFXISQEP7F7OF4 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 0 0 KUBE-SVC-NPX46M4PTMTKRN6Y 6 -- * * 0.0.0.0/0 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 1 60 KUBE-SVC-WDP22YZC5S6MZWYX 6 -- * * 0.0.0.0/0 10.104.166.171 /* default/netcat cluster IP */ tcp dpt:8080 + 101 6060 KUBE-NODEPORTS 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL + + Chain KUBE-SVC-ERIFXISQEP7F7OF4 (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 1 60 KUBE-SEP-SHAXBLZK6MQ4UNVP 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp -> 10.244.176.1:53 */ + + Chain KUBE-SVC-JD5MR3NA4I4DYORP (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 0 0 KUBE-SEP-IRP3JNLC7JEK2KSX 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:metrics -> 10.244.176.1:9153 */ + + Chain KUBE-SVC-NPX46M4PTMTKRN6Y (1 
references) + pkts bytes target prot opt in out source destination + 20 1200 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 24 1440 KUBE-SEP-QPAQX3CSKXOU5VQU 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https -> 192.168.103.2:8443 */ + + Chain KUBE-SVC-TCOU7JCQXEZGVUNU (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 17 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 8 615 KUBE-SEP-2R3HE55LAXVWD4VV 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns -> 10.244.176.1:53 */ + + Chain KUBE-SVC-WDP22YZC5S6MZWYX (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.104.166.171 /* default/netcat cluster IP */ tcp dpt:8080 + 1 60 KUBE-SEP-P5PFLD5SRGLMSC36 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat -> 10.244.176.3:8080 */ + + Chain cali-OUTPUT (1 references) + pkts bytes target prot opt in out source destination + 540 46891 cali-fip-dnat 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* cali:GBTAv2p5CwevEyJm */ + + Chain cali-POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 547 47446 cali-fip-snat 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* cali:Z-c7XtVd2Bq7s_hA */ + 547 47446 cali-nat-outgoing 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* cali:nYKhEzDlr11Jccal */ + 0 0 MASQUERADE 0 -- * tunl0 0.0.0.0/0 0.0.0.0/0 /* cali:SXWvdsbh4Mw7wOln */ ADDRTYPE match src-type !LOCAL limit-out ADDRTYPE match src-type LOCAL random-fully + + Chain cali-PREROUTING (1 references) + pkts bytes target prot opt in out source destination + 51 3195 cali-fip-dnat 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* cali:r6XmIziWUJsdOK6Z */ + + Chain cali-fip-dnat (2 references) + pkts bytes target prot opt in out source destination + + Chain cali-fip-snat (1 references) + pkts bytes target prot opt in out source destination + + Chain cali-nat-outgoing (1 references) + pkts bytes target prot opt in out source destination + 0 0 MASQUERADE 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* cali:flqWnvo8yq4ULQLa */ match-set cali40masq-ipam-pools src ! 
match-set cali40all-ipam-pools dst random-fully + + + >>> k8s: describe calico daemon set: + Name: calico-node + Namespace: kube-system + Selector: k8s-app=calico-node + Node-Selector: kubernetes.io/os=linux + Labels: k8s-app=calico-node + Annotations: deprecated.daemonset.template.generation: 1 + Desired Number of Nodes Scheduled: 1 + Current Number of Nodes Scheduled: 1 + Number of Nodes Scheduled with Up-to-date Pods: 1 + Number of Nodes Scheduled with Available Pods: 1 + Number of Nodes Misscheduled: 0 + Pods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed + Pod Template: + Labels: k8s-app=calico-node + Service Account: calico-node + Init Containers: + upgrade-ipam: + Image: docker.io/calico/cni:v3.30.3 + Port: + Host Port: + Command: + /opt/cni/bin/calico-ipam + -upgrade + Environment Variables from: + kubernetes-services-endpoint ConfigMap Optional: true + Environment: + KUBERNETES_NODE_NAME: (v1:spec.nodeName) + CALICO_NETWORKING_BACKEND: Optional: false + Mounts: + /host/opt/cni/bin from cni-bin-dir (rw) + /var/lib/cni/networks from host-local-net-dir (rw) + install-cni: + Image: docker.io/calico/cni:v3.30.3 + Port: + Host Port: + Command: + /opt/cni/bin/install + Environment Variables from: + kubernetes-services-endpoint ConfigMap Optional: true + Environment: + CNI_CONF_NAME: 10-calico.conflist + CNI_NETWORK_CONFIG: Optional: false + KUBERNETES_NODE_NAME: (v1:spec.nodeName) + CNI_MTU: Optional: false + SLEEP: false + Mounts: + /host/etc/cni/net.d from cni-net-dir (rw) + /host/opt/cni/bin from cni-bin-dir (rw) + mount-bpffs: + Image: docker.io/calico/node:v3.30.3 + Port: + Host Port: + Command: + calico-node + -init + -best-effort + Environment: + Mounts: + /nodeproc from nodeproc (ro) + /sys/fs from sys-fs (rw) + /var/run/calico from var-run-calico (rw) + Containers: + calico-node: + Image: docker.io/calico/node:v3.30.3 + Port: + Host Port: + Requests: + cpu: 250m + Liveness: exec [/bin/calico-node -felix-live -bird-live] delay=10s timeout=10s period=10s #success=1 #failure=6 + Readiness: exec [/bin/calico-node -felix-ready -bird-ready] delay=0s timeout=10s period=10s #success=1 #failure=3 + Environment Variables from: + kubernetes-services-endpoint ConfigMap Optional: true + Environment: + DATASTORE_TYPE: kubernetes + WAIT_FOR_DATASTORE: true + NODENAME: (v1:spec.nodeName) + CALICO_NETWORKING_BACKEND: Optional: false + CLUSTER_TYPE: k8s,bgp + IP: autodetect + CALICO_IPV4POOL_IPIP: Always + CALICO_IPV4POOL_VXLAN: Never + CALICO_IPV6POOL_VXLAN: Never + FELIX_IPINIPMTU: Optional: false + FELIX_VXLANMTU: Optional: false + FELIX_WIREGUARDMTU: Optional: false + CALICO_DISABLE_FILE_LOGGING: true + FELIX_DEFAULTENDPOINTTOHOSTACTION: ACCEPT + FELIX_IPV6SUPPORT: false + FELIX_HEALTHENABLED: true + Mounts: + /host/etc/cni/net.d from cni-net-dir (rw) + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /sys/fs/bpf from bpffs (rw) + /var/lib/calico from var-lib-calico (rw) + /var/log/calico/cni from cni-log-dir (ro) + /var/run/calico from var-run-calico (rw) + /var/run/nodeagent from policysync (rw) + Volumes: + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + var-run-calico: + Type: HostPath (bare host directory volume) + Path: /var/run/calico + HostPathType: DirectoryOrCreate + var-lib-calico: + Type: HostPath (bare host directory volume) + Path: /var/lib/calico + HostPathType: DirectoryOrCreate + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: 
FileOrCreate + sys-fs: + Type: HostPath (bare host directory volume) + Path: /sys/fs/ + HostPathType: DirectoryOrCreate + bpffs: + Type: HostPath (bare host directory volume) + Path: /sys/fs/bpf + HostPathType: Directory + nodeproc: + Type: HostPath (bare host directory volume) + Path: /proc + HostPathType: + cni-bin-dir: + Type: HostPath (bare host directory volume) + Path: /opt/cni/bin + HostPathType: DirectoryOrCreate + cni-net-dir: + Type: HostPath (bare host directory volume) + Path: /etc/cni/net.d + HostPathType: + cni-log-dir: + Type: HostPath (bare host directory volume) + Path: /var/log/calico/cni + HostPathType: + host-local-net-dir: + Type: HostPath (bare host directory volume) + Path: /var/lib/cni/networks + HostPathType: + policysync: + Type: HostPath (bare host directory volume) + Path: /var/run/nodeagent + HostPathType: DirectoryOrCreate + Priority Class Name: system-node-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: :NoSchedule op=Exists + :NoExecute op=Exists + CriticalAddonsOnly op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 46s daemonset-controller Created pod: calico-node-qcvqc + + + >>> k8s: describe calico daemon set pod(s): + Name: calico-node-qcvqc + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Service Account: calico-node + Node: calico-999044/192.168.103.2 + Start Time: Sun, 02 Nov 2025 23:23:57 +0000 + Labels: controller-revision-hash=74fc8c5545 + k8s-app=calico-node + pod-template-generation=1 + Annotations: + Status: Running + SeccompProfile: RuntimeDefault + IP: 192.168.103.2 + IPs: + IP: 192.168.103.2 + Controlled By: DaemonSet/calico-node + Init Containers: + upgrade-ipam: + Container ID: docker://b8ccf290767be762a6dc8481705a29f9cf9531d529dfdc7e5e29c4e8963c9f70 + Image: docker.io/calico/cni:v3.30.3 + Image ID: docker-pullable://calico/cni@sha256:b32ac832411b188a8adc9e31b3e23cbbecd6d63c182a3802e947303f97c2f700 + Port: + Host Port: + Command: + /opt/cni/bin/calico-ipam + -upgrade + State: Terminated + Reason: Completed + Exit Code: 0 + Started: Sun, 02 Nov 2025 23:24:04 +0000 + Finished: Sun, 02 Nov 2025 23:24:05 +0000 + Ready: True + Restart Count: 0 + Environment Variables from: + kubernetes-services-endpoint ConfigMap Optional: true + Environment: + KUBERNETES_NODE_NAME: (v1:spec.nodeName) + CALICO_NETWORKING_BACKEND: Optional: false + Mounts: + /host/opt/cni/bin from cni-bin-dir (rw) + /var/lib/cni/networks from host-local-net-dir (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-pkrxx (ro) + install-cni: + Container ID: docker://d24bc8dcd5c32d786a08b5f9fabdd193e99db59410aef0bf16870b34aa8cc465 + Image: docker.io/calico/cni:v3.30.3 + Image ID: docker-pullable://calico/cni@sha256:b32ac832411b188a8adc9e31b3e23cbbecd6d63c182a3802e947303f97c2f700 + Port: + Host Port: + Command: + /opt/cni/bin/install + State: Terminated + Reason: Completed + Exit Code: 0 + Started: Sun, 02 Nov 2025 23:24:08 +0000 + Finished: Sun, 02 Nov 2025 23:24:08 +0000 + Ready: True + Restart Count: 0 + Environment Variables from: + kubernetes-services-endpoint ConfigMap Optional: true + Environment: + CNI_CONF_NAME: 10-calico.conflist + CNI_NETWORK_CONFIG: Optional: false + KUBERNETES_NODE_NAME: (v1:spec.nodeName) + CNI_MTU: Optional: false + SLEEP: false + Mounts: + /host/etc/cni/net.d from cni-net-dir (rw) + /host/opt/cni/bin from cni-bin-dir (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-pkrxx (ro) + 
mount-bpffs: + Container ID: docker://c6bf997abba43fd5fa66e7ac1aaea29756157623933e9372c7d81c5553dc9d87 + Image: docker.io/calico/node:v3.30.3 + Image ID: docker-pullable://calico/node@sha256:92d8bcca3280cd27b9c98cb6e70c3af10ad6ff8accd288919b04ae0cd6021c2e + Port: + Host Port: + Command: + calico-node + -init + -best-effort + State: Terminated + Reason: Completed + Exit Code: 0 + Started: Sun, 02 Nov 2025 23:24:14 +0000 + Finished: Sun, 02 Nov 2025 23:24:14 +0000 + Ready: True + Restart Count: 0 + Environment: + Mounts: + /nodeproc from nodeproc (ro) + /sys/fs from sys-fs (rw) + /var/run/calico from var-run-calico (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-pkrxx (ro) + Containers: + calico-node: + Container ID: docker://95b421e96489aaf05b61c92f602a153c119207ed0c7f303913fa479c27154e1f + Image: docker.io/calico/node:v3.30.3 + Image ID: docker-pullable://calico/node@sha256:92d8bcca3280cd27b9c98cb6e70c3af10ad6ff8accd288919b04ae0cd6021c2e + Port: + Host Port: + State: Running + Started: Sun, 02 Nov 2025 23:24:14 +0000 + Ready: True + Restart Count: 0 + Requests: + cpu: 250m + Liveness: exec [/bin/calico-node -felix-live -bird-live] delay=10s timeout=10s period=10s #success=1 #failure=6 + Readiness: exec [/bin/calico-node -felix-ready -bird-ready] delay=0s timeout=10s period=10s #success=1 #failure=3 + Environment Variables from: + kubernetes-services-endpoint ConfigMap Optional: true + Environment: + DATASTORE_TYPE: kubernetes + WAIT_FOR_DATASTORE: true + NODENAME: (v1:spec.nodeName) + CALICO_NETWORKING_BACKEND: Optional: false + CLUSTER_TYPE: k8s,bgp + IP: autodetect + CALICO_IPV4POOL_IPIP: Always + CALICO_IPV4POOL_VXLAN: Never + CALICO_IPV6POOL_VXLAN: Never + FELIX_IPINIPMTU: Optional: false + FELIX_VXLANMTU: Optional: false + FELIX_WIREGUARDMTU: Optional: false + CALICO_DISABLE_FILE_LOGGING: true + FELIX_DEFAULTENDPOINTTOHOSTACTION: ACCEPT + FELIX_IPV6SUPPORT: false + FELIX_HEALTHENABLED: true + Mounts: + /host/etc/cni/net.d from cni-net-dir (rw) + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /sys/fs/bpf from bpffs (rw) + /var/lib/calico from var-lib-calico (rw) + /var/log/calico/cni from cni-log-dir (ro) + /var/run/calico from var-run-calico (rw) + /var/run/nodeagent from policysync (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-pkrxx (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + var-run-calico: + Type: HostPath (bare host directory volume) + Path: /var/run/calico + HostPathType: DirectoryOrCreate + var-lib-calico: + Type: HostPath (bare host directory volume) + Path: /var/lib/calico + HostPathType: DirectoryOrCreate + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + sys-fs: + Type: HostPath (bare host directory volume) + Path: /sys/fs/ + HostPathType: DirectoryOrCreate + bpffs: + Type: HostPath (bare host directory volume) + Path: /sys/fs/bpf + HostPathType: Directory + nodeproc: + Type: HostPath (bare host directory volume) + Path: /proc + HostPathType: + cni-bin-dir: + Type: HostPath (bare host directory volume) + Path: /opt/cni/bin + HostPathType: DirectoryOrCreate + cni-net-dir: + Type: HostPath (bare host directory volume) + Path: /etc/cni/net.d + HostPathType: + cni-log-dir: + Type: HostPath (bare host directory volume) + 
Path: /var/log/calico/cni + HostPathType: + host-local-net-dir: + Type: HostPath (bare host directory volume) + Path: /var/lib/cni/networks + HostPathType: + policysync: + Type: HostPath (bare host directory volume) + Path: /var/run/nodeagent + HostPathType: DirectoryOrCreate + kube-api-access-pkrxx: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: Burstable + Node-Selectors: kubernetes.io/os=linux + Tolerations: :NoSchedule op=Exists + :NoExecute op=Exists + CriticalAddonsOnly op=Exists + node.kubernetes.io/disk-pressure:NoSchedule op=Exists + node.kubernetes.io/memory-pressure:NoSchedule op=Exists + node.kubernetes.io/network-unavailable:NoSchedule op=Exists + node.kubernetes.io/not-ready:NoExecute op=Exists + node.kubernetes.io/pid-pressure:NoSchedule op=Exists + node.kubernetes.io/unreachable:NoExecute op=Exists + node.kubernetes.io/unschedulable:NoSchedule op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 46s default-scheduler Successfully assigned kube-system/calico-node-qcvqc to calico-999044 + Normal Pulling 45s kubelet Pulling image "docker.io/calico/cni:v3.30.3" + Normal Pulled 39s kubelet Successfully pulled image "docker.io/calico/cni:v3.30.3" in 5.928s (5.928s including waiting). Image size: 162398524 bytes. + Normal Created 39s kubelet Created container: upgrade-ipam + Normal Started 38s kubelet Started container upgrade-ipam + Normal Pulled 38s kubelet Container image "docker.io/calico/cni:v3.30.3" already present on machine + Normal Created 35s kubelet Created container: install-cni + Normal Started 35s kubelet Started container install-cni + Normal Pulling 34s kubelet Pulling image "docker.io/calico/node:v3.30.3" + Normal Pulled 29s kubelet Successfully pulled image "docker.io/calico/node:v3.30.3" in 4.706s (4.706s including waiting). Image size: 400961309 bytes. + Normal Created 29s kubelet Created container: mount-bpffs + Normal Started 29s kubelet Started container mount-bpffs + Normal Pulled 29s kubelet Container image "docker.io/calico/node:v3.30.3" already present on machine + Normal Created 29s kubelet Created container: calico-node + Normal Started 29s kubelet Started container calico-node + Warning Unhealthy 28s kubelet Readiness probe failed: calico/node is not ready: BIRD is not ready: Error querying BIRD: unable to connect to BIRDv4 socket: dial unix /var/run/bird/bird.ctl: connect: no such file or directory + Warning Unhealthy 27s kubelet Readiness probe failed: calico/node is not ready: BIRD is not ready: Error querying BIRD: unable to connect to BIRDv4 socket: dial unix /var/run/calico/bird.ctl: connect: connection refused + + + >>> k8s: calico daemon set container(s) logs (current): + [pod/calico-node-qcvqc/mount-bpffs] 2025-11-02 23:24:14.794 [INFO][1] init-best-effort/startup.go 437: Early log level set to info + [pod/calico-node-qcvqc/mount-bpffs] 2025-11-02 23:24:14.794 [INFO][1] init-best-effort/calico-init_linux.go 57: Checking if BPF filesystem is mounted. + [pod/calico-node-qcvqc/mount-bpffs] 2025-11-02 23:24:14.794 [INFO][1] init-best-effort/calico-init_linux.go 78: BPF filesystem is not mounted. Trying to mount it... + [pod/calico-node-qcvqc/mount-bpffs] 2025-11-02 23:24:14.795 [INFO][1] init-best-effort/calico-init_linux.go 83: Mounted BPF filesystem. 
+ [pod/calico-node-qcvqc/mount-bpffs] 2025-11-02 23:24:14.795 [INFO][1] init-best-effort/calico-init_linux.go 92: Checking if cgroup2 filesystem is mounted. + [pod/calico-node-qcvqc/mount-bpffs] 2025-11-02 23:24:14.795 [INFO][1] init-best-effort/calico-init_linux.go 121: Cgroup2 filesystem is not mounted. Trying to mount it... + [pod/calico-node-qcvqc/mount-bpffs] 2025-11-02 23:24:14.795 [INFO][1] init-best-effort/calico-init_linux.go 127: Mount point /run/calico/cgroup is ready for mounting root cgroup2 fs + [pod/calico-node-qcvqc/mount-bpffs] 2025-11-02 23:24:14.797 [INFO][1] init-best-effort/calico-init_linux.go 136: Mounted root cgroup2 filesystem. + [pod/calico-node-qcvqc/calico-node] 2025-11-02 23:24:27.968 [INFO][80] felix/calc_graph.go 568: Local endpoint updated id=WorkloadEndpoint(node=calico-999044, orchestrator=k8s, workload=default/netcat-cd4db9dbf-7nq9q, name=eth0) + [pod/calico-node-qcvqc/calico-node] 2025-11-02 23:24:27.969 [INFO][80] felix/int_dataplane.go 2201: Received *proto.WorkloadEndpointUpdate update from calculation graph. msg=id:{orchestrator_id:"k8s" workload_id:"default/netcat-cd4db9dbf-7nq9q" endpoint_id:"eth0"} endpoint:{state:"active" name:"cali1466d0b4737" profile_ids:"kns.default" profile_ids:"ksa.default.default" ipv4_nets:"10.244.176.3/32"} + [pod/calico-node-qcvqc/calico-node] 2025-11-02 23:24:27.969 [INFO][80] felix/endpoint_mgr.go 756: Updating per-endpoint chains. id=types.WorkloadEndpointID{OrchestratorId:"k8s", WorkloadId:"default/netcat-cd4db9dbf-7nq9q", EndpointId:"eth0"} + [pod/calico-node-qcvqc/calico-node] 2025-11-02 23:24:27.969 [INFO][80] felix/endpoint_mgr.go 789: Updating endpoint routes. id=types.WorkloadEndpointID{OrchestratorId:"k8s", WorkloadId:"default/netcat-cd4db9dbf-7nq9q", EndpointId:"eth0"} + [pod/calico-node-qcvqc/calico-node] 2025-11-02 23:24:27.969 [INFO][80] felix/endpoint_mgr.go 843: Updating QoS bandwidth state if changed id=types.WorkloadEndpointID{OrchestratorId:"k8s", WorkloadId:"default/netcat-cd4db9dbf-7nq9q", EndpointId:"eth0"} + [pod/calico-node-qcvqc/calico-node] 2025-11-02 23:24:27.969 [INFO][80] felix/endpoint_mgr.go 1436: Applying /proc/sys configuration to interface. ifaceName="cali1466d0b4737" + [pod/calico-node-qcvqc/calico-node] 2025-11-02 23:24:27.969 [INFO][80] felix/endpoint_mgr.go 631: Re-evaluated workload endpoint status adminUp=true failed=false known=true operUp=true status="up" workloadEndpointID=types.WorkloadEndpointID{OrchestratorId:"k8s", WorkloadId:"default/netcat-cd4db9dbf-7nq9q", EndpointId:"eth0"} + [pod/calico-node-qcvqc/calico-node] 2025-11-02 23:24:27.969 [INFO][80] felix/status_combiner.go 62: Storing endpoint status update ipVersion=0x4 status="up" workload=types.WorkloadEndpointID{OrchestratorId:"k8s", WorkloadId:"default/netcat-cd4db9dbf-7nq9q", EndpointId:"eth0"} + [pod/calico-node-qcvqc/calico-node] 2025-11-02 23:24:27.974 [INFO][80] felix/status_combiner.go 95: Endpoint up for at least one IP version id=types.WorkloadEndpointID{OrchestratorId:"k8s", WorkloadId:"default/netcat-cd4db9dbf-7nq9q", EndpointId:"eth0"} ipVersion=0x4 status="up" + [pod/calico-node-qcvqc/calico-node] 2025-11-02 23:24:27.974 [INFO][80] felix/status_combiner.go 114: Reporting combined status. id=types.WorkloadEndpointID{OrchestratorId:"k8s", WorkloadId:"default/netcat-cd4db9dbf-7nq9q", EndpointId:"eth0"} status="up" + [pod/calico-node-qcvqc/upgrade-ipam] 2025-11-02 23:24:05.062 [INFO][1] ipam/ipam_plugin.go 70: migrating from host-local to calico-ipam... 
+ [pod/calico-node-qcvqc/upgrade-ipam] 2025-11-02 23:24:05.064 [INFO][1] ipam/migrate.go 64: checking host-local IPAM data dir existence... + [pod/calico-node-qcvqc/upgrade-ipam] 2025-11-02 23:24:05.064 [INFO][1] ipam/migrate.go 66: host-local IPAM data dir not found; no migration necessary, successfully exiting... + [pod/calico-node-qcvqc/upgrade-ipam] 2025-11-02 23:24:05.064 [INFO][1] ipam/ipam_plugin.go 100: migration from host-local to calico-ipam complete node="calico-999044" + [pod/calico-node-qcvqc/install-cni] "snat": true, + [pod/calico-node-qcvqc/install-cni] "capabilities": {"portMappings": true} + [pod/calico-node-qcvqc/install-cni] } + [pod/calico-node-qcvqc/install-cni] ] + [pod/calico-node-qcvqc/install-cni] } + [pod/calico-node-qcvqc/install-cni] 2025-11-02 23:24:08.862 [INFO][1] cni-installer/install.go 319: Using CNI config template from CNI_NETWORK_CONFIG environment variable. + [pod/calico-node-qcvqc/install-cni] 2025-11-02 23:24:08.862 [INFO][1] cni-installer/install.go 391: CNI config file permission is set to 0600 + [pod/calico-node-qcvqc/install-cni] + [pod/calico-node-qcvqc/install-cni] 2025-11-02 23:24:08.862 [INFO][1] cni-installer/install.go 414: Created /host/etc/cni/net.d/10-calico.conflist + [pod/calico-node-qcvqc/install-cni] 2025-11-02 23:24:08.862 [INFO][1] cni-installer/install.go 254: Done configuring CNI. Sleep= false + + + >>> k8s: calico daemon set container(s) logs (previous): + error: previous terminated container "upgrade-ipam" in pod "calico-node-qcvqc" not found + error: previous terminated container "install-cni" in pod "calico-node-qcvqc" not found + error: previous terminated container "mount-bpffs" in pod "calico-node-qcvqc" not found + error: previous terminated container "calico-node" in pod "calico-node-qcvqc" not found + + + >>> k8s: describe calico deployment: + Name: calico-kube-controllers + Namespace: kube-system + CreationTimestamp: Sun, 02 Nov 2025 23:23:52 +0000 + Labels: k8s-app=calico-kube-controllers + Annotations: deployment.kubernetes.io/revision: 1 + Selector: k8s-app=calico-kube-controllers + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: Recreate + MinReadySeconds: 0 + Pod Template: + Labels: k8s-app=calico-kube-controllers + Service Account: calico-kube-controllers + Containers: + calico-kube-controllers: + Image: docker.io/calico/kube-controllers:v3.30.3 + Port: + Host Port: + Liveness: exec [/usr/bin/check-status -l] delay=10s timeout=10s period=10s #success=1 #failure=6 + Readiness: exec [/usr/bin/check-status -r] delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + ENABLED_CONTROLLERS: node,loadbalancer + DATASTORE_TYPE: kubernetes + Mounts: + Volumes: + Priority Class Name: system-cluster-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + node-role.kubernetes.io/master:NoSchedule + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: calico-kube-controllers-59556d9b4c (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 46s deployment-controller Scaled up replica set calico-kube-controllers-59556d9b4c from 0 to 1 + + + >>> k8s: describe calico deployment pod(s): + Name: calico-kube-controllers-59556d9b4c-rgpgr + Namespace: kube-system + Priority: 2000000000 + Priority Class Name: 
system-cluster-critical + Service Account: calico-kube-controllers + Node: calico-999044/192.168.103.2 + Start Time: Sun, 02 Nov 2025 23:24:09 +0000 + Labels: k8s-app=calico-kube-controllers + pod-template-hash=59556d9b4c + Annotations: cni.projectcalico.org/containerID: 0cd87fa4d54f7857b75fc7f7ca68da14c3d25050d5460b72bb36185c389329c6 + cni.projectcalico.org/podIP: 10.244.176.2/32 + cni.projectcalico.org/podIPs: 10.244.176.2/32 + Status: Running + SeccompProfile: RuntimeDefault + IP: 10.244.176.2 + IPs: + IP: 10.244.176.2 + Controlled By: ReplicaSet/calico-kube-controllers-59556d9b4c + Containers: + calico-kube-controllers: + Container ID: docker://4ef632533934d272648f9f4d136c5cd40382650fac7549ea0b82e983297ddcbb + Image: docker.io/calico/kube-controllers:v3.30.3 + Image ID: docker-pullable://calico/kube-controllers@sha256:b9df43a10ec4cc40ab95779f646d1f9c1675259b3baacf883f5247333bb6385d + Port: + Host Port: + State: Running + Started: Sun, 02 Nov 2025 23:24:20 +0000 + Ready: True + Restart Count: 0 + Liveness: exec [/usr/bin/check-status -l] delay=10s timeout=10s period=10s #success=1 #failure=6 + Readiness: exec [/usr/bin/check-status -r] delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + ENABLED_CONTROLLERS: node,loadbalancer + DATASTORE_TYPE: kubernetes + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-slrls (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-api-access-slrls: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + node-role.kubernetes.io/master:NoSchedule + node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning FailedScheduling 45s default-scheduler 0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. no new claims to deallocate, preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling. 
+ Normal Scheduled 34s default-scheduler Successfully assigned kube-system/calico-kube-controllers-59556d9b4c-rgpgr to calico-999044 + Warning FailedCreatePodSandBox 33s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "fc729db3bd1aa88be654e851b0ad66c32593b7d0d156501d8922baabf03251bb" network for pod "calico-kube-controllers-59556d9b4c-rgpgr": networkPlugin cni failed to set up pod "calico-kube-controllers-59556d9b4c-rgpgr_kube-system" network: plugin type="calico" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ + Warning FailedCreatePodSandBox 32s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "fac0c45ed698df4f76c2a58832cc444b87f39d0d1ec8f830c9cbab8f746f1cdb" network for pod "calico-kube-controllers-59556d9b4c-rgpgr": networkPlugin cni failed to set up pod "calico-kube-controllers-59556d9b4c-rgpgr_kube-system" network: plugin type="calico" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ + Warning FailedCreatePodSandBox 31s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "104d5bebead525b6f8d7e5653d9906634862b43f52fbe0599a0516eac1cd18f3" network for pod "calico-kube-controllers-59556d9b4c-rgpgr": networkPlugin cni failed to set up pod "calico-kube-controllers-59556d9b4c-rgpgr_kube-system" network: plugin type="calico" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ + Warning FailedCreatePodSandBox 30s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "03c8da80a5d1969da3668432f414adc6dab4e02046af068f8c6278be2615520b" network for pod "calico-kube-controllers-59556d9b4c-rgpgr": networkPlugin cni failed to set up pod "calico-kube-controllers-59556d9b4c-rgpgr_kube-system" network: plugin type="calico" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ + Warning FailedCreatePodSandBox 29s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "e9d2d32159bdce6c1084ace5cde4f9a1f6ed4005f201d3d382f918417e29fb33" network for pod "calico-kube-controllers-59556d9b4c-rgpgr": networkPlugin cni failed to set up pod "calico-kube-controllers-59556d9b4c-rgpgr_kube-system" network: plugin type="calico" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ + Normal SandboxChanged 28s (x6 over 33s) kubelet Pod sandbox changed, it will be killed and re-created. 
+ Warning FailedCreatePodSandBox 28s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "b0cd42e55b60b8e886ef8a69764dd3a14ed611110c411f8a7a6b0853fa6c861d" network for pod "calico-kube-controllers-59556d9b4c-rgpgr": networkPlugin cni failed to set up pod "calico-kube-controllers-59556d9b4c-rgpgr_kube-system" network: plugin type="calico" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/ + Normal Pulling 27s kubelet Pulling image "docker.io/calico/kube-controllers:v3.30.3" + Normal Pulled 23s kubelet Successfully pulled image "docker.io/calico/kube-controllers:v3.30.3" in 4.413s (4.413s including waiting). Image size: 121527133 bytes. + Normal Created 23s kubelet Created container: calico-kube-controllers + Normal Started 23s kubelet Started container calico-kube-controllers + + + >>> k8s: calico deployment container(s) logs (current): + [pod/calico-kube-controllers-59556d9b4c-rgpgr/calico-kube-controllers] I1102 23:24:20.844624 15 shared_informer.go:313] Waiting for caches to sync for pods + [pod/calico-kube-controllers-59556d9b4c-rgpgr/calico-kube-controllers] I1102 23:24:20.844627 15 shared_informer.go:320] Caches are synced for pods + [pod/calico-kube-controllers-59556d9b4c-rgpgr/calico-kube-controllers] 2025-11-02 23:24:20.844 [INFO][15] kube-controllers/hostendpoints.go 149: Will run periodic HostEndpoint sync every 5m0s + [pod/calico-kube-controllers-59556d9b4c-rgpgr/calico-kube-controllers] 2025-11-02 23:24:20.844 [INFO][15] kube-controllers/hostendpoints.go 185: Syncer is InSync, kicking sync channel status=in-sync + [pod/calico-kube-controllers-59556d9b4c-rgpgr/calico-kube-controllers] 2025-11-02 23:24:20.844 [INFO][15] kube-controllers/hostendpoints.go 262: Syncing all HostEndpoints + [pod/calico-kube-controllers-59556d9b4c-rgpgr/calico-kube-controllers] 2025-11-02 23:24:20.844 [INFO][15] kube-controllers/ipam.go 292: Will run periodic IPAM sync every 7m30s + [pod/calico-kube-controllers-59556d9b4c-rgpgr/calico-kube-controllers] 2025-11-02 23:24:20.844 [INFO][15] kube-controllers/ipam.go 398: Syncer is InSync, kicking sync channel status=in-sync + [pod/calico-kube-controllers-59556d9b4c-rgpgr/calico-kube-controllers] 2025-11-02 23:24:21.845 [INFO][15] kube-controllers/ipam.go 828: Checking dirty nodes for leaks and redundant affinities + [pod/calico-kube-controllers-59556d9b4c-rgpgr/calico-kube-controllers] 2025-11-02 23:24:23.411 [INFO][15] kube-controllers/ipam.go 828: Checking dirty nodes for leaks and redundant affinities + [pod/calico-kube-controllers-59556d9b4c-rgpgr/calico-kube-controllers] 2025-11-02 23:24:27.262 [INFO][15] kube-controllers/ipam.go 828: Checking dirty nodes for leaks and redundant affinities + + + >>> k8s: calico deployment container(s) logs (previous): + error: previous terminated container "calico-kube-controllers" in pod "calico-kube-controllers-59556d9b4c-rgpgr" not found + + + >>> k8s: describe kube-proxy daemon set: + Name: kube-proxy + Namespace: kube-system + Selector: k8s-app=kube-proxy + Node-Selector: kubernetes.io/os=linux + Labels: k8s-app=kube-proxy + Annotations: deprecated.daemonset.template.generation: 1 + Desired Number of Nodes Scheduled: 1 + Current Number of Nodes Scheduled: 1 + Number of Nodes Scheduled with Up-to-date Pods: 1 + Number of Nodes Scheduled with Available Pods: 1 + Number of Nodes Misscheduled: 0 + Pods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed + Pod 
Template: + Labels: k8s-app=kube-proxy + Service Account: kube-proxy + Containers: + kube-proxy: + Image: registry.k8s.io/kube-proxy:v1.34.1 + Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + --hostname-override=$(NODE_NAME) + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + Priority Class Name: system-node-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 46s daemonset-controller Created pod: kube-proxy-74pjj + + + >>> k8s: describe kube-proxy pod(s): + Name: kube-proxy-74pjj + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Service Account: kube-proxy + Node: calico-999044/192.168.103.2 + Start Time: Sun, 02 Nov 2025 23:23:57 +0000 + Labels: controller-revision-hash=66486579fc + k8s-app=kube-proxy + pod-template-generation=1 + Annotations: + Status: Running + IP: 192.168.103.2 + IPs: + IP: 192.168.103.2 + Controlled By: DaemonSet/kube-proxy + Containers: + kube-proxy: + Container ID: docker://7e284a7fa8255148fb1b6f07889c078c4cd0ab09712ae27ed938da23fe023a12 + Image: registry.k8s.io/kube-proxy:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-proxy@sha256:913cc83ca0b5588a81d86ce8eedeb3ed1e9c1326e81852a1ea4f622b74ff749a + Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + --hostname-override=$(NODE_NAME) + State: Running + Started: Sun, 02 Nov 2025 23:23:58 +0000 + Ready: True + Restart Count: 0 + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-zhmpn (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + kube-api-access-zhmpn: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + node.kubernetes.io/disk-pressure:NoSchedule op=Exists + node.kubernetes.io/memory-pressure:NoSchedule op=Exists + node.kubernetes.io/network-unavailable:NoSchedule op=Exists + node.kubernetes.io/not-ready:NoExecute op=Exists + node.kubernetes.io/pid-pressure:NoSchedule op=Exists + node.kubernetes.io/unreachable:NoExecute op=Exists + node.kubernetes.io/unschedulable:NoSchedule op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + 
Normal Scheduled 46s default-scheduler Successfully assigned kube-system/kube-proxy-74pjj to calico-999044 + Normal Pulled 45s kubelet Container image "registry.k8s.io/kube-proxy:v1.34.1" already present on machine + Normal Created 45s kubelet Created container: kube-proxy + Normal Started 45s kubelet Started container kube-proxy + + + >>> k8s: kube-proxy logs: + I1102 23:23:58.536002 1 server_linux.go:53] "Using iptables proxy" + I1102 23:23:58.570754 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache" + I1102 23:23:58.671143 1 shared_informer.go:356] "Caches are synced" controller="node informer cache" + I1102 23:23:58.671164 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.103.2"] + E1102 23:23:58.671214 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. Consider using `--nodeport-addresses primary`" + I1102 23:23:58.689849 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4" + I1102 23:23:58.689897 1 server_linux.go:132] "Using iptables Proxier" + I1102 23:23:58.693845 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4" + I1102 23:23:58.694135 1 server.go:527] "Version info" version="v1.34.1" + I1102 23:23:58.694160 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + I1102 23:23:58.695086 1 config.go:309] "Starting node config controller" + I1102 23:23:58.695245 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config" + I1102 23:23:58.695268 1 shared_informer.go:356] "Caches are synced" controller="node config" + I1102 23:23:58.695285 1 config.go:403] "Starting serviceCIDR config controller" + I1102 23:23:58.695293 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config" + I1102 23:23:58.695316 1 config.go:200] "Starting service config controller" + I1102 23:23:58.695326 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config" + I1102 23:23:58.695335 1 config.go:106] "Starting endpoint slice config controller" + I1102 23:23:58.695340 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config" + I1102 23:23:58.795614 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config" + I1102 23:23:58.795625 1 shared_informer.go:356] "Caches are synced" controller="service config" + I1102 23:23:58.795615 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config" + + + >>> host: kubelet daemon status: + ● kubelet.service - kubelet: The Kubernetes Node Agent + Loaded: loaded (]8;;file://calico-999044/lib/systemd/system/kubelet.service/lib/systemd/system/kubelet.service]8;;; disabled; preset: enabled) + Drop-In: /etc/systemd/system/kubelet.service.d + └─]8;;file://calico-999044/etc/systemd/system/kubelet.service.d/10-kubeadm.conf10-kubeadm.conf]8;; + Active: active (running) since Sun 2025-11-02 23:23:51 UTC; 52s ago + Docs: ]8;;http://kubernetes.io/docs/http://kubernetes.io/docs/]8;; + Main PID: 2222 (kubelet) + Tasks: 15 (limit: 629145) + Memory: 34.8M + CPU: 1.325s + CGroup: /system.slice/kubelet.service + └─2222 /var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf 
--config=/var/lib/kubelet/config.yaml --hostname-override=calico-999044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.103.2 + + Nov 02 23:24:15 calico-999044 kubelet[2222]: E1102 23:24:15.016483 2222 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"7749886edb8c6937a6b4da9f9796c5c93702493a735fbed26e6891c4ac436e09\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/coredns-66bc5c9577-9rf5v" + Nov 02 23:24:15 calico-999044 kubelet[2222]: E1102 23:24:15.016530 2222 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-66bc5c9577-9rf5v_kube-system(278f13b5-dd10-4c32-a954-ae02c6b63e7a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-66bc5c9577-9rf5v_kube-system(278f13b5-dd10-4c32-a954-ae02c6b63e7a)\\\": rpc error: code = Unknown desc = failed to set up sandbox container \\\"7749886edb8c6937a6b4da9f9796c5c93702493a735fbed26e6891c4ac436e09\\\" network for pod \\\"coredns-66bc5c9577-9rf5v\\\": networkPlugin cni failed to set up pod \\\"coredns-66bc5c9577-9rf5v_kube-system\\\" network: plugin type=\\\"calico\\\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/coredns-66bc5c9577-9rf5v" podUID="278f13b5-dd10-4c32-a954-ae02c6b63e7a" + Nov 02 23:24:15 calico-999044 kubelet[2222]: I1102 23:24:15.854346 2222 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0cd42e55b60b8e886ef8a69764dd3a14ed611110c411f8a7a6b0853fa6c861d" + Nov 02 23:24:15 calico-999044 kubelet[2222]: I1102 23:24:15.857555 2222 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/calico-node-qcvqc" podStartSLOduration=2.80434477 podStartE2EDuration="18.857540026s" podCreationTimestamp="2025-11-02 23:23:57 +0000 UTC" firstStartedPulling="2025-11-02 23:23:58.410874303 +0000 UTC m=+6.837238182" lastFinishedPulling="2025-11-02 23:24:14.464069552 +0000 UTC m=+22.890433438" observedRunningTime="2025-11-02 23:24:15.857361512 +0000 UTC m=+24.283725412" watchObservedRunningTime="2025-11-02 23:24:15.857540026 +0000 UTC m=+24.283903926" + Nov 02 23:24:15 calico-999044 kubelet[2222]: I1102 23:24:15.861549 2222 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7749886edb8c6937a6b4da9f9796c5c93702493a735fbed26e6891c4ac436e09" + Nov 02 23:24:17 calico-999044 kubelet[2222]: I1102 23:24:17.900414 2222 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-9rf5v" podStartSLOduration=19.900399204 podStartE2EDuration="19.900399204s" podCreationTimestamp="2025-11-02 23:23:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:16.894462389 +0000 UTC m=+25.320826289" watchObservedRunningTime="2025-11-02 23:24:17.900399204 +0000 UTC m=+26.326763104" + Nov 02 23:24:20 calico-999044 kubelet[2222]: I1102 23:24:20.977223 2222 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" podStartSLOduration=18.56320923 podStartE2EDuration="22.977209004s" podCreationTimestamp="2025-11-02 23:23:58 +0000 UTC" firstStartedPulling="2025-11-02 23:24:16.181411747 +0000 UTC m=+24.607775631" lastFinishedPulling="2025-11-02 23:24:20.595411519 +0000 UTC m=+29.021775405" observedRunningTime="2025-11-02 23:24:20.92851402 +0000 UTC m=+29.354877924" watchObservedRunningTime="2025-11-02 23:24:20.977209004 +0000 UTC m=+29.403572906" + Nov 02 23:24:25 calico-999044 kubelet[2222]: I1102 23:24:25.937021 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sntjb\" (UniqueName: \"kubernetes.io/projected/8fb21457-8ecf-460b-be19-61ea84636cc0-kube-api-access-sntjb\") pod \"netcat-cd4db9dbf-7nq9q\" (UID: \"8fb21457-8ecf-460b-be19-61ea84636cc0\") " pod="default/netcat-cd4db9dbf-7nq9q" + Nov 02 23:24:27 calico-999044 kubelet[2222]: I1102 23:24:27.968993 2222 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/netcat-cd4db9dbf-7nq9q" podStartSLOduration=1.7608707369999999 podStartE2EDuration="2.96897711s" podCreationTimestamp="2025-11-02 23:24:25 +0000 UTC" firstStartedPulling="2025-11-02 23:24:26.276469492 +0000 UTC m=+34.702833372" lastFinishedPulling="2025-11-02 23:24:27.484575858 +0000 UTC m=+35.910939745" observedRunningTime="2025-11-02 23:24:27.968846015 +0000 UTC m=+36.395209914" watchObservedRunningTime="2025-11-02 23:24:27.96897711 +0000 UTC m=+36.395341008" + Nov 02 23:24:34 calico-999044 kubelet[2222]: E1102 23:24:34.941174 2222 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp [::1]:37864->[::1]:32823: write tcp [::1]:37864->[::1]:32823: write: broken pipe + + + >>> host: kubelet daemon config: + # ]8;;file://calico-999044/lib/systemd/system/kubelet.service/lib/systemd/system/kubelet.service]8;; + [Unit] + Description=kubelet: The Kubernetes Node Agent + Documentation=http://kubernetes.io/docs/ + StartLimitIntervalSec=0 + + [Service] + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet + Restart=always + # Tuned for local dev: faster than upstream default (10s), but slower than systemd default (100ms) + RestartSec=600ms + + [Install] + WantedBy=multi-user.target + + # ]8;;file://calico-999044/etc/systemd/system/kubelet.service.d/10-kubeadm.conf/etc/systemd/system/kubelet.service.d/10-kubeadm.conf]8;; + [Unit] + Wants=docker.socket + + [Service] + ExecStart= + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=calico-999044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.103.2 + + [Install] + + + >>> k8s: kubelet logs: + Nov 02 23:23:44 calico-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 404. 
+ Nov 02 23:23:44 calico-999044 kubelet[1545]: E1102 23:23:44.727840 1545 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:23:44 calico-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:23:44 calico-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:23:45 calico-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 1. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:23:45 calico-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 457 and the job result is done. + Nov 02 23:23:45 calico-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 457. + Nov 02 23:23:45 calico-999044 kubelet[1642]: E1102 23:23:45.517830 1642 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:23:45 calico-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:23:45 calico-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:23:46 calico-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 2. 
+ ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:23:46 calico-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 510 and the job result is done. + Nov 02 23:23:46 calico-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 510. + Nov 02 23:23:46 calico-999044 kubelet[1718]: E1102 23:23:46.273955 1718 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:23:46 calico-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:23:46 calico-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:23:46 calico-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 3. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:23:46 calico-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 563 and the job result is done. + Nov 02 23:23:46 calico-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 563. 
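The failed starts above, and the one that follows, are the usual kubeadm bootstrap pattern rather than a test problem: the kubelet unit is started before kubeadm has written /var/lib/kubelet/config.yaml, so each attempt exits with status 1 and systemd retries it (every 600ms, per the RestartSec in the drop-in shown earlier) until the file appears. A small sketch for confirming the loop from the node's journal, with the grep patterns taken from the messages above:

  $ journalctl -u kubelet --no-pager | grep -cE 'restart counter is at'      # how many times the unit was restarted
  $ journalctl -u kubelet --no-pager | grep -m1 'config.yaml: no such file'  # the underlying cause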
+ Nov 02 23:23:47 calico-999044 kubelet[1729]: E1102 23:23:47.023106 1729 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:23:47 calico-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:23:47 calico-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:23:47 calico-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 616 and the job result is done. + Nov 02 23:23:47 calico-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 617. + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.789393 1761 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.789579 1761 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.789599 1761 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.789605 1761 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.789987 1761 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:23:47 calico-999044 kubelet[1761]: E1102 23:23:47.793254 1761 certificate_manager.go:596] "Failed while requesting a signed certificate from the control plane" err="cannot create certificate signing request: Post \"https://192.168.103.2:8443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 192.168.103.2:8443: connect: connection refused" logger="kubernetes.io/kube-apiserver-client-kubelet.UnhandledError" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.793522 1761 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.795611 1761 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.798964 1761 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. 
Defaulting to /" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.798984 1761 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.799143 1761 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.799158 1761 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"calico-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.799253 1761 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.799260 1761 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.799331 1761 container_manager_linux.go:315] "Creating Dynamic Resource Allocation (DRA) manager" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.800059 1761 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.800184 1761 kubelet.go:475] "Attempting to sync node with API server" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.800195 1761 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.800211 1761 kubelet.go:387] "Adding apiserver pod source" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.800248 1761 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:23:47 calico-999044 kubelet[1761]: E1102 23:23:47.800548 1761 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: Get \"https://192.168.103.2:8443/api/v1/nodes?fieldSelector=metadata.name%3Dcalico-999044&limit=500&resourceVersion=0\": dial tcp 192.168.103.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" + Nov 02 23:23:47 calico-999044 kubelet[1761]: E1102 23:23:47.800798 1761 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: Get \"https://192.168.103.2:8443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 192.168.103.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.800999 1761 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 02 23:23:47 
calico-999044 kubelet[1761]: I1102 23:23:47.801836 1761 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.801863 1761 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled" + Nov 02 23:23:47 calico-999044 kubelet[1761]: W1102 23:23:47.801905 1761 probe.go:272] Flexvolume plugin directory at /usr/libexec/kubernetes/kubelet-plugins/volume/exec/ does not exist. Recreating. + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.802614 1761 server.go:1262] "Started kubelet" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.802787 1761 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.803009 1761 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.803065 1761 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:23:47 calico-999044 kubelet[1761]: E1102 23:23:47.803049 1761 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://192.168.103.2:8443/api/v1/namespaces/default/events\": dial tcp 192.168.103.2:8443: connect: connection refused" event="&Event{ObjectMeta:{calico-999044.1874541aafb6cf1b default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:calico-999044,UID:calico-999044,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:calico-999044,},FirstTimestamp:2025-11-02 23:23:47.802582811 +0000 UTC m=+0.245011624,LastTimestamp:2025-11-02 23:23:47.802582811 +0000 UTC m=+0.245011624,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:calico-999044,}" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.803261 1761 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.804126 1761 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.804480 1761 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:23:47 calico-999044 kubelet[1761]: E1102 23:23:47.805093 1761 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"calico-999044\" not found" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.805123 1761 volume_manager.go:313] "Starting Kubelet Volume Manager" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.805252 1761 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.805253 1761 server.go:310] "Adding debug handlers to kubelet server" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.805328 1761 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:23:47 calico-999044 kubelet[1761]: E1102 23:23:47.805612 1761 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://192.168.103.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/calico-999044?timeout=10s\": dial tcp 192.168.103.2:8443: connect: connection refused" interval="200ms" + Nov 02 23:23:47 calico-999044 kubelet[1761]: E1102 23:23:47.805648 1761 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: Get \"https://192.168.103.2:8443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 192.168.103.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.807767 1761 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.808969 1761 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.808982 1761 factory.go:223] Registration of the systemd container factory successfully + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.813339 1761 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.813346 1761 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.813362 1761 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.814088 1761 policy_none.go:49] "None policy: Start" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.814104 1761 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.814112 1761 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.814434 1761 policy_none.go:47] "Start" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.817233 1761 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.818167 1761 kubelet_network_linux.go:54] "Initialized iptables rules." 
protocol="IPv6" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.818248 1761 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.818451 1761 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:23:47 calico-999044 kubelet[1761]: E1102 23:23:47.818507 1761 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:23:47 calico-999044 kubelet[1761]: E1102 23:23:47.819063 1761 reflector.go:205] "Failed to watch" err="failed to list *v1.RuntimeClass: Get \"https://192.168.103.2:8443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 192.168.103.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass" + Nov 02 23:23:47 calico-999044 kubelet[1761]: E1102 23:23:47.839587 1761 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.839672 1761 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.839747 1761 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.839929 1761 plugin_manager.go:118] "Starting Kubelet Plugin Manager" + Nov 02 23:23:47 calico-999044 kubelet[1761]: E1102 23:23:47.840364 1761 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="no imagefs label for configured runtime" + Nov 02 23:23:47 calico-999044 kubelet[1761]: E1102 23:23:47.840459 1761 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"calico-999044\" not found" + Nov 02 23:23:47 calico-999044 kubelet[1761]: E1102 23:23:47.937709 1761 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"calico-999044\" not found" node="calico-999044" + Nov 02 23:23:47 calico-999044 kubelet[1761]: E1102 23:23:47.940285 1761 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"calico-999044\" not found" node="calico-999044" + Nov 02 23:23:47 calico-999044 kubelet[1761]: I1102 23:23:47.943353 1761 kubelet_node_status.go:75] "Attempting to register node" node="calico-999044" + Nov 02 23:23:47 calico-999044 kubelet[1761]: E1102 23:23:47.943570 1761 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.103.2:8443/api/v1/nodes\": dial tcp 192.168.103.2:8443: connect: connection refused" node="calico-999044" + Nov 02 23:23:47 calico-999044 kubelet[1761]: E1102 23:23:47.949618 1761 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"calico-999044\" not found" node="calico-999044" + Nov 02 23:23:47 calico-999044 kubelet[1761]: E1102 23:23:47.952401 1761 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"calico-999044\" not found" node="calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: I1102 23:23:48.006665 1761 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/ec21345c134f0d5025a8e0738ee8e64e-usr-share-ca-certificates\") pod \"kube-apiserver-calico-999044\" (UID: \"ec21345c134f0d5025a8e0738ee8e64e\") " pod="kube-system/kube-apiserver-calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: I1102 23:23:48.006692 1761 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/206a473943a2c4321f21347a693c9f43-usr-local-share-ca-certificates\") pod \"kube-controller-manager-calico-999044\" (UID: \"206a473943a2c4321f21347a693c9f43\") " pod="kube-system/kube-controller-manager-calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: I1102 23:23:48.006708 1761 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/206a473943a2c4321f21347a693c9f43-usr-share-ca-certificates\") pod \"kube-controller-manager-calico-999044\" (UID: \"206a473943a2c4321f21347a693c9f43\") " pod="kube-system/kube-controller-manager-calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: I1102 23:23:48.006720 1761 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/b513a50b43fd7edba352a6c04622ec1b-etcd-certs\") pod \"etcd-calico-999044\" (UID: \"b513a50b43fd7edba352a6c04622ec1b\") " pod="kube-system/etcd-calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: I1102 23:23:48.006733 1761 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/ec21345c134f0d5025a8e0738ee8e64e-etc-ca-certificates\") pod \"kube-apiserver-calico-999044\" (UID: \"ec21345c134f0d5025a8e0738ee8e64e\") " pod="kube-system/kube-apiserver-calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: I1102 23:23:48.006745 1761 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/ec21345c134f0d5025a8e0738ee8e64e-k8s-certs\") pod \"kube-apiserver-calico-999044\" (UID: \"ec21345c134f0d5025a8e0738ee8e64e\") " pod="kube-system/kube-apiserver-calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: I1102 23:23:48.006760 1761 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/95f457c061b2c80b9e7ee4b00c5feac1-kubeconfig\") pod \"kube-scheduler-calico-999044\" (UID: \"95f457c061b2c80b9e7ee4b00c5feac1\") " pod="kube-system/kube-scheduler-calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: I1102 23:23:48.006800 1761 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/ec21345c134f0d5025a8e0738ee8e64e-ca-certs\") pod \"kube-apiserver-calico-999044\" (UID: \"ec21345c134f0d5025a8e0738ee8e64e\") " pod="kube-system/kube-apiserver-calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: I1102 23:23:48.006819 1761 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/206a473943a2c4321f21347a693c9f43-ca-certs\") pod \"kube-controller-manager-calico-999044\" (UID: \"206a473943a2c4321f21347a693c9f43\") " 
pod="kube-system/kube-controller-manager-calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: I1102 23:23:48.006840 1761 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/206a473943a2c4321f21347a693c9f43-flexvolume-dir\") pod \"kube-controller-manager-calico-999044\" (UID: \"206a473943a2c4321f21347a693c9f43\") " pod="kube-system/kube-controller-manager-calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: I1102 23:23:48.006857 1761 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/b513a50b43fd7edba352a6c04622ec1b-etcd-data\") pod \"etcd-calico-999044\" (UID: \"b513a50b43fd7edba352a6c04622ec1b\") " pod="kube-system/etcd-calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: E1102 23:23:48.006859 1761 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.103.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/calico-999044?timeout=10s\": dial tcp 192.168.103.2:8443: connect: connection refused" interval="400ms" + Nov 02 23:23:48 calico-999044 kubelet[1761]: I1102 23:23:48.006870 1761 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/ec21345c134f0d5025a8e0738ee8e64e-usr-local-share-ca-certificates\") pod \"kube-apiserver-calico-999044\" (UID: \"ec21345c134f0d5025a8e0738ee8e64e\") " pod="kube-system/kube-apiserver-calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: I1102 23:23:48.006882 1761 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/206a473943a2c4321f21347a693c9f43-etc-ca-certificates\") pod \"kube-controller-manager-calico-999044\" (UID: \"206a473943a2c4321f21347a693c9f43\") " pod="kube-system/kube-controller-manager-calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: I1102 23:23:48.006908 1761 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/206a473943a2c4321f21347a693c9f43-k8s-certs\") pod \"kube-controller-manager-calico-999044\" (UID: \"206a473943a2c4321f21347a693c9f43\") " pod="kube-system/kube-controller-manager-calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: I1102 23:23:48.006962 1761 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/206a473943a2c4321f21347a693c9f43-kubeconfig\") pod \"kube-controller-manager-calico-999044\" (UID: \"206a473943a2c4321f21347a693c9f43\") " pod="kube-system/kube-controller-manager-calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: I1102 23:23:48.144554 1761 kubelet_node_status.go:75] "Attempting to register node" node="calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: E1102 23:23:48.144782 1761 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.103.2:8443/api/v1/nodes\": dial tcp 192.168.103.2:8443: connect: connection refused" node="calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: E1102 23:23:48.408212 1761 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://192.168.103.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/calico-999044?timeout=10s\": dial tcp 192.168.103.2:8443: connect: connection refused" interval="800ms" + Nov 02 23:23:48 calico-999044 kubelet[1761]: I1102 23:23:48.545695 1761 kubelet_node_status.go:75] "Attempting to register node" node="calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: E1102 23:23:48.546188 1761 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.103.2:8443/api/v1/nodes\": dial tcp 192.168.103.2:8443: connect: connection refused" node="calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: E1102 23:23:48.832107 1761 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"calico-999044\" not found" node="calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: E1102 23:23:48.835295 1761 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"calico-999044\" not found" node="calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: E1102 23:23:48.841292 1761 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"calico-999044\" not found" node="calico-999044" + Nov 02 23:23:48 calico-999044 kubelet[1761]: E1102 23:23:48.845432 1761 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"calico-999044\" not found" node="calico-999044" + Nov 02 23:23:49 calico-999044 kubelet[1761]: I1102 23:23:49.348686 1761 kubelet_node_status.go:75] "Attempting to register node" node="calico-999044" + Nov 02 23:23:49 calico-999044 kubelet[1761]: E1102 23:23:49.744816 1761 nodelease.go:49] "Failed to get node when trying to set owner ref to the node lease" err="nodes \"calico-999044\" not found" node="calico-999044" + Nov 02 23:23:49 calico-999044 kubelet[1761]: I1102 23:23:49.802031 1761 apiserver.go:52] "Watching apiserver" + Nov 02 23:23:49 calico-999044 kubelet[1761]: I1102 23:23:49.806281 1761 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:23:49 calico-999044 kubelet[1761]: I1102 23:23:49.840887 1761 kubelet_node_status.go:78] "Successfully registered node" node="calico-999044" + Nov 02 23:23:49 calico-999044 kubelet[1761]: I1102 23:23:49.848163 1761 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-calico-999044" + Nov 02 23:23:49 calico-999044 kubelet[1761]: I1102 23:23:49.848246 1761 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-calico-999044" + Nov 02 23:23:49 calico-999044 kubelet[1761]: I1102 23:23:49.848347 1761 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-calico-999044" + Nov 02 23:23:49 calico-999044 kubelet[1761]: E1102 23:23:49.851663 1761 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-calico-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/etcd-calico-999044" + Nov 02 23:23:49 calico-999044 kubelet[1761]: E1102 23:23:49.851824 1761 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-calico-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-scheduler-calico-999044" + Nov 02 23:23:49 calico-999044 kubelet[1761]: E1102 23:23:49.851824 1761 kubelet.go:3221] "Failed creating a mirror pod" 
err="pods \"kube-apiserver-calico-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-apiserver-calico-999044" + Nov 02 23:23:49 calico-999044 kubelet[1761]: I1102 23:23:49.906104 1761 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-calico-999044" + Nov 02 23:23:49 calico-999044 kubelet[1761]: E1102 23:23:49.907510 1761 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-calico-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-scheduler-calico-999044" + Nov 02 23:23:49 calico-999044 kubelet[1761]: I1102 23:23:49.907746 1761 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-calico-999044" + Nov 02 23:23:49 calico-999044 kubelet[1761]: E1102 23:23:49.909510 1761 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-calico-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/etcd-calico-999044" + Nov 02 23:23:49 calico-999044 kubelet[1761]: I1102 23:23:49.909625 1761 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-calico-999044" + Nov 02 23:23:49 calico-999044 kubelet[1761]: E1102 23:23:49.911178 1761 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-calico-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-apiserver-calico-999044" + Nov 02 23:23:49 calico-999044 kubelet[1761]: I1102 23:23:49.911268 1761 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-calico-999044" + Nov 02 23:23:49 calico-999044 kubelet[1761]: E1102 23:23:49.912724 1761 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-calico-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-controller-manager-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[1761]: I1102 23:23:51.418179 1761 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-calico-999044" + Nov 02 23:23:51 calico-999044 systemd[1]: Stopping kubelet.service - kubelet: The Kubernetes Node Agent... + ░░ Subject: A stop job for unit kubelet.service has begun execution + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has begun execution. + ░░  + ░░ The job identifier is 749. + Nov 02 23:23:51 calico-999044 systemd[1]: kubelet.service: Deactivated successfully. + ░░ Subject: Unit succeeded + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has successfully entered the 'dead' state. + Nov 02 23:23:51 calico-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 749 and the job result is done. + Nov 02 23:23:51 calico-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 749. 
+ Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.604230 2222 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.604290 2222 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.604314 2222 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.604323 2222 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.604508 2222 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.605742 2222 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.608063 2222 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.610847 2222 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.614347 2222 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. Defaulting to /" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.614378 2222 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.614565 2222 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.614580 2222 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"calico-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.614685 2222 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.614692 2222 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.614712 2222 container_manager_linux.go:315] "Creating Dynamic Resource Allocation (DRA) manager" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.615217 2222 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.615348 2222 kubelet.go:475] "Attempting to sync node with API server" + 
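In the startup lines above, kubelet 2222 reports cgroupDriver="systemd" as received from the CRI runtime, and the container-manager nodeConfig confirms CgroupDriver "systemd" with CgroupVersion 2. One way to cross-check that the runtime and the kubelet configuration agree on the driver, sketched under the assumption of the Docker runtime reported elsewhere in this log (containerRuntime="docker", version 28.5.1):

  $ minikube ssh -p calico-999044 -- docker info --format '{{.CgroupDriver}} {{.CgroupVersion}}'  # runtime side
  $ minikube ssh -p calico-999044 -- sudo grep -i cgroup /var/lib/kubelet/config.yaml             # kubelet side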
Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.615357 2222 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.615374 2222 kubelet.go:387] "Adding apiserver pod source" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.615398 2222 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.616198 2222 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.616546 2222 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.616565 2222 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.617455 2222 server.go:1262] "Started kubelet" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.617986 2222 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.618031 2222 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.618226 2222 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.618273 2222 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.618473 2222 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.618844 2222 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.618909 2222 volume_manager.go:313] "Starting Kubelet Volume Manager" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.619034 2222 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.619209 2222 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.619358 2222 server.go:310] "Adding debug handlers to kubelet server" + Nov 02 23:23:51 calico-999044 kubelet[2222]: E1102 23:23:51.620550 2222 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"calico-999044\" not found" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.621494 2222 factory.go:223] Registration of the systemd container factory successfully + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.629654 2222 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.636091 2222 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.655463 2222 
kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.656413 2222 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv6" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.656561 2222 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.656584 2222 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:23:51 calico-999044 kubelet[2222]: E1102 23:23:51.656661 2222 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.657085 2222 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.657098 2222 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.657112 2222 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.657217 2222 state_mem.go:88] "Updated default CPUSet" cpuSet="" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.657224 2222 state_mem.go:96] "Updated CPUSet assignments" assignments={} + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.657238 2222 policy_none.go:49] "None policy: Start" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.657245 2222 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.657253 2222 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.657316 2222 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.657322 2222 policy_none.go:47] "Start" + Nov 02 23:23:51 calico-999044 kubelet[2222]: E1102 23:23:51.660641 2222 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.660762 2222 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.660770 2222 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.660898 2222 plugin_manager.go:118] "Starting Kubelet Plugin Manager" + Nov 02 23:23:51 calico-999044 kubelet[2222]: E1102 23:23:51.662153 2222 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." 
err="no imagefs label for configured runtime" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.758022 2222 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.758041 2222 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.758069 2222 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.758324 2222 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.761859 2222 kubelet_node_status.go:75] "Attempting to register node" node="calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: E1102 23:23:51.763835 2222 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-calico-999044\" already exists" pod="kube-system/etcd-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.765985 2222 kubelet_node_status.go:124] "Node was previously registered" node="calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.766053 2222 kubelet_node_status.go:78] "Successfully registered node" node="calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.919687 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/206a473943a2c4321f21347a693c9f43-flexvolume-dir\") pod \"kube-controller-manager-calico-999044\" (UID: \"206a473943a2c4321f21347a693c9f43\") " pod="kube-system/kube-controller-manager-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.919705 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/206a473943a2c4321f21347a693c9f43-kubeconfig\") pod \"kube-controller-manager-calico-999044\" (UID: \"206a473943a2c4321f21347a693c9f43\") " pod="kube-system/kube-controller-manager-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.919716 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/206a473943a2c4321f21347a693c9f43-usr-share-ca-certificates\") pod \"kube-controller-manager-calico-999044\" (UID: \"206a473943a2c4321f21347a693c9f43\") " pod="kube-system/kube-controller-manager-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.919728 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/ec21345c134f0d5025a8e0738ee8e64e-k8s-certs\") pod \"kube-apiserver-calico-999044\" (UID: \"ec21345c134f0d5025a8e0738ee8e64e\") " pod="kube-system/kube-apiserver-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.919735 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/206a473943a2c4321f21347a693c9f43-ca-certs\") pod \"kube-controller-manager-calico-999044\" (UID: \"206a473943a2c4321f21347a693c9f43\") " pod="kube-system/kube-controller-manager-calico-999044" + Nov 02 23:23:51 
calico-999044 kubelet[2222]: I1102 23:23:51.919742 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/206a473943a2c4321f21347a693c9f43-etc-ca-certificates\") pod \"kube-controller-manager-calico-999044\" (UID: \"206a473943a2c4321f21347a693c9f43\") " pod="kube-system/kube-controller-manager-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.919749 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/206a473943a2c4321f21347a693c9f43-k8s-certs\") pod \"kube-controller-manager-calico-999044\" (UID: \"206a473943a2c4321f21347a693c9f43\") " pod="kube-system/kube-controller-manager-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.919757 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/95f457c061b2c80b9e7ee4b00c5feac1-kubeconfig\") pod \"kube-scheduler-calico-999044\" (UID: \"95f457c061b2c80b9e7ee4b00c5feac1\") " pod="kube-system/kube-scheduler-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.919764 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/ec21345c134f0d5025a8e0738ee8e64e-ca-certs\") pod \"kube-apiserver-calico-999044\" (UID: \"ec21345c134f0d5025a8e0738ee8e64e\") " pod="kube-system/kube-apiserver-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.919773 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/ec21345c134f0d5025a8e0738ee8e64e-etc-ca-certificates\") pod \"kube-apiserver-calico-999044\" (UID: \"ec21345c134f0d5025a8e0738ee8e64e\") " pod="kube-system/kube-apiserver-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.919780 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/206a473943a2c4321f21347a693c9f43-usr-local-share-ca-certificates\") pod \"kube-controller-manager-calico-999044\" (UID: \"206a473943a2c4321f21347a693c9f43\") " pod="kube-system/kube-controller-manager-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.919791 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/b513a50b43fd7edba352a6c04622ec1b-etcd-certs\") pod \"etcd-calico-999044\" (UID: \"b513a50b43fd7edba352a6c04622ec1b\") " pod="kube-system/etcd-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.919799 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/b513a50b43fd7edba352a6c04622ec1b-etcd-data\") pod \"etcd-calico-999044\" (UID: \"b513a50b43fd7edba352a6c04622ec1b\") " pod="kube-system/etcd-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.919834 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: 
\"kubernetes.io/host-path/ec21345c134f0d5025a8e0738ee8e64e-usr-local-share-ca-certificates\") pod \"kube-apiserver-calico-999044\" (UID: \"ec21345c134f0d5025a8e0738ee8e64e\") " pod="kube-system/kube-apiserver-calico-999044" + Nov 02 23:23:51 calico-999044 kubelet[2222]: I1102 23:23:51.919863 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/ec21345c134f0d5025a8e0738ee8e64e-usr-share-ca-certificates\") pod \"kube-apiserver-calico-999044\" (UID: \"ec21345c134f0d5025a8e0738ee8e64e\") " pod="kube-system/kube-apiserver-calico-999044" + Nov 02 23:23:52 calico-999044 kubelet[2222]: I1102 23:23:52.616993 2222 apiserver.go:52] "Watching apiserver" + Nov 02 23:23:52 calico-999044 kubelet[2222]: I1102 23:23:52.619796 2222 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:23:52 calico-999044 kubelet[2222]: I1102 23:23:52.682906 2222 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-calico-999044" + Nov 02 23:23:52 calico-999044 kubelet[2222]: I1102 23:23:52.682974 2222 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-calico-999044" + Nov 02 23:23:52 calico-999044 kubelet[2222]: I1102 23:23:52.683106 2222 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-calico-999044" + Nov 02 23:23:52 calico-999044 kubelet[2222]: E1102 23:23:52.686724 2222 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-calico-999044\" already exists" pod="kube-system/kube-scheduler-calico-999044" + Nov 02 23:23:52 calico-999044 kubelet[2222]: E1102 23:23:52.686724 2222 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-calico-999044\" already exists" pod="kube-system/kube-apiserver-calico-999044" + Nov 02 23:23:52 calico-999044 kubelet[2222]: E1102 23:23:52.686724 2222 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-calico-999044\" already exists" pod="kube-system/etcd-calico-999044" + Nov 02 23:23:52 calico-999044 kubelet[2222]: I1102 23:23:52.711629 2222 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-calico-999044" podStartSLOduration=1.711615912 podStartE2EDuration="1.711615912s" podCreationTimestamp="2025-11-02 23:23:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:23:52.697685274 +0000 UTC m=+1.124049179" watchObservedRunningTime="2025-11-02 23:23:52.711615912 +0000 UTC m=+1.137979797" + Nov 02 23:23:52 calico-999044 kubelet[2222]: I1102 23:23:52.724821 2222 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-apiserver-calico-999044" podStartSLOduration=1.7248104180000001 podStartE2EDuration="1.724810418s" podCreationTimestamp="2025-11-02 23:23:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:23:52.714172489 +0000 UTC m=+1.140536404" watchObservedRunningTime="2025-11-02 23:23:52.724810418 +0000 UTC m=+1.151174372" + Nov 02 23:23:52 calico-999044 kubelet[2222]: I1102 23:23:52.725214 2222 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-calico-999044" podStartSLOduration=1.72520577 podStartE2EDuration="1.72520577s" 
podCreationTimestamp="2025-11-02 23:23:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:23:52.724735764 +0000 UTC m=+1.151099664" watchObservedRunningTime="2025-11-02 23:23:52.72520577 +0000 UTC m=+1.151569670" + Nov 02 23:23:52 calico-999044 kubelet[2222]: I1102 23:23:52.735734 2222 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-scheduler-calico-999044" podStartSLOduration=1.7357229429999999 podStartE2EDuration="1.735722943s" podCreationTimestamp="2025-11-02 23:23:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:23:52.730669953 +0000 UTC m=+1.157033858" watchObservedRunningTime="2025-11-02 23:23:52.735722943 +0000 UTC m=+1.162086848" + Nov 02 23:23:56 calico-999044 kubelet[2222]: I1102 23:23:56.832579 2222 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24" + Nov 02 23:23:56 calico-999044 kubelet[2222]: I1102 23:23:56.832990 2222 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.060842 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkrxx\" (UniqueName: \"kubernetes.io/projected/fbd84e65-cb83-485b-862f-96e56d2177e0-kube-api-access-pkrxx\") pod \"calico-node-qcvqc\" (UID: \"fbd84e65-cb83-485b-862f-96e56d2177e0\") " pod="kube-system/calico-node-qcvqc" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.060877 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/fbd84e65-cb83-485b-862f-96e56d2177e0-lib-modules\") pod \"calico-node-qcvqc\" (UID: \"fbd84e65-cb83-485b-862f-96e56d2177e0\") " pod="kube-system/calico-node-qcvqc" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.060893 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhmpn\" (UniqueName: \"kubernetes.io/projected/b62b1ade-0a14-464a-aea3-ffedb4daf511-kube-api-access-zhmpn\") pod \"kube-proxy-74pjj\" (UID: \"b62b1ade-0a14-464a-aea3-ffedb4daf511\") " pod="kube-system/kube-proxy-74pjj" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.060906 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-bin-dir\" (UniqueName: \"kubernetes.io/host-path/fbd84e65-cb83-485b-862f-96e56d2177e0-cni-bin-dir\") pod \"calico-node-qcvqc\" (UID: \"fbd84e65-cb83-485b-862f-96e56d2177e0\") " pod="kube-system/calico-node-qcvqc" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.060930 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"policysync\" (UniqueName: \"kubernetes.io/host-path/fbd84e65-cb83-485b-862f-96e56d2177e0-policysync\") pod \"calico-node-qcvqc\" (UID: \"fbd84e65-cb83-485b-862f-96e56d2177e0\") " pod="kube-system/calico-node-qcvqc" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.060944 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-calico\" (UniqueName: \"kubernetes.io/host-path/fbd84e65-cb83-485b-862f-96e56d2177e0-var-run-calico\") pod \"calico-node-qcvqc\" (UID: 
\"fbd84e65-cb83-485b-862f-96e56d2177e0\") " pod="kube-system/calico-node-qcvqc" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.060956 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-calico\" (UniqueName: \"kubernetes.io/host-path/fbd84e65-cb83-485b-862f-96e56d2177e0-var-lib-calico\") pod \"calico-node-qcvqc\" (UID: \"fbd84e65-cb83-485b-862f-96e56d2177e0\") " pod="kube-system/calico-node-qcvqc" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.060968 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-local-net-dir\" (UniqueName: \"kubernetes.io/host-path/fbd84e65-cb83-485b-862f-96e56d2177e0-host-local-net-dir\") pod \"calico-node-qcvqc\" (UID: \"fbd84e65-cb83-485b-862f-96e56d2177e0\") " pod="kube-system/calico-node-qcvqc" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.060981 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-log-dir\" (UniqueName: \"kubernetes.io/host-path/fbd84e65-cb83-485b-862f-96e56d2177e0-cni-log-dir\") pod \"calico-node-qcvqc\" (UID: \"fbd84e65-cb83-485b-862f-96e56d2177e0\") " pod="kube-system/calico-node-qcvqc" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.060997 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/b62b1ade-0a14-464a-aea3-ffedb4daf511-xtables-lock\") pod \"kube-proxy-74pjj\" (UID: \"b62b1ade-0a14-464a-aea3-ffedb4daf511\") " pod="kube-system/kube-proxy-74pjj" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.061009 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys-fs\" (UniqueName: \"kubernetes.io/host-path/fbd84e65-cb83-485b-862f-96e56d2177e0-sys-fs\") pod \"calico-node-qcvqc\" (UID: \"fbd84e65-cb83-485b-862f-96e56d2177e0\") " pod="kube-system/calico-node-qcvqc" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.061024 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bpffs\" (UniqueName: \"kubernetes.io/host-path/fbd84e65-cb83-485b-862f-96e56d2177e0-bpffs\") pod \"calico-node-qcvqc\" (UID: \"fbd84e65-cb83-485b-862f-96e56d2177e0\") " pod="kube-system/calico-node-qcvqc" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.061039 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/b62b1ade-0a14-464a-aea3-ffedb4daf511-lib-modules\") pod \"kube-proxy-74pjj\" (UID: \"b62b1ade-0a14-464a-aea3-ffedb4daf511\") " pod="kube-system/kube-proxy-74pjj" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.061051 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nodeproc\" (UniqueName: \"kubernetes.io/host-path/fbd84e65-cb83-485b-862f-96e56d2177e0-nodeproc\") pod \"calico-node-qcvqc\" (UID: \"fbd84e65-cb83-485b-862f-96e56d2177e0\") " pod="kube-system/calico-node-qcvqc" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.061063 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-net-dir\" (UniqueName: \"kubernetes.io/host-path/fbd84e65-cb83-485b-862f-96e56d2177e0-cni-net-dir\") pod \"calico-node-qcvqc\" (UID: \"fbd84e65-cb83-485b-862f-96e56d2177e0\") " 
pod="kube-system/calico-node-qcvqc" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.061075 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/b62b1ade-0a14-464a-aea3-ffedb4daf511-kube-proxy\") pod \"kube-proxy-74pjj\" (UID: \"b62b1ade-0a14-464a-aea3-ffedb4daf511\") " pod="kube-system/kube-proxy-74pjj" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.061088 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/fbd84e65-cb83-485b-862f-96e56d2177e0-xtables-lock\") pod \"calico-node-qcvqc\" (UID: \"fbd84e65-cb83-485b-862f-96e56d2177e0\") " pod="kube-system/calico-node-qcvqc" + Nov 02 23:23:58 calico-999044 kubelet[2222]: I1102 23:23:58.715575 2222 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-74pjj" podStartSLOduration=1.7155567870000001 podStartE2EDuration="1.715556787s" podCreationTimestamp="2025-11-02 23:23:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:23:58.715557816 +0000 UTC m=+7.141921720" watchObservedRunningTime="2025-11-02 23:23:58.715556787 +0000 UTC m=+7.141920683" + Nov 02 23:24:09 calico-999044 kubelet[2222]: I1102 23:24:09.492952 2222 kubelet_node_status.go:439] "Fast updating node status as it just became ready" + Nov 02 23:24:09 calico-999044 kubelet[2222]: I1102 23:24:09.547992 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slrls\" (UniqueName: \"kubernetes.io/projected/acc14785-2dcc-4ca5-a13a-1a3ff97062cb-kube-api-access-slrls\") pod \"calico-kube-controllers-59556d9b4c-rgpgr\" (UID: \"acc14785-2dcc-4ca5-a13a-1a3ff97062cb\") " pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" + Nov 02 23:24:09 calico-999044 kubelet[2222]: I1102 23:24:09.548014 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvpkv\" (UniqueName: \"kubernetes.io/projected/014a34ec-dc2c-4526-8a82-bd958455203e-kube-api-access-jvpkv\") pod \"storage-provisioner\" (UID: \"014a34ec-dc2c-4526-8a82-bd958455203e\") " pod="kube-system/storage-provisioner" + Nov 02 23:24:09 calico-999044 kubelet[2222]: I1102 23:24:09.548025 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/014a34ec-dc2c-4526-8a82-bd958455203e-tmp\") pod \"storage-provisioner\" (UID: \"014a34ec-dc2c-4526-8a82-bd958455203e\") " pod="kube-system/storage-provisioner" + Nov 02 23:24:09 calico-999044 kubelet[2222]: I1102 23:24:09.548037 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfscb\" (UniqueName: \"kubernetes.io/projected/278f13b5-dd10-4c32-a954-ae02c6b63e7a-kube-api-access-lfscb\") pod \"coredns-66bc5c9577-9rf5v\" (UID: \"278f13b5-dd10-4c32-a954-ae02c6b63e7a\") " pod="kube-system/coredns-66bc5c9577-9rf5v" + Nov 02 23:24:09 calico-999044 kubelet[2222]: I1102 23:24:09.548046 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/278f13b5-dd10-4c32-a954-ae02c6b63e7a-config-volume\") pod \"coredns-66bc5c9577-9rf5v\" (UID: 
\"278f13b5-dd10-4c32-a954-ae02c6b63e7a\") " pod="kube-system/coredns-66bc5c9577-9rf5v" + Nov 02 23:24:10 calico-999044 kubelet[2222]: E1102 23:24:10.006346 2222 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"4e1e45bf7bd3230039dd866ea3b8b95ebf4ff48c9a880191409b0aed4464662c\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" + Nov 02 23:24:10 calico-999044 kubelet[2222]: E1102 23:24:10.006399 2222 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to set up sandbox container \"4e1e45bf7bd3230039dd866ea3b8b95ebf4ff48c9a880191409b0aed4464662c\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/coredns-66bc5c9577-9rf5v" + Nov 02 23:24:10 calico-999044 kubelet[2222]: E1102 23:24:10.006414 2222 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"4e1e45bf7bd3230039dd866ea3b8b95ebf4ff48c9a880191409b0aed4464662c\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/coredns-66bc5c9577-9rf5v" + Nov 02 23:24:10 calico-999044 kubelet[2222]: E1102 23:24:10.006453 2222 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-66bc5c9577-9rf5v_kube-system(278f13b5-dd10-4c32-a954-ae02c6b63e7a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-66bc5c9577-9rf5v_kube-system(278f13b5-dd10-4c32-a954-ae02c6b63e7a)\\\": rpc error: code = Unknown desc = failed to set up sandbox container \\\"4e1e45bf7bd3230039dd866ea3b8b95ebf4ff48c9a880191409b0aed4464662c\\\" network for pod \\\"coredns-66bc5c9577-9rf5v\\\": networkPlugin cni failed to set up pod \\\"coredns-66bc5c9577-9rf5v_kube-system\\\" network: plugin type=\\\"calico\\\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/coredns-66bc5c9577-9rf5v" podUID="278f13b5-dd10-4c32-a954-ae02c6b63e7a" + Nov 02 23:24:10 calico-999044 kubelet[2222]: E1102 23:24:10.013279 2222 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"fc729db3bd1aa88be654e851b0ad66c32593b7d0d156501d8922baabf03251bb\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" + Nov 02 23:24:10 calico-999044 kubelet[2222]: E1102 
23:24:10.013332 2222 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to set up sandbox container \"fc729db3bd1aa88be654e851b0ad66c32593b7d0d156501d8922baabf03251bb\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" + Nov 02 23:24:10 calico-999044 kubelet[2222]: E1102 23:24:10.013352 2222 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"fc729db3bd1aa88be654e851b0ad66c32593b7d0d156501d8922baabf03251bb\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" + Nov 02 23:24:10 calico-999044 kubelet[2222]: E1102 23:24:10.013387 2222 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system(acc14785-2dcc-4ca5-a13a-1a3ff97062cb)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"calico-kube-controllers-59556d9b4c-rgpgr_kube-system(acc14785-2dcc-4ca5-a13a-1a3ff97062cb)\\\": rpc error: code = Unknown desc = failed to set up sandbox container \\\"fc729db3bd1aa88be654e851b0ad66c32593b7d0d156501d8922baabf03251bb\\\" network for pod \\\"calico-kube-controllers-59556d9b4c-rgpgr\\\": networkPlugin cni failed to set up pod \\\"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\\\" network: plugin type=\\\"calico\\\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" podUID="acc14785-2dcc-4ca5-a13a-1a3ff97062cb" + Nov 02 23:24:10 calico-999044 kubelet[2222]: I1102 23:24:10.762281 2222 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e1e45bf7bd3230039dd866ea3b8b95ebf4ff48c9a880191409b0aed4464662c" + Nov 02 23:24:10 calico-999044 kubelet[2222]: I1102 23:24:10.768137 2222 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fc729db3bd1aa88be654e851b0ad66c32593b7d0d156501d8922baabf03251bb" + Nov 02 23:24:10 calico-999044 kubelet[2222]: I1102 23:24:10.771396 2222 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=12.771382264 podStartE2EDuration="12.771382264s" podCreationTimestamp="2025-11-02 23:23:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:10.770990914 +0000 UTC m=+19.197354811" watchObservedRunningTime="2025-11-02 23:24:10.771382264 +0000 UTC m=+19.197746160" + Nov 02 23:24:11 calico-999044 kubelet[2222]: E1102 23:24:11.029860 2222 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to set up sandbox container 
\"b9fc4e3f4f53354f33273b7dd75c7ddf149ee2d7c71d1dc4e6631962eb3783ce\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" + Nov 02 23:24:11 calico-999044 kubelet[2222]: E1102 23:24:11.029902 2222 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to set up sandbox container \"b9fc4e3f4f53354f33273b7dd75c7ddf149ee2d7c71d1dc4e6631962eb3783ce\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/coredns-66bc5c9577-9rf5v" + Nov 02 23:24:11 calico-999044 kubelet[2222]: E1102 23:24:11.029927 2222 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"b9fc4e3f4f53354f33273b7dd75c7ddf149ee2d7c71d1dc4e6631962eb3783ce\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/coredns-66bc5c9577-9rf5v" + Nov 02 23:24:11 calico-999044 kubelet[2222]: E1102 23:24:11.029971 2222 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-66bc5c9577-9rf5v_kube-system(278f13b5-dd10-4c32-a954-ae02c6b63e7a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-66bc5c9577-9rf5v_kube-system(278f13b5-dd10-4c32-a954-ae02c6b63e7a)\\\": rpc error: code = Unknown desc = failed to set up sandbox container \\\"b9fc4e3f4f53354f33273b7dd75c7ddf149ee2d7c71d1dc4e6631962eb3783ce\\\" network for pod \\\"coredns-66bc5c9577-9rf5v\\\": networkPlugin cni failed to set up pod \\\"coredns-66bc5c9577-9rf5v_kube-system\\\" network: plugin type=\\\"calico\\\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/coredns-66bc5c9577-9rf5v" podUID="278f13b5-dd10-4c32-a954-ae02c6b63e7a" + Nov 02 23:24:11 calico-999044 kubelet[2222]: E1102 23:24:11.031507 2222 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"fac0c45ed698df4f76c2a58832cc444b87f39d0d1ec8f830c9cbab8f746f1cdb\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" + Nov 02 23:24:11 calico-999044 kubelet[2222]: E1102 23:24:11.031549 2222 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to set up sandbox container \"fac0c45ed698df4f76c2a58832cc444b87f39d0d1ec8f830c9cbab8f746f1cdb\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": 
networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" + Nov 02 23:24:11 calico-999044 kubelet[2222]: E1102 23:24:11.031562 2222 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"fac0c45ed698df4f76c2a58832cc444b87f39d0d1ec8f830c9cbab8f746f1cdb\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" + Nov 02 23:24:11 calico-999044 kubelet[2222]: E1102 23:24:11.031608 2222 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system(acc14785-2dcc-4ca5-a13a-1a3ff97062cb)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"calico-kube-controllers-59556d9b4c-rgpgr_kube-system(acc14785-2dcc-4ca5-a13a-1a3ff97062cb)\\\": rpc error: code = Unknown desc = failed to set up sandbox container \\\"fac0c45ed698df4f76c2a58832cc444b87f39d0d1ec8f830c9cbab8f746f1cdb\\\" network for pod \\\"calico-kube-controllers-59556d9b4c-rgpgr\\\": networkPlugin cni failed to set up pod \\\"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\\\" network: plugin type=\\\"calico\\\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" podUID="acc14785-2dcc-4ca5-a13a-1a3ff97062cb" + Nov 02 23:24:11 calico-999044 kubelet[2222]: I1102 23:24:11.773893 2222 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fac0c45ed698df4f76c2a58832cc444b87f39d0d1ec8f830c9cbab8f746f1cdb" + Nov 02 23:24:11 calico-999044 kubelet[2222]: I1102 23:24:11.776716 2222 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b9fc4e3f4f53354f33273b7dd75c7ddf149ee2d7c71d1dc4e6631962eb3783ce" + Nov 02 23:24:12 calico-999044 kubelet[2222]: E1102 23:24:12.149701 2222 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"bd9b6b0d25fa888f29021ed8f79dac5b7f751fd9ef95f80fa6bfab8d371fb7da\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" + Nov 02 23:24:12 calico-999044 kubelet[2222]: E1102 23:24:12.149739 2222 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to set up sandbox container \"bd9b6b0d25fa888f29021ed8f79dac5b7f751fd9ef95f80fa6bfab8d371fb7da\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat 
/var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/coredns-66bc5c9577-9rf5v" + Nov 02 23:24:12 calico-999044 kubelet[2222]: E1102 23:24:12.149754 2222 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"bd9b6b0d25fa888f29021ed8f79dac5b7f751fd9ef95f80fa6bfab8d371fb7da\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/coredns-66bc5c9577-9rf5v" + Nov 02 23:24:12 calico-999044 kubelet[2222]: E1102 23:24:12.149790 2222 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-66bc5c9577-9rf5v_kube-system(278f13b5-dd10-4c32-a954-ae02c6b63e7a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-66bc5c9577-9rf5v_kube-system(278f13b5-dd10-4c32-a954-ae02c6b63e7a)\\\": rpc error: code = Unknown desc = failed to set up sandbox container \\\"bd9b6b0d25fa888f29021ed8f79dac5b7f751fd9ef95f80fa6bfab8d371fb7da\\\" network for pod \\\"coredns-66bc5c9577-9rf5v\\\": networkPlugin cni failed to set up pod \\\"coredns-66bc5c9577-9rf5v_kube-system\\\" network: plugin type=\\\"calico\\\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/coredns-66bc5c9577-9rf5v" podUID="278f13b5-dd10-4c32-a954-ae02c6b63e7a" + Nov 02 23:24:12 calico-999044 kubelet[2222]: E1102 23:24:12.150708 2222 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"104d5bebead525b6f8d7e5653d9906634862b43f52fbe0599a0516eac1cd18f3\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" + Nov 02 23:24:12 calico-999044 kubelet[2222]: E1102 23:24:12.150742 2222 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to set up sandbox container \"104d5bebead525b6f8d7e5653d9906634862b43f52fbe0599a0516eac1cd18f3\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" + Nov 02 23:24:12 calico-999044 kubelet[2222]: E1102 23:24:12.150758 2222 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"104d5bebead525b6f8d7e5653d9906634862b43f52fbe0599a0516eac1cd18f3\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat 
/var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" + Nov 02 23:24:12 calico-999044 kubelet[2222]: E1102 23:24:12.150799 2222 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system(acc14785-2dcc-4ca5-a13a-1a3ff97062cb)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"calico-kube-controllers-59556d9b4c-rgpgr_kube-system(acc14785-2dcc-4ca5-a13a-1a3ff97062cb)\\\": rpc error: code = Unknown desc = failed to set up sandbox container \\\"104d5bebead525b6f8d7e5653d9906634862b43f52fbe0599a0516eac1cd18f3\\\" network for pod \\\"calico-kube-controllers-59556d9b4c-rgpgr\\\": networkPlugin cni failed to set up pod \\\"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\\\" network: plugin type=\\\"calico\\\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" podUID="acc14785-2dcc-4ca5-a13a-1a3ff97062cb" + Nov 02 23:24:12 calico-999044 kubelet[2222]: I1102 23:24:12.783910 2222 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="104d5bebead525b6f8d7e5653d9906634862b43f52fbe0599a0516eac1cd18f3" + Nov 02 23:24:12 calico-999044 kubelet[2222]: I1102 23:24:12.788430 2222 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd9b6b0d25fa888f29021ed8f79dac5b7f751fd9ef95f80fa6bfab8d371fb7da" + Nov 02 23:24:13 calico-999044 kubelet[2222]: E1102 23:24:13.579935 2222 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"e5a211e29e4b83215dce5111f61cc05a70d29326947d7b0e29438dba8149e6cd\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" + Nov 02 23:24:13 calico-999044 kubelet[2222]: E1102 23:24:13.579990 2222 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to set up sandbox container \"e5a211e29e4b83215dce5111f61cc05a70d29326947d7b0e29438dba8149e6cd\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/coredns-66bc5c9577-9rf5v" + Nov 02 23:24:13 calico-999044 kubelet[2222]: E1102 23:24:13.580018 2222 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"e5a211e29e4b83215dce5111f61cc05a70d29326947d7b0e29438dba8149e6cd\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/coredns-66bc5c9577-9rf5v" + Nov 02 23:24:13 calico-999044 
kubelet[2222]: E1102 23:24:13.580079 2222 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-66bc5c9577-9rf5v_kube-system(278f13b5-dd10-4c32-a954-ae02c6b63e7a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-66bc5c9577-9rf5v_kube-system(278f13b5-dd10-4c32-a954-ae02c6b63e7a)\\\": rpc error: code = Unknown desc = failed to set up sandbox container \\\"e5a211e29e4b83215dce5111f61cc05a70d29326947d7b0e29438dba8149e6cd\\\" network for pod \\\"coredns-66bc5c9577-9rf5v\\\": networkPlugin cni failed to set up pod \\\"coredns-66bc5c9577-9rf5v_kube-system\\\" network: plugin type=\\\"calico\\\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/coredns-66bc5c9577-9rf5v" podUID="278f13b5-dd10-4c32-a954-ae02c6b63e7a" + Nov 02 23:24:13 calico-999044 kubelet[2222]: E1102 23:24:13.584938 2222 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"03c8da80a5d1969da3668432f414adc6dab4e02046af068f8c6278be2615520b\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" + Nov 02 23:24:13 calico-999044 kubelet[2222]: E1102 23:24:13.584976 2222 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to set up sandbox container \"03c8da80a5d1969da3668432f414adc6dab4e02046af068f8c6278be2615520b\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" + Nov 02 23:24:13 calico-999044 kubelet[2222]: E1102 23:24:13.584991 2222 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"03c8da80a5d1969da3668432f414adc6dab4e02046af068f8c6278be2615520b\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" + Nov 02 23:24:13 calico-999044 kubelet[2222]: E1102 23:24:13.585046 2222 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system(acc14785-2dcc-4ca5-a13a-1a3ff97062cb)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"calico-kube-controllers-59556d9b4c-rgpgr_kube-system(acc14785-2dcc-4ca5-a13a-1a3ff97062cb)\\\": rpc error: code = Unknown desc = failed to set up sandbox container \\\"03c8da80a5d1969da3668432f414adc6dab4e02046af068f8c6278be2615520b\\\" network for pod \\\"calico-kube-controllers-59556d9b4c-rgpgr\\\": networkPlugin cni failed to set 
up pod \\\"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\\\" network: plugin type=\\\"calico\\\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" podUID="acc14785-2dcc-4ca5-a13a-1a3ff97062cb" + Nov 02 23:24:13 calico-999044 kubelet[2222]: I1102 23:24:13.797957 2222 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03c8da80a5d1969da3668432f414adc6dab4e02046af068f8c6278be2615520b" + Nov 02 23:24:13 calico-999044 kubelet[2222]: I1102 23:24:13.806419 2222 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5a211e29e4b83215dce5111f61cc05a70d29326947d7b0e29438dba8149e6cd" + Nov 02 23:24:14 calico-999044 kubelet[2222]: E1102 23:24:14.542198 2222 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"e9d2d32159bdce6c1084ace5cde4f9a1f6ed4005f201d3d382f918417e29fb33\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" + Nov 02 23:24:14 calico-999044 kubelet[2222]: E1102 23:24:14.542245 2222 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to set up sandbox container \"e9d2d32159bdce6c1084ace5cde4f9a1f6ed4005f201d3d382f918417e29fb33\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" + Nov 02 23:24:14 calico-999044 kubelet[2222]: E1102 23:24:14.542264 2222 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"e9d2d32159bdce6c1084ace5cde4f9a1f6ed4005f201d3d382f918417e29fb33\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" + Nov 02 23:24:14 calico-999044 kubelet[2222]: E1102 23:24:14.542304 2222 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system(acc14785-2dcc-4ca5-a13a-1a3ff97062cb)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"calico-kube-controllers-59556d9b4c-rgpgr_kube-system(acc14785-2dcc-4ca5-a13a-1a3ff97062cb)\\\": rpc error: code = Unknown desc = failed to set up sandbox container \\\"e9d2d32159bdce6c1084ace5cde4f9a1f6ed4005f201d3d382f918417e29fb33\\\" network for pod \\\"calico-kube-controllers-59556d9b4c-rgpgr\\\": networkPlugin cni failed to set up pod \\\"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\\\" network: plugin 
type=\\\"calico\\\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" podUID="acc14785-2dcc-4ca5-a13a-1a3ff97062cb" + Nov 02 23:24:14 calico-999044 kubelet[2222]: E1102 23:24:14.542852 2222 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"e3ef798be033bbd7b1229da33bc6926aed958630402a0dc391bbe3050ef0242f\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" + Nov 02 23:24:14 calico-999044 kubelet[2222]: E1102 23:24:14.542879 2222 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to set up sandbox container \"e3ef798be033bbd7b1229da33bc6926aed958630402a0dc391bbe3050ef0242f\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/coredns-66bc5c9577-9rf5v" + Nov 02 23:24:14 calico-999044 kubelet[2222]: E1102 23:24:14.542899 2222 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"e3ef798be033bbd7b1229da33bc6926aed958630402a0dc391bbe3050ef0242f\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/coredns-66bc5c9577-9rf5v" + Nov 02 23:24:14 calico-999044 kubelet[2222]: E1102 23:24:14.542961 2222 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-66bc5c9577-9rf5v_kube-system(278f13b5-dd10-4c32-a954-ae02c6b63e7a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-66bc5c9577-9rf5v_kube-system(278f13b5-dd10-4c32-a954-ae02c6b63e7a)\\\": rpc error: code = Unknown desc = failed to set up sandbox container \\\"e3ef798be033bbd7b1229da33bc6926aed958630402a0dc391bbe3050ef0242f\\\" network for pod \\\"coredns-66bc5c9577-9rf5v\\\": networkPlugin cni failed to set up pod \\\"coredns-66bc5c9577-9rf5v_kube-system\\\" network: plugin type=\\\"calico\\\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/coredns-66bc5c9577-9rf5v" podUID="278f13b5-dd10-4c32-a954-ae02c6b63e7a" + Nov 02 23:24:14 calico-999044 kubelet[2222]: I1102 23:24:14.831332 2222 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e9d2d32159bdce6c1084ace5cde4f9a1f6ed4005f201d3d382f918417e29fb33" + Nov 02 23:24:14 calico-999044 kubelet[2222]: I1102 23:24:14.837284 2222 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e3ef798be033bbd7b1229da33bc6926aed958630402a0dc391bbe3050ef0242f" + Nov 02 23:24:15 
calico-999044 kubelet[2222]: E1102 23:24:15.016316 2222 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"b0cd42e55b60b8e886ef8a69764dd3a14ed611110c411f8a7a6b0853fa6c861d\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" + Nov 02 23:24:15 calico-999044 kubelet[2222]: E1102 23:24:15.016365 2222 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to set up sandbox container \"b0cd42e55b60b8e886ef8a69764dd3a14ed611110c411f8a7a6b0853fa6c861d\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" + Nov 02 23:24:15 calico-999044 kubelet[2222]: E1102 23:24:15.016379 2222 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"b0cd42e55b60b8e886ef8a69764dd3a14ed611110c411f8a7a6b0853fa6c861d\" network for pod \"calico-kube-controllers-59556d9b4c-rgpgr\": networkPlugin cni failed to set up pod \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" + Nov 02 23:24:15 calico-999044 kubelet[2222]: E1102 23:24:15.016316 2222 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"7749886edb8c6937a6b4da9f9796c5c93702493a735fbed26e6891c4ac436e09\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" + Nov 02 23:24:15 calico-999044 kubelet[2222]: E1102 23:24:15.016419 2222 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"calico-kube-controllers-59556d9b4c-rgpgr_kube-system(acc14785-2dcc-4ca5-a13a-1a3ff97062cb)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"calico-kube-controllers-59556d9b4c-rgpgr_kube-system(acc14785-2dcc-4ca5-a13a-1a3ff97062cb)\\\": rpc error: code = Unknown desc = failed to set up sandbox container \\\"b0cd42e55b60b8e886ef8a69764dd3a14ed611110c411f8a7a6b0853fa6c861d\\\" network for pod \\\"calico-kube-controllers-59556d9b4c-rgpgr\\\": networkPlugin cni failed to set up pod \\\"calico-kube-controllers-59556d9b4c-rgpgr_kube-system\\\" network: plugin type=\\\"calico\\\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" 
podUID="acc14785-2dcc-4ca5-a13a-1a3ff97062cb" + Nov 02 23:24:15 calico-999044 kubelet[2222]: E1102 23:24:15.016459 2222 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to set up sandbox container \"7749886edb8c6937a6b4da9f9796c5c93702493a735fbed26e6891c4ac436e09\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/coredns-66bc5c9577-9rf5v" + Nov 02 23:24:15 calico-999044 kubelet[2222]: E1102 23:24:15.016483 2222 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"7749886edb8c6937a6b4da9f9796c5c93702493a735fbed26e6891c4ac436e09\" network for pod \"coredns-66bc5c9577-9rf5v\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-9rf5v_kube-system\" network: plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/coredns-66bc5c9577-9rf5v" + Nov 02 23:24:15 calico-999044 kubelet[2222]: E1102 23:24:15.016530 2222 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-66bc5c9577-9rf5v_kube-system(278f13b5-dd10-4c32-a954-ae02c6b63e7a)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-66bc5c9577-9rf5v_kube-system(278f13b5-dd10-4c32-a954-ae02c6b63e7a)\\\": rpc error: code = Unknown desc = failed to set up sandbox container \\\"7749886edb8c6937a6b4da9f9796c5c93702493a735fbed26e6891c4ac436e09\\\" network for pod \\\"coredns-66bc5c9577-9rf5v\\\": networkPlugin cni failed to set up pod \\\"coredns-66bc5c9577-9rf5v_kube-system\\\" network: plugin type=\\\"calico\\\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/coredns-66bc5c9577-9rf5v" podUID="278f13b5-dd10-4c32-a954-ae02c6b63e7a" + Nov 02 23:24:15 calico-999044 kubelet[2222]: I1102 23:24:15.854346 2222 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0cd42e55b60b8e886ef8a69764dd3a14ed611110c411f8a7a6b0853fa6c861d" + Nov 02 23:24:15 calico-999044 kubelet[2222]: I1102 23:24:15.857555 2222 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/calico-node-qcvqc" podStartSLOduration=2.80434477 podStartE2EDuration="18.857540026s" podCreationTimestamp="2025-11-02 23:23:57 +0000 UTC" firstStartedPulling="2025-11-02 23:23:58.410874303 +0000 UTC m=+6.837238182" lastFinishedPulling="2025-11-02 23:24:14.464069552 +0000 UTC m=+22.890433438" observedRunningTime="2025-11-02 23:24:15.857361512 +0000 UTC m=+24.283725412" watchObservedRunningTime="2025-11-02 23:24:15.857540026 +0000 UTC m=+24.283903926" + Nov 02 23:24:15 calico-999044 kubelet[2222]: I1102 23:24:15.861549 2222 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7749886edb8c6937a6b4da9f9796c5c93702493a735fbed26e6891c4ac436e09" + Nov 02 23:24:17 calico-999044 kubelet[2222]: I1102 23:24:17.900414 2222 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-9rf5v" podStartSLOduration=19.900399204 
podStartE2EDuration="19.900399204s" podCreationTimestamp="2025-11-02 23:23:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:16.894462389 +0000 UTC m=+25.320826289" watchObservedRunningTime="2025-11-02 23:24:17.900399204 +0000 UTC m=+26.326763104" + Nov 02 23:24:20 calico-999044 kubelet[2222]: I1102 23:24:20.977223 2222 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/calico-kube-controllers-59556d9b4c-rgpgr" podStartSLOduration=18.56320923 podStartE2EDuration="22.977209004s" podCreationTimestamp="2025-11-02 23:23:58 +0000 UTC" firstStartedPulling="2025-11-02 23:24:16.181411747 +0000 UTC m=+24.607775631" lastFinishedPulling="2025-11-02 23:24:20.595411519 +0000 UTC m=+29.021775405" observedRunningTime="2025-11-02 23:24:20.92851402 +0000 UTC m=+29.354877924" watchObservedRunningTime="2025-11-02 23:24:20.977209004 +0000 UTC m=+29.403572906" + Nov 02 23:24:25 calico-999044 kubelet[2222]: I1102 23:24:25.937021 2222 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sntjb\" (UniqueName: \"kubernetes.io/projected/8fb21457-8ecf-460b-be19-61ea84636cc0-kube-api-access-sntjb\") pod \"netcat-cd4db9dbf-7nq9q\" (UID: \"8fb21457-8ecf-460b-be19-61ea84636cc0\") " pod="default/netcat-cd4db9dbf-7nq9q" + Nov 02 23:24:27 calico-999044 kubelet[2222]: I1102 23:24:27.968993 2222 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/netcat-cd4db9dbf-7nq9q" podStartSLOduration=1.7608707369999999 podStartE2EDuration="2.96897711s" podCreationTimestamp="2025-11-02 23:24:25 +0000 UTC" firstStartedPulling="2025-11-02 23:24:26.276469492 +0000 UTC m=+34.702833372" lastFinishedPulling="2025-11-02 23:24:27.484575858 +0000 UTC m=+35.910939745" observedRunningTime="2025-11-02 23:24:27.968846015 +0000 UTC m=+36.395209914" watchObservedRunningTime="2025-11-02 23:24:27.96897711 +0000 UTC m=+36.395341008" + Nov 02 23:24:34 calico-999044 kubelet[2222]: E1102 23:24:34.941174 2222 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp [::1]:37864->[::1]:32823: write tcp [::1]:37864->[::1]:32823: write: broken pipe + + + >>> host: /etc/kubernetes/kubelet.conf: + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://192.168.103.2:8443 + name: mk + contexts: + - context: + cluster: mk + user: system:node:calico-999044 + name: system:node:calico-999044@mk + current-context: system:node:calico-999044@mk + kind: Config + users: + - name: system:node:calico-999044 + user: + client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem + client-key: /var/lib/kubelet/pki/kubelet-client-current.pem + + + >>> host: /var/lib/kubelet/config.yaml: + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + >>> k8s: kubectl config: + apiVersion: v1 + clusters: + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:23:58 UTC + provider: 
minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.103.2:8443 + name: calico-999044 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:24:04 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.85.2:8443 + name: custom-flannel-999044 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:24:29 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.76.2:8443 + name: false-999044 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:23:57 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.94.2:8443 + name: kindnet-999044 + contexts: + - context: + cluster: calico-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:23:58 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: calico-999044 + name: calico-999044 + - context: + cluster: custom-flannel-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:24:04 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: custom-flannel-999044 + name: custom-flannel-999044 + - context: + cluster: false-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:24:29 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: false-999044 + name: false-999044 + - context: + cluster: kindnet-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:23:57 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: kindnet-999044 + name: kindnet-999044 + current-context: false-999044 + kind: Config + users: + - name: calico-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.key + - name: custom-flannel-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.key + - name: false-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.key + - name: kindnet-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.crt + client-key: 
/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.key + + + >>> k8s: cms: + apiVersion: v1 + items: + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:23:57Z" + name: kube-root-ca.crt + namespace: default + resourceVersion: "439" + uid: f8743ecf-6fe8-48f2-ae23-3ade222f1232 + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. 
+ creationTimestamp: "2025-11-02T23:23:57Z" + name: kube-root-ca.crt + namespace: kube-node-lease + resourceVersion: "440" + uid: 69b64e81-14db-4c44-a764-60f48b455ed0 + - apiVersion: v1 + data: + jws-kubeconfig-662yes: eyJhbGciOiJIUzI1NiIsImtpZCI6IjY2MnllcyJ9..KF4PVqqb25I6vuNKNJwW1L-nLwbRUUEJ3c4U8NKxfww + kubeconfig: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://control-plane.minikube.internal:8443 + name: "" + contexts: null + current-context: "" + kind: Config + users: null + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:51Z" + name: cluster-info + namespace: kube-public + resourceVersion: "429" + uid: 102fd6b5-09d8-477f-b4af-4874a35d4fcd + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal 
endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:23:57Z" + name: kube-root-ca.crt + namespace: kube-public + resourceVersion: "441" + uid: 7625258d-5ca6-4d62-b325-4eb10ed7aa57 + - apiVersion: v1 + data: + calico_backend: bird + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "log_file_path": "/var/log/calico/cni/cni.log", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "type": "calico-ipam" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + } + ] + } + typha_service_name: none + veth_mtu: "0" + kind: ConfigMap + metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","data":{"calico_backend":"bird","cni_network_config":"{\n \"name\": \"k8s-pod-network\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"calico\",\n \"log_level\": \"info\",\n \"log_file_path\": \"/var/log/calico/cni/cni.log\",\n \"datastore_type\": \"kubernetes\",\n \"nodename\": \"__KUBERNETES_NODE_NAME__\",\n \"mtu\": __CNI_MTU__,\n \"ipam\": {\n \"type\": \"calico-ipam\"\n },\n \"policy\": {\n \"type\": \"k8s\"\n },\n \"kubernetes\": {\n \"kubeconfig\": \"__KUBECONFIG_FILEPATH__\"\n }\n },\n {\n \"type\": \"portmap\",\n \"snat\": true,\n \"capabilities\": {\"portMappings\": true}\n }\n ]\n}","typha_service_name":"none","veth_mtu":"0"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"calico-config","namespace":"kube-system"}} + creationTimestamp: "2025-11-02T23:23:52Z" + name: calico-config + namespace: kube-system + resourceVersion: "302" + uid: 14bb2542-c49a-4922-908a-d50c74ca9be1 + - apiVersion: v1 + data: + Corefile: | + .:53 { + log + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + hosts { + 192.168.103.1 host.minikube.internal + fallthrough + } + forward . 
/etc/resolv.conf { + max_concurrent 1000 + } + cache 30 { + disable success cluster.local + disable denial cluster.local + } + loop + reload + loadbalance + } + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:51Z" + name: coredns + namespace: kube-system + resourceVersion: "488" + uid: 46e45952-9cfc-4822-8299-c9f5d4f672a8 + - apiVersion: v1 + data: + client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + requestheader-allowed-names: '["front-proxy-client"]' + requestheader-client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDETCCAfmgAwIBAgIIKJrc+ws4/ZswDQYJKoZIhvcNAQELBQAwGTEXMBUGA1UE + AxMOZnJvbnQtcHJveHktY2EwHhcNMjUxMTAyMjMxODQ1WhcNMzUxMDMxMjMyMzQ1 + WjAZMRcwFQYDVQQDEw5mcm9udC1wcm94eS1jYTCCASIwDQYJKoZIhvcNAQEBBQAD + ggEPADCCAQoCggEBAKu4jTqOldy+cl4tk7SEmPVc7gmpAe+IMmGmZB9MQx3+0aMy + rHNWsM+tOsLHa/pM2eaUyAYTer/RlQbxnZXBzgQtcZe9I6O97bI4DrdoefWpVpHH + jNhFefJR34nYWAvTq1X8YRmOPtol78VGQNBZWnVQ/PPobbeaqnhWZXREpO483UVZ + d78raajb7royL6O5ectyD6ZrURZ+5iwZHcIoPwRiMB01uMQXbbwXtqUhlxkXe0VM + ZM7Oykl11h07+gQ6NpGIo9keq+TgY0lf3hmM63PWg5WE+1uJSi0J/owji+qCkDva + B2BSquCEQfvtt5S8qM1lGaYBH/HkyLZQy6BYmCUCAwEAAaNdMFswDgYDVR0PAQH/ + BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEc3DMhF6cBs08Y/ECv+ + wvLI4+JcMBkGA1UdEQQSMBCCDmZyb250LXByb3h5LWNhMA0GCSqGSIb3DQEBCwUA + A4IBAQAUoNjS++SgihUqmToKIMYJEhpGIdSUgs4OwoofM2+GDhoKA9BqEm7sBfXt + fqfHZYeDreLq0TK0brHX2sWjiFIQXmC/LHdVgrJ6bqDp4v3pwqGGfwm2L/Fjoey2 + ZHpnK9lKeIIXYtdLbzPSd1sQWiT047j06U6QV4P8RlMWiyBV+ZGMhJIyd3osAnoy + IW841NVrjUEzDeioVZggniTy1G9jRFf8vIEKOiPCMLD2sR86KoJF4uEK/UAUP7RT + SQE26WOtHIZwmJoOEljDsbMjd1ayhQOjkDBJK/O/b06CVhadeNCRyZAiXJE9SD+s + VnKNGrVZM5z+AN0LbNKR2Q1xeuVP + -----END CERTIFICATE----- + requestheader-extra-headers-prefix: '["X-Remote-Extra-"]' + requestheader-group-headers: '["X-Remote-Group"]' + requestheader-username-headers: '["X-Remote-User"]' + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:49Z" + name: extension-apiserver-authentication + namespace: kube-system + resourceVersion: "26" + uid: 4e6ee435-1b7b-441f-b366-4d117f1309ac + - apiVersion: v1 + data: + since: "2025-11-02" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:49Z" + name: kube-apiserver-legacy-service-account-token-tracking + namespace: kube-system + resourceVersion: "17" + uid: 8f307a5c-04ed-497c-8701-6df927fdaef8 + - apiVersion: v1 + data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: 
0.0.0.0 + bindAddressHardFail: false + clientConnection: + acceptContentTypes: "" + burst: 0 + contentType: "" + kubeconfig: /var/lib/kube-proxy/kubeconfig.conf + qps: 0 + clusterCIDR: 10.244.0.0/16 + configSyncPeriod: 0s + conntrack: + maxPerCore: 0 + min: null + tcpBeLiberal: false + tcpCloseWaitTimeout: 0s + tcpEstablishedTimeout: 0s + udpStreamTimeout: 0s + udpTimeout: 0s + detectLocal: + bridgeInterface: "" + interfaceNamePrefix: "" + detectLocalMode: "" + enableProfiling: false + healthzBindAddress: "" + hostnameOverride: "" + iptables: + localhostNodePorts: null + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + ipvs: + excludeCIDRs: null + minSyncPeriod: 0s + scheduler: "" + strictARP: false + syncPeriod: 0s + tcpFinTimeout: 0s + tcpTimeout: 0s + udpTimeout: 0s + kind: KubeProxyConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + metricsBindAddress: 0.0.0.0:10249 + mode: "" + nftables: + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + nodePortAddresses: null + oomScoreAdj: null + portRange: "" + showHiddenMetricsForVersion: "" + winkernel: + enableDSR: false + forwardHealthCheckVip: false + networkName: "" + rootHnsEndpointName: "" + sourceVip: "" + kubeconfig.conf: |- + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://control-plane.minikube.internal:8443 + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:51Z" + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system + resourceVersion: "286" + uid: 6b13525f-b13a-4fe8-9b83-49a1911ea46a + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. 
+ creationTimestamp: "2025-11-02T23:23:57Z" + name: kube-root-ca.crt + namespace: kube-system + resourceVersion: "442" + uid: 06535d3f-c0b7-4b3a-a52e-165a4b5360ec + - apiVersion: v1 + data: + ClusterConfiguration: | + apiServer: + certSANs: + - 127.0.0.1 + - localhost + - 192.168.103.2 + extraArgs: + - name: enable-admission-plugins + value: NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + apiVersion: kubeadm.k8s.io/v1beta4 + caCertificateValidityPeriod: 87600h0m0s + certificateValidityPeriod: 8760h0m0s + certificatesDir: /var/lib/minikube/certs + clusterName: mk + controlPlaneEndpoint: control-plane.minikube.internal:8443 + controllerManager: + extraArgs: + - name: allocate-node-cidrs + value: "true" + - name: leader-elect + value: "false" + dns: {} + encryptionAlgorithm: RSA-2048 + etcd: + local: + dataDir: /var/lib/minikube/etcd + imageRepository: registry.k8s.io + kind: ClusterConfiguration + kubernetesVersion: v1.34.1 + networking: + dnsDomain: cluster.local + podSubnet: 10.244.0.0/16 + serviceSubnet: 10.96.0.0/12 + proxy: {} + scheduler: + extraArgs: + - name: leader-elect + value: "false" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:51Z" + name: kubeadm-config + namespace: kube-system + resourceVersion: "201" + uid: f541149c-2a0d-4619-ad3d-df8c040f6bac + - apiVersion: v1 + data: + kubelet: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:51Z" + name: kubelet-config + namespace: kube-system + resourceVersion: "204" + uid: 41de7bac-a634-4856-b873-c0557474f996 + kind: List + metadata: + resourceVersion: "" + + + >>> host: docker daemon status: + ● docker.service - Docker Application Container Engine + Loaded: loaded (]8;;file://calico-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;;; enabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:23:44 UTC; 1min 0s ago + TriggeredBy: ● docker.socket + Docs: ]8;;https://docs.docker.comhttps://docs.docker.com]8;; + Main PID: 1060 (dockerd) + Tasks: 14 + Memory: 856.2M + CPU: 9.679s + CGroup: /system.slice/docker.service + └─1060 
/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + + Nov 02 23:24:10 calico-999044 dockerd[1060]: time="2025-11-02T23:24:10.996576235Z" level=info msg="ignoring event" container=fac0c45ed698df4f76c2a58832cc444b87f39d0d1ec8f830c9cbab8f746f1cdb module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + Nov 02 23:24:12 calico-999044 dockerd[1060]: time="2025-11-02T23:24:12.089033048Z" level=info msg="ignoring event" container=bd9b6b0d25fa888f29021ed8f79dac5b7f751fd9ef95f80fa6bfab8d371fb7da module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + Nov 02 23:24:12 calico-999044 dockerd[1060]: time="2025-11-02T23:24:12.091321559Z" level=info msg="ignoring event" container=104d5bebead525b6f8d7e5653d9906634862b43f52fbe0599a0516eac1cd18f3 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + Nov 02 23:24:13 calico-999044 dockerd[1060]: time="2025-11-02T23:24:13.228680509Z" level=info msg="ignoring event" container=e5a211e29e4b83215dce5111f61cc05a70d29326947d7b0e29438dba8149e6cd module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + Nov 02 23:24:13 calico-999044 dockerd[1060]: time="2025-11-02T23:24:13.232048806Z" level=info msg="ignoring event" container=03c8da80a5d1969da3668432f414adc6dab4e02046af068f8c6278be2615520b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + Nov 02 23:24:14 calico-999044 dockerd[1060]: time="2025-11-02T23:24:14.528746247Z" level=info msg="ignoring event" container=e3ef798be033bbd7b1229da33bc6926aed958630402a0dc391bbe3050ef0242f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + Nov 02 23:24:14 calico-999044 dockerd[1060]: time="2025-11-02T23:24:14.529315168Z" level=info msg="ignoring event" container=e9d2d32159bdce6c1084ace5cde4f9a1f6ed4005f201d3d382f918417e29fb33 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + Nov 02 23:24:14 calico-999044 dockerd[1060]: time="2025-11-02T23:24:14.811944221Z" level=info msg="ignoring event" container=c6bf997abba43fd5fa66e7ac1aaea29756157623933e9372c7d81c5553dc9d87 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + Nov 02 23:24:15 calico-999044 dockerd[1060]: time="2025-11-02T23:24:15.000954474Z" level=info msg="ignoring event" container=b0cd42e55b60b8e886ef8a69764dd3a14ed611110c411f8a7a6b0853fa6c861d module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + Nov 02 23:24:15 calico-999044 dockerd[1060]: time="2025-11-02T23:24:15.002392026Z" level=info msg="ignoring event" container=7749886edb8c6937a6b4da9f9796c5c93702493a735fbed26e6891c4ac436e09 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + + + >>> host: docker daemon config: + # ]8;;file://calico-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;; + [Unit] + Description=Docker Application Container Engine + Documentation=https://docs.docker.com + After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target + Wants=network-online.target containerd.service + Requires=docker.socket + StartLimitBurst=3 + StartLimitIntervalSec=60 + + 
[Service] + Type=notify + Restart=always + + + + # This file is a systemd drop-in unit that inherits from the base dockerd configuration. + # The base configuration already specifies an 'ExecStart=...' command. The first directive + # here is to clear out that command inherited from the base configuration. Without this, + # the command from the base configuration and the command specified here are treated as + # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd + # will catch this invalid input and refuse to start the service with an error like: + # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. + + # NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other + # container runtimes. If left unlimited, it may result in OOM issues with MySQL. + ExecStart= + ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + ExecReload=/bin/kill -s HUP $MAINPID + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Uncomment TasksMax if your systemd version supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + TimeoutStartSec=0 + + # set delegate yes so that systemd does not reset the cgroups of docker containers + Delegate=yes + + # kill only the docker process, not all processes in the cgroup + KillMode=process + OOMScoreAdjust=-500 + + [Install] + WantedBy=multi-user.target + + + >>> host: /etc/docker/daemon.json: + {"exec-opts":["native.cgroupdriver=systemd"],"log-driver":"json-file","log-opts":{"max-size":"100m"},"storage-driver":"overlay2"} + + >>> host: docker system info: + Client: Docker Engine - Community + Version: 28.5.1 + Context: default + Debug Mode: false + Plugins: + buildx: Docker Buildx (Docker Inc.) 
+ Version: v0.29.1 + Path: /usr/libexec/docker/cli-plugins/docker-buildx + + Server: + Containers: 35 + Running: 20 + Paused: 0 + Stopped: 15 + Images: 12 + Server Version: 28.5.1 + Storage Driver: overlay2 + Backing Filesystem: extfs + Supports d_type: true + Using metacopy: false + Native Overlay Diff: true + userxattr: false + Logging Driver: json-file + Cgroup Driver: systemd + Cgroup Version: 2 + Plugins: + Volume: local + Network: bridge host ipvlan macvlan null overlay + Log: awslogs fluentd gcplogs gelf journald json-file local splunk syslog + CDI spec directories: + /etc/cdi + /var/run/cdi + Swarm: inactive + Runtimes: io.containerd.runc.v2 runc + Default Runtime: runc + Init Binary: docker-init + containerd version: b98a3aace656320842a23f4a392a33f46af97866 + runc version: v1.3.0-0-g4ca628d1 + init version: de40ad0 + Security Options: + seccomp + Profile: builtin + cgroupns + Kernel Version: 6.6.97+ + Operating System: Debian GNU/Linux 12 (bookworm) + OSType: linux + Architecture: x86_64 + CPUs: 8 + Total Memory: 60.83GiB + Name: calico-999044 + ID: 2e119dc9-3d6d-4679-a586-f24b5142f4c6 + Docker Root Dir: /var/lib/docker + Debug Mode: false + No Proxy: control-plane.minikube.internal + Labels: + provider=docker + Experimental: false + Insecure Registries: + 10.96.0.0/12 + ::1/128 + 127.0.0.0/8 + Live Restore Enabled: false + + + + >>> host: cri-docker daemon status: + ● cri-docker.service - CRI Interface for Docker Application Container Engine + Loaded: loaded (]8;;file://calico-999044/lib/systemd/system/cri-docker.service/lib/systemd/system/cri-docker.service]8;;; disabled; preset: enabled) + Drop-In: /etc/systemd/system/cri-docker.service.d + └─]8;;file://calico-999044/etc/systemd/system/cri-docker.service.d/10-cni.conf10-cni.conf]8;; + Active: active (running) since Sun 2025-11-02 23:23:44 UTC; 1min 1s ago + TriggeredBy: ● cri-docker.socket + Docs: ]8;;https://docs.mirantis.comhttps://docs.mirantis.com]8;; + Main PID: 1370 (cri-dockerd) + Tasks: 14 + Memory: 17.7M + CPU: 2.020s + CGroup: /system.slice/cri-docker.service + └─1370 /usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + Nov 02 23:24:26 calico-999044 cri-dockerd[1370]: 2025-11-02 23:24:26.262 [INFO][5450] ipam/ipam.go 878: Auto-assigned 1 out of 1 IPv4s: [10.244.176.3/26] handle="k8s-pod-network.5b0a41ed8d2df07f6cb8986c9ef4b73d7b1d94071c83579d63cbed3f93f40fb5" host="calico-999044" + Nov 02 23:24:26 calico-999044 cri-dockerd[1370]: 2025-11-02 23:24:26.262 [INFO][5450] ipam/ipam_plugin.go 374: Released host-wide IPAM lock. 
+ Nov 02 23:24:26 calico-999044 cri-dockerd[1370]: 2025-11-02 23:24:26.262 [INFO][5450] ipam/ipam_plugin.go 283: Calico CNI IPAM assigned addresses IPv4=[10.244.176.3/26] IPv6=[] ContainerID="5b0a41ed8d2df07f6cb8986c9ef4b73d7b1d94071c83579d63cbed3f93f40fb5" HandleID="k8s-pod-network.5b0a41ed8d2df07f6cb8986c9ef4b73d7b1d94071c83579d63cbed3f93f40fb5" Workload="calico--999044-k8s-netcat--cd4db9dbf--7nq9q-eth0" + Nov 02 23:24:26 calico-999044 cri-dockerd[1370]: 2025-11-02 23:24:26.263 [INFO][5437] cni-plugin/k8s.go 418: Populated endpoint ContainerID="5b0a41ed8d2df07f6cb8986c9ef4b73d7b1d94071c83579d63cbed3f93f40fb5" Namespace="default" Pod="netcat-cd4db9dbf-7nq9q" WorkloadEndpoint="calico--999044-k8s-netcat--cd4db9dbf--7nq9q-eth0" endpoint=&v3.WorkloadEndpoint{TypeMeta:v1.TypeMeta{Kind:"WorkloadEndpoint", APIVersion:"projectcalico.org/v3"}, ObjectMeta:v1.ObjectMeta{Name:"calico--999044-k8s-netcat--cd4db9dbf--7nq9q-eth0", GenerateName:"netcat-cd4db9dbf-", Namespace:"default", SelfLink:"", UID:"8fb21457-8ecf-460b-be19-61ea84636cc0", ResourceVersion:"659", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 23, 24, 25, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"app":"netcat", "pod-template-hash":"cd4db9dbf", "projectcalico.org/namespace":"default", "projectcalico.org/orchestrator":"k8s", "projectcalico.org/serviceaccount":"default"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v3.WorkloadEndpointSpec{Orchestrator:"k8s", Workload:"", Node:"calico-999044", ContainerID:"", Pod:"netcat-cd4db9dbf-7nq9q", Endpoint:"eth0", ServiceAccountName:"default", IPNetworks:[]string{"10.244.176.3/32"}, IPNATs:[]v3.IPNAT(nil), IPv4Gateway:"", IPv6Gateway:"", Profiles:[]string{"kns.default", "ksa.default.default"}, InterfaceName:"cali1466d0b4737", MAC:"", Ports:[]v3.WorkloadEndpointPort(nil), AllowSpoofedSourcePrefixes:[]string(nil), QoSControls:(*v3.QoSControls)(nil)}} + Nov 02 23:24:26 calico-999044 cri-dockerd[1370]: 2025-11-02 23:24:26.263 [INFO][5437] cni-plugin/k8s.go 419: Calico CNI using IPs: [10.244.176.3/32] ContainerID="5b0a41ed8d2df07f6cb8986c9ef4b73d7b1d94071c83579d63cbed3f93f40fb5" Namespace="default" Pod="netcat-cd4db9dbf-7nq9q" WorkloadEndpoint="calico--999044-k8s-netcat--cd4db9dbf--7nq9q-eth0" + Nov 02 23:24:26 calico-999044 cri-dockerd[1370]: 2025-11-02 23:24:26.263 [INFO][5437] cni-plugin/dataplane_linux.go 69: Setting the host side veth name to cali1466d0b4737 ContainerID="5b0a41ed8d2df07f6cb8986c9ef4b73d7b1d94071c83579d63cbed3f93f40fb5" Namespace="default" Pod="netcat-cd4db9dbf-7nq9q" WorkloadEndpoint="calico--999044-k8s-netcat--cd4db9dbf--7nq9q-eth0" + Nov 02 23:24:26 calico-999044 cri-dockerd[1370]: 2025-11-02 23:24:26.264 [INFO][5437] cni-plugin/dataplane_linux.go 508: Disabling IPv4 forwarding ContainerID="5b0a41ed8d2df07f6cb8986c9ef4b73d7b1d94071c83579d63cbed3f93f40fb5" Namespace="default" Pod="netcat-cd4db9dbf-7nq9q" WorkloadEndpoint="calico--999044-k8s-netcat--cd4db9dbf--7nq9q-eth0" + Nov 02 23:24:26 calico-999044 cri-dockerd[1370]: 2025-11-02 23:24:26.265 [INFO][5437] cni-plugin/k8s.go 446: Added Mac, interface name, and active container ID to endpoint ContainerID="5b0a41ed8d2df07f6cb8986c9ef4b73d7b1d94071c83579d63cbed3f93f40fb5" Namespace="default" Pod="netcat-cd4db9dbf-7nq9q" WorkloadEndpoint="calico--999044-k8s-netcat--cd4db9dbf--7nq9q-eth0" 
endpoint=&v3.WorkloadEndpoint{TypeMeta:v1.TypeMeta{Kind:"WorkloadEndpoint", APIVersion:"projectcalico.org/v3"}, ObjectMeta:v1.ObjectMeta{Name:"calico--999044-k8s-netcat--cd4db9dbf--7nq9q-eth0", GenerateName:"netcat-cd4db9dbf-", Namespace:"default", SelfLink:"", UID:"8fb21457-8ecf-460b-be19-61ea84636cc0", ResourceVersion:"659", Generation:0, CreationTimestamp:time.Date(2025, time.November, 2, 23, 24, 25, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"app":"netcat", "pod-template-hash":"cd4db9dbf", "projectcalico.org/namespace":"default", "projectcalico.org/orchestrator":"k8s", "projectcalico.org/serviceaccount":"default"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v3.WorkloadEndpointSpec{Orchestrator:"k8s", Workload:"", Node:"calico-999044", ContainerID:"5b0a41ed8d2df07f6cb8986c9ef4b73d7b1d94071c83579d63cbed3f93f40fb5", Pod:"netcat-cd4db9dbf-7nq9q", Endpoint:"eth0", ServiceAccountName:"default", IPNetworks:[]string{"10.244.176.3/32"}, IPNATs:[]v3.IPNAT(nil), IPv4Gateway:"", IPv6Gateway:"", Profiles:[]string{"kns.default", "ksa.default.default"}, InterfaceName:"cali1466d0b4737", MAC:"56:25:06:33:c8:6c", Ports:[]v3.WorkloadEndpointPort(nil), AllowSpoofedSourcePrefixes:[]string(nil), QoSControls:(*v3.QoSControls)(nil)}} + Nov 02 23:24:26 calico-999044 cri-dockerd[1370]: 2025-11-02 23:24:26.268 [INFO][5437] cni-plugin/k8s.go 532: Wrote updated endpoint to datastore ContainerID="5b0a41ed8d2df07f6cb8986c9ef4b73d7b1d94071c83579d63cbed3f93f40fb5" Namespace="default" Pod="netcat-cd4db9dbf-7nq9q" WorkloadEndpoint="calico--999044-k8s-netcat--cd4db9dbf--7nq9q-eth0" + Nov 02 23:24:27 calico-999044 cri-dockerd[1370]: time="2025-11-02T23:24:27Z" level=info msg="Stop pulling image registry.k8s.io/e2e-test-images/agnhost:2.40: Status: Downloaded newer image for registry.k8s.io/e2e-test-images/agnhost:2.40" + + + >>> host: cri-docker daemon config: + # ]8;;file://calico-999044/lib/systemd/system/cri-docker.service/lib/systemd/system/cri-docker.service]8;; + [Unit] + Description=CRI Interface for Docker Application Container Engine + Documentation=https://docs.mirantis.com + After=network-online.target firewalld.service docker.service + Wants=network-online.target + Requires=cri-docker.socket + + [Service] + Type=notify + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// + ExecReload=/bin/kill -s HUP $MAINPID + TimeoutSec=0 + RestartSec=2 + Restart=always + + # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. + # Both the old, and new location are accepted by systemd 229 and up, so using the old location + # to make them work for either version of systemd. + StartLimitBurst=3 + + # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. + # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make + # this option work for either version of systemd. + StartLimitInterval=60s + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Comment TasksMax if your systemd version does not support it. + # Only systemd 226 and above support this option. 
+ TasksMax=infinity + Delegate=yes + KillMode=process + + [Install] + WantedBy=multi-user.target + + # ]8;;file://calico-999044/etc/systemd/system/cri-docker.service.d/10-cni.conf/etc/systemd/system/cri-docker.service.d/10-cni.conf]8;; + [Service] + ExecStart= + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + + >>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf: + [Service] + ExecStart= + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + >>> host: /usr/lib/systemd/system/cri-docker.service: + [Unit] + Description=CRI Interface for Docker Application Container Engine + Documentation=https://docs.mirantis.com + After=network-online.target firewalld.service docker.service + Wants=network-online.target + Requires=cri-docker.socket + + [Service] + Type=notify + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// + ExecReload=/bin/kill -s HUP $MAINPID + TimeoutSec=0 + RestartSec=2 + Restart=always + + # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. + # Both the old, and new location are accepted by systemd 229 and up, so using the old location + # to make them work for either version of systemd. + StartLimitBurst=3 + + # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. + # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make + # this option work for either version of systemd. + StartLimitInterval=60s + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Comment TasksMax if your systemd version does not support it. + # Only systemd 226 and above support this option. 
+ TasksMax=infinity + Delegate=yes + KillMode=process + + [Install] + WantedBy=multi-user.target + + + >>> host: cri-dockerd version: + cri-dockerd dev (HEAD) + + + >>> host: containerd daemon status: + ● containerd.service - containerd container runtime + Loaded: loaded (]8;;file://calico-999044/lib/systemd/system/containerd.service/lib/systemd/system/containerd.service]8;;; disabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:23:43 UTC; 1min 2s ago + Docs: ]8;;https://containerd.iohttps://containerd.io]8;; + Main PID: 1047 (containerd) + Tasks: 233 + Memory: 114.2M + CPU: 2.409s + CGroup: /system.slice/containerd.service + ├─1047 /usr/bin/containerd + ├─1816 /usr/bin/containerd-shim-runc-v2 -namespace moby -id be24cc3df1e5b258390640cafb4ca89a25d0331f8da3837a54c580281db4e9a9 -address /run/containerd/containerd.sock + ├─1818 /usr/bin/containerd-shim-runc-v2 -namespace moby -id ae8ec048213c07bea4c0fc2b2042dbff95f5f951e015efb3ce2cffee21b63912 -address /run/containerd/containerd.sock + ├─1841 /usr/bin/containerd-shim-runc-v2 -namespace moby -id c5a68c7cdf9c49640f73dd3fe559368985b6d212584e0c9157d69a18119d484c -address /run/containerd/containerd.sock + ├─1897 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 2e5c3272146afd2703baacb2f0f8fab091b65705db33ed103ae713fa4832a9a0 -address /run/containerd/containerd.sock + ├─1997 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 76dbbd726305005f1f5951169544901edfa930309dbb1230f936ba3a2cef7677 -address /run/containerd/containerd.sock + ├─2003 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 57fa4d22fc4927ce2b3f47cf2cc089d7d2fed0a77c90b25b32249cfff256a11c -address /run/containerd/containerd.sock + ├─2047 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 0a35af3123a44d2203cd28a9673d12a5d4a3995685947b3e6c2ad4866485e65c -address /run/containerd/containerd.sock + ├─2070 /usr/bin/containerd-shim-runc-v2 -namespace moby -id eccae0c56fef6b43ceb945f0384a654078ecb9fa2c152b63d04e260670d92857 -address /run/containerd/containerd.sock + ├─2528 /usr/bin/containerd-shim-runc-v2 -namespace moby -id dde9b4da154f87944fe3dbe3128111c30202eea72d4b57e35dd2529498500cd1 -address /run/containerd/containerd.sock + ├─2538 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 2603a408e2c6bc4d9b63c97de33a505de8fc7beea999bf96ceed627484f3fc92 -address /run/containerd/containerd.sock + ├─2613 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 7e284a7fa8255148fb1b6f07889c078c4cd0ab09712ae27ed938da23fe023a12 -address /run/containerd/containerd.sock + ├─3000 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 8dac9ede7692970f6df3964f9f670fcd221a7a8af5948d894f1fe7d9db0e96b7 -address /run/containerd/containerd.sock + ├─3175 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 982b26866991562b471e4a599845d6d22c247864397643805c41cb0f341eacac -address /run/containerd/containerd.sock + ├─4451 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 95b421e96489aaf05b61c92f602a153c119207ed0c7f303913fa479c27154e1f -address /run/containerd/containerd.sock + ├─4842 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 0cd87fa4d54f7857b75fc7f7ca68da14c3d25050d5460b72bb36185c389329c6 -address /run/containerd/containerd.sock + ├─4872 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 57adce76f9bb2442b05cffd4fb7e88936876d0f5b63af1966598fb88fa6526c4 -address /run/containerd/containerd.sock + ├─4989 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 8ffa31020589082ced7d4efe0b34145d92606dc0db8bea723b067a65046df579 -address /run/containerd/containerd.sock + ├─5268 
/usr/bin/containerd-shim-runc-v2 -namespace moby -id 4ef632533934d272648f9f4d136c5cd40382650fac7549ea0b82e983297ddcbb -address /run/containerd/containerd.sock + ├─5395 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 5b0a41ed8d2df07f6cb8986c9ef4b73d7b1d94071c83579d63cbed3f93f40fb5 -address /run/containerd/containerd.sock + └─5554 /usr/bin/containerd-shim-runc-v2 -namespace moby -id fe56587fb359a7beae2c679f568b02dde0f6b58401cf3269ec68d0694437ad63 -address /run/containerd/containerd.sock + + Nov 02 23:24:14 calico-999044 containerd[1047]: time="2025-11-02T23:24:14.529234278Z" level=info msg="cleaning up dead shim" namespace=moby + Nov 02 23:24:14 calico-999044 containerd[1047]: time="2025-11-02T23:24:14.811873197Z" level=info msg="shim disconnected" id=c6bf997abba43fd5fa66e7ac1aaea29756157623933e9372c7d81c5553dc9d87 namespace=moby + Nov 02 23:24:14 calico-999044 containerd[1047]: time="2025-11-02T23:24:14.812017905Z" level=warning msg="cleaning up after shim disconnected" id=c6bf997abba43fd5fa66e7ac1aaea29756157623933e9372c7d81c5553dc9d87 namespace=moby + Nov 02 23:24:14 calico-999044 containerd[1047]: time="2025-11-02T23:24:14.812029335Z" level=info msg="cleaning up dead shim" namespace=moby + Nov 02 23:24:15 calico-999044 containerd[1047]: time="2025-11-02T23:24:15.000739922Z" level=info msg="shim disconnected" id=b0cd42e55b60b8e886ef8a69764dd3a14ed611110c411f8a7a6b0853fa6c861d namespace=moby + Nov 02 23:24:15 calico-999044 containerd[1047]: time="2025-11-02T23:24:15.000767949Z" level=warning msg="cleaning up after shim disconnected" id=b0cd42e55b60b8e886ef8a69764dd3a14ed611110c411f8a7a6b0853fa6c861d namespace=moby + Nov 02 23:24:15 calico-999044 containerd[1047]: time="2025-11-02T23:24:15.000774489Z" level=info msg="cleaning up dead shim" namespace=moby + Nov 02 23:24:15 calico-999044 containerd[1047]: time="2025-11-02T23:24:15.002352111Z" level=info msg="shim disconnected" id=7749886edb8c6937a6b4da9f9796c5c93702493a735fbed26e6891c4ac436e09 namespace=moby + Nov 02 23:24:15 calico-999044 containerd[1047]: time="2025-11-02T23:24:15.002462904Z" level=warning msg="cleaning up after shim disconnected" id=7749886edb8c6937a6b4da9f9796c5c93702493a735fbed26e6891c4ac436e09 namespace=moby + Nov 02 23:24:15 calico-999044 containerd[1047]: time="2025-11-02T23:24:15.002472055Z" level=info msg="cleaning up dead shim" namespace=moby + + + >>> host: containerd daemon config: + # ]8;;file://calico-999044/lib/systemd/system/containerd.service/lib/systemd/system/containerd.service]8;; + # Copyright The containerd Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
+ + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /lib/systemd/system/containerd.service: + # Copyright The containerd Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it. + # Only systemd 226 and above support this version. 
+ TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /etc/containerd/config.toml: + version = 2 + root = "/var/lib/containerd" + state = "/run/containerd" + oom_score = 0 + # imports + + [grpc] + address = "/run/containerd/containerd.sock" + uid = 0 + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + + [debug] + address = "" + uid = 0 + gid = 0 + level = "" + + [metrics] + address = "" + grpc_histogram = false + + [cgroup] + path = "" + + [plugins] + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + [plugins."io.containerd.grpc.v1.cri"] + enable_unprivileged_ports = true + stream_server_address = "" + stream_server_port = "10010" + enable_selinux = false + sandbox_image = "registry.k8s.io/pause:3.10.1" + stats_collect_period = 10 + enable_tls_streaming = false + max_container_log_line_size = 16384 + restrict_oom_score_adj = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = true + snapshotter = "overlayfs" + default_runtime_name = "runc" + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + runtime_type = "" + runtime_engine = "" + runtime_root = "" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + [plugins."io.containerd.gc.v1.scheduler"] + pause_threshold = 0.02 + deletion_threshold = 0 + mutation_threshold = 100 + schedule_delay = "0s" + startup_delay = "100ms" + + + >>> host: containerd config dump: + disabled_plugins = [] + imports = ["/etc/containerd/config.toml"] + oom_score = 0 + plugin_dir = "" + required_plugins = [] + root = "/var/lib/containerd" + state = "/run/containerd" + temp = "" + version = 2 + + [cgroup] + path = "" + + [debug] + address = "" + format = "" + gid = 0 + level = "" + uid = 0 + + [grpc] + address = "/run/containerd/containerd.sock" + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + tcp_address = "" + tcp_tls_ca = "" + tcp_tls_cert = "" + tcp_tls_key = "" + uid = 0 + + [metrics] + address = "" + grpc_histogram = false + + [plugins] + + [plugins."io.containerd.gc.v1.scheduler"] + deletion_threshold = 0 + mutation_threshold = 100 + pause_threshold = 0.02 + schedule_delay = "0s" + startup_delay = "100ms" + + [plugins."io.containerd.grpc.v1.cri"] + cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"] + device_ownership_from_security_context = false + disable_apparmor = false + disable_cgroup = false + disable_hugetlb_controller = true + disable_proc_mount = false + disable_tcp_service = true + drain_exec_sync_io_timeout = "0s" + enable_cdi = false + enable_selinux = false + enable_tls_streaming = false + enable_unprivileged_icmp = false + enable_unprivileged_ports = true + ignore_deprecation_warnings = [] + ignore_image_defined_volumes = false + image_pull_progress_timeout = "5m0s" + image_pull_with_sync_fs = false + max_concurrent_downloads = 3 + max_container_log_line_size = 16384 + netns_mounts_under_state_dir = false + restrict_oom_score_adj = false + sandbox_image = 
"registry.k8s.io/pause:3.10.1" + selinux_category_range = 1024 + stats_collect_period = 10 + stream_idle_timeout = "4h0m0s" + stream_server_address = "" + stream_server_port = "10010" + systemd_cgroup = false + tolerate_missing_hugetlb_controller = true + unset_seccomp_profile = "" + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + ip_pref = "" + max_conf_num = 1 + setup_serially = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + default_runtime_name = "runc" + disable_snapshot_annotations = true + discard_unpacked_layers = true + ignore_blockio_not_enabled_errors = false + ignore_rdt_not_enabled_errors = false + no_pivot = false + snapshotter = "overlayfs" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "io.containerd.runc.v2" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".image_decryption] + key_model = "node" + + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.grpc.v1.cri".registry.auths] + + [plugins."io.containerd.grpc.v1.cri".registry.configs] + + [plugins."io.containerd.grpc.v1.cri".registry.headers] + + [plugins."io.containerd.grpc.v1.cri".registry.mirrors] + + [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming] + tls_cert_file = "" + tls_key_file = "" + + [plugins."io.containerd.internal.v1.opt"] + path = "/opt/containerd" + + [plugins."io.containerd.internal.v1.restart"] + interval = "10s" + + [plugins."io.containerd.internal.v1.tracing"] + + [plugins."io.containerd.metadata.v1.bolt"] + content_sharing_policy = "shared" + + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + + [plugins."io.containerd.nri.v1.nri"] + disable = true + disable_connections = false + plugin_config_path = "/etc/nri/conf.d" + plugin_path = "/opt/nri/plugins" + plugin_registration_timeout = "5s" + plugin_request_timeout = "2s" + socket_path = "/var/run/nri/nri.sock" + + [plugins."io.containerd.runtime.v1.linux"] + no_shim = false + 
runtime = "runc" + runtime_root = "" + shim = "containerd-shim" + shim_debug = false + + [plugins."io.containerd.runtime.v2.task"] + platforms = ["linux/amd64"] + sched_core = false + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + sync_fs = false + + [plugins."io.containerd.service.v1.tasks-service"] + blockio_config_file = "" + rdt_config_file = "" + + [plugins."io.containerd.snapshotter.v1.aufs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.blockfile"] + fs_type = "" + mount_options = [] + root_path = "" + scratch_file = "" + + [plugins."io.containerd.snapshotter.v1.btrfs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.devmapper"] + async_remove = false + base_image_size = "" + discard_blocks = false + fs_options = "" + fs_type = "" + pool_name = "" + root_path = "" + + [plugins."io.containerd.snapshotter.v1.native"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.overlayfs"] + mount_options = [] + root_path = "" + sync_remove = false + upperdir_label = false + + [plugins."io.containerd.snapshotter.v1.zfs"] + root_path = "" + + [plugins."io.containerd.tracing.processor.v1.otlp"] + + [plugins."io.containerd.transfer.v1.local"] + config_path = "" + max_concurrent_downloads = 3 + max_concurrent_uploaded_layers = 3 + + [[plugins."io.containerd.transfer.v1.local".unpack_config]] + differ = "walking" + platform = "linux/amd64" + snapshotter = "overlayfs" + + [proxy_plugins] + + [stream_processors] + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"] + accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"] + args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = "application/vnd.oci.image.layer.v1.tar" + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"] + accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"] + args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = "application/vnd.oci.image.layer.v1.tar+gzip" + + [timeouts] + "io.containerd.timeout.bolt.open" = "0s" + "io.containerd.timeout.metrics.shimstats" = "2s" + "io.containerd.timeout.shim.cleanup" = "5s" + "io.containerd.timeout.shim.load" = "5s" + "io.containerd.timeout.shim.shutdown" = "3s" + "io.containerd.timeout.task.state" = "2s" + + [ttrpc] + address = "" + gid = 0 + uid = 0 + + + >>> host: crio daemon status: + ○ crio.service - Container Runtime Interface for OCI (CRI-O) + Loaded: loaded (/lib/systemd/system/crio.service; disabled; preset: enabled) + Active: inactive (dead) + Docs: https://github.com/cri-o/cri-o + ssh: Process exited with status 3 + + + >>> host: crio daemon config: + # /lib/systemd/system/crio.service + [Unit] + Description=Container Runtime Interface for OCI (CRI-O) + Documentation=https://github.com/cri-o/cri-o + Wants=network-online.target + Before=kubelet.service + After=network-online.target + + [Service] + Type=notify + EnvironmentFile=-/etc/default/crio + Environment=GOTRACEBACK=crash + ExecStart=/usr/bin/crio \ + $CRIO_CONFIG_OPTIONS \ + $CRIO_RUNTIME_OPTIONS \ + $CRIO_STORAGE_OPTIONS \ + $CRIO_NETWORK_OPTIONS \ + $CRIO_METRICS_OPTIONS + ExecReload=/bin/kill
-s HUP $MAINPID + TasksMax=infinity + LimitNOFILE=1048576 + LimitNPROC=1048576 + LimitCORE=infinity + OOMScoreAdjust=-999 + TimeoutStartSec=0 + Restart=on-failure + RestartSec=10 + + [Install] + WantedBy=multi-user.target + Alias=cri-o.service + + + >>> host: /etc/crio: + /etc/crio/crio.conf.d/10-crio.conf + [crio.image] + signature_policy = "/etc/crio/policy.json" + + [crio.runtime] + default_runtime = "crun" + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_root = "/run/crun" + monitor_path = "/usr/libexec/crio/conmon" + allowed_annotations = [ + "io.containers.trace-syscall", + ] + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_root = "/run/runc" + monitor_path = "/usr/libexec/crio/conmon" + /etc/crio/crio.conf.d/02-crio.conf + [crio.image] + # pause_image = "" + + [crio.network] + # cni_default_network = "" + + [crio.runtime] + # cgroup_manager = "" + /etc/crio/policy.json + { "default": [{ "type": "insecureAcceptAnything" }] } + + + >>> host: crio config: + INFO[2025-11-02T23:24:47.985957868Z] Updating config from single file: /etc/crio/crio.conf + INFO[2025-11-02T23:24:47.985976561Z] Updating config from drop-in file: /etc/crio/crio.conf + INFO[2025-11-02T23:24:47.986001661Z] Skipping not-existing config file "/etc/crio/crio.conf" + INFO[2025-11-02T23:24:47.986015465Z] Updating config from path: /etc/crio/crio.conf.d + INFO[2025-11-02T23:24:47.986054334Z] Updating config from drop-in file: /etc/crio/crio.conf.d/02-crio.conf + INFO[2025-11-02T23:24:47.986139988Z] Updating config from drop-in file: /etc/crio/crio.conf.d/10-crio.conf + INFO Using default capabilities: CAP_CHOWN, CAP_DAC_OVERRIDE, CAP_FSETID, CAP_FOWNER, CAP_SETGID, CAP_SETUID, CAP_SETPCAP, CAP_NET_BIND_SERVICE, CAP_KILL + # The CRI-O configuration file specifies all of the available configuration + # options and command-line flags for the crio(8) OCI Kubernetes Container Runtime + # daemon, but in a TOML format that can be more easily modified and versioned. + # + # Please refer to crio.conf(5) for details of all configuration options. + + # CRI-O supports partial configuration reload during runtime, which can be + # done by sending SIGHUP to the running process. Currently supported options + # are explicitly mentioned with: 'This option supports live configuration + # reload'. + + # CRI-O reads its storage defaults from the containers-storage.conf(5) file + # located at /etc/containers/storage.conf. Modify this storage configuration if + # you want to change the system's defaults. If you want to modify storage just + # for CRI-O, you can change the storage configuration options here. + [crio] + + # Path to the "root directory". CRI-O stores all of its data, including + # containers images, in this directory. + # root = "/var/lib/containers/storage" + + # Path to the "run directory". CRI-O stores all of its state in this directory. + # runroot = "/run/containers/storage" + + # Path to the "imagestore". If CRI-O stores all of its images in this directory differently than Root. + # imagestore = "" + + # Storage driver used to manage the storage of images and containers. Please + # refer to containers-storage.conf(5) to see all available storage drivers. + # storage_driver = "" + + # List to pass options to the storage driver. Please refer to + # containers-storage.conf(5) to see all available storage options. + # storage_option = [ + # ] + + # The default log directory where all logs will go unless directly specified by + # the kubelet. 
The log directory specified must be an absolute directory. + # log_dir = "/var/log/crio/pods" + + # Location for CRI-O to lay down the temporary version file. + # It is used to check if crio wipe should wipe containers, which should + # always happen on a node reboot + # version_file = "/var/run/crio/version" + + # Location for CRI-O to lay down the persistent version file. + # It is used to check if crio wipe should wipe images, which should + # only happen when CRI-O has been upgraded + # version_file_persist = "" + + # InternalWipe is whether CRI-O should wipe containers and images after a reboot when the server starts. + # If set to false, one must use the external command 'crio wipe' to wipe the containers and images in these situations. + # internal_wipe = true + + # InternalRepair is whether CRI-O should check if the container and image storage was corrupted after a sudden restart. + # If it was, CRI-O also attempts to repair the storage. + # internal_repair = true + + # Location for CRI-O to lay down the clean shutdown file. + # It is used to check whether crio had time to sync before shutting down. + # If not found, crio wipe will clear the storage directory. + # clean_shutdown_file = "/var/lib/crio/clean.shutdown" + + # The crio.api table contains settings for the kubelet/gRPC interface. + [crio.api] + + # Path to AF_LOCAL socket on which CRI-O will listen. + # listen = "/var/run/crio/crio.sock" + + # IP address on which the stream server will listen. + # stream_address = "127.0.0.1" + + # The port on which the stream server will listen. If the port is set to "0", then + # CRI-O will allocate a random free port number. + # stream_port = "0" + + # Enable encrypted TLS transport of the stream server. + # stream_enable_tls = false + + # Length of time until open streams terminate due to lack of activity + # stream_idle_timeout = "" + + # Path to the x509 certificate file used to serve the encrypted stream. This + # file can change, and CRI-O will automatically pick up the changes. + # stream_tls_cert = "" + + # Path to the key file used to serve the encrypted stream. This file can + # change and CRI-O will automatically pick up the changes. + # stream_tls_key = "" + + # Path to the x509 CA(s) file used to verify and authenticate client + # communication with the encrypted stream. This file can change and CRI-O will + # automatically pick up the changes. + # stream_tls_ca = "" + + # Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 80 * 1024 * 1024. + # grpc_max_send_msg_size = 83886080 + + # Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 80 * 1024 * 1024. + # grpc_max_recv_msg_size = 83886080 + + # The crio.runtime table contains settings pertaining to the OCI runtime used + # and options for how to set up and manage the OCI runtime. + [crio.runtime] + + # A list of ulimits to be set in containers by default, specified as + # "=:", for example: + # "nofile=1024:2048" + # If nothing is set here, settings will be inherited from the CRI-O daemon + # default_ulimits = [ + # ] + + # If true, the runtime will not use pivot_root, but instead use MS_MOVE. + # no_pivot = false + + # decryption_keys_path is the path where the keys required for + # image decryption are stored. This option supports live configuration reload. + # decryption_keys_path = "/etc/crio/keys/" + + # Path to the conmon binary, used for monitoring the OCI runtime. + # Will be searched for using $PATH if empty. 
+ # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon = "" + + # Cgroup setting for conmon + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorCgroup. + # conmon_cgroup = "" + + # Environment variable list for the conmon process, used for passing necessary + # environment variables to conmon or the runtime. + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon_env = [ + # ] + + # Additional environment variables to set for all the + # containers. These are overridden if set in the + # container image spec or in the container runtime configuration. + # default_env = [ + # ] + + # If true, SELinux will be used for pod separation on the host. + # This option is deprecated, and be interpreted from whether SELinux is enabled on the host in the future. + # selinux = false + + # Path to the seccomp.json profile which is used as the default seccomp profile + # for the runtime. If not specified or set to "", then the internal default seccomp profile will be used. + # This option supports live configuration reload. + # seccomp_profile = "" + + # Enable a seccomp profile for privileged containers from the local path. + # This option supports live configuration reload. + # privileged_seccomp_profile = "" + + # Used to change the name of the default AppArmor profile of CRI-O. The default + # profile name is "crio-default". This profile only takes effect if the user + # does not specify a profile via the Kubernetes Pod's metadata annotation. If + # the profile is set to "unconfined", then this equals to disabling AppArmor. + # This option supports live configuration reload. + # apparmor_profile = "crio-default" + + # Path to the blockio class configuration file for configuring + # the cgroup blockio controller. + # blockio_config_file = "" + + # Reload blockio-config-file and rescan blockio devices in the system before applying + # blockio parameters. + # blockio_reload = false + + # Used to change irqbalance service config file path which is used for configuring + # irqbalance daemon. + # irqbalance_config_file = "/etc/sysconfig/irqbalance" + + # irqbalance_config_restore_file allows to set a cpu mask CRI-O should + # restore as irqbalance config at startup. Set to empty string to disable this flow entirely. + # By default, CRI-O manages the irqbalance configuration to enable dynamic IRQ pinning. + # irqbalance_config_restore_file = "/etc/sysconfig/orig_irq_banned_cpus" + + # Path to the RDT configuration file for configuring the resctrl pseudo-filesystem. + # This option supports live configuration reload. + # rdt_config_file = "" + + # Cgroup management implementation used for the runtime. + # cgroup_manager = "systemd" + + # Specify whether the image pull must be performed in a separate cgroup. + # separate_pull_cgroup = "" + + # List of default capabilities for containers. If it is empty or commented out, + # only the capabilities defined in the containers json file by the user/kube + # will be added. + # default_capabilities = [ + # "CHOWN", + # "DAC_OVERRIDE", + # "FSETID", + # "FOWNER", + # "SETGID", + # "SETUID", + # "SETPCAP", + # "NET_BIND_SERVICE", + # "KILL", + # ] + + # Add capabilities to the inheritable set, as well as the default group of permitted, bounding and effective. + # If capabilities are expected to work for non-root users, this option should be set. + # add_inheritable_capabilities = false + + # List of default sysctls. 
If it is empty or commented out, only the sysctls + # defined in the container json file by the user/kube will be added. + # default_sysctls = [ + # ] + + # List of devices on the host that a + # user can specify with the "io.kubernetes.cri-o.Devices" allowed annotation. + # allowed_devices = [ + # "/dev/fuse", + # "/dev/net/tun", + # ] + + # List of additional devices. specified as + # "::", for example: "--device=/dev/sdc:/dev/xvdc:rwm". + # If it is empty or commented out, only the devices + # defined in the container json file by the user/kube will be added. + # additional_devices = [ + # ] + + # List of directories to scan for CDI Spec files. + # cdi_spec_dirs = [ + # "/etc/cdi", + # "/var/run/cdi", + # ] + + # Change the default behavior of setting container devices uid/gid from CRI's + # SecurityContext (RunAsUser/RunAsGroup) instead of taking host's uid/gid. + # Defaults to false. + # device_ownership_from_security_context = false + + # Path to OCI hooks directories for automatically executed hooks. If one of the + # directories does not exist, then CRI-O will automatically skip them. + # hooks_dir = [ + # "/usr/share/containers/oci/hooks.d", + # ] + + # Path to the file specifying the defaults mounts for each container. The + # format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads + # its default mounts from the following two files: + # + # 1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the + # override file, where users can either add in their own default mounts, or + # override the default mounts shipped with the package. + # + # 2) /usr/share/containers/mounts.conf: This is the default file read for + # mounts. If you want CRI-O to read from a different, specific mounts file, + # you can change the default_mounts_file. Note, if this is done, CRI-O will + # only add mounts it finds in this file. + # + # default_mounts_file = "" + + # Maximum number of processes allowed in a container. + # This option is deprecated. The Kubelet flag '--pod-pids-limit' should be used instead. + # pids_limit = -1 + + # Maximum sized allowed for the container log file. Negative numbers indicate + # that no size limit is imposed. If it is positive, it must be >= 8192 to + # match/exceed conmon's read buffer. The file is truncated and re-opened so the + # limit is never exceeded. This option is deprecated. The Kubelet flag '--container-log-max-size' should be used instead. + # log_size_max = -1 + + # Whether container output should be logged to journald in addition to the kubernetes log file + # log_to_journald = false + + # Path to directory in which container exit files are written to by conmon. + # container_exits_dir = "/var/run/crio/exits" + + # Path to directory for container attach sockets. + # container_attach_socket_dir = "/var/run/crio" + + # The prefix to use for the source of the bind mounts. + # bind_mount_prefix = "" + + # If set to true, all containers will run in read-only mode. + # read_only = false + + # Changes the verbosity of the logs based on the level it is set to. Options + # are fatal, panic, error, warn, info, debug and trace. This option supports + # live configuration reload. + # log_level = "info" + + # Filter the log messages by the provided regular expression. + # This option supports live configuration reload. + # log_filter = "" + + # The UID mappings for the user namespace of each container. A range is + # specified in the form containerUID:HostUID:Size. Multiple ranges must be + # separated by comma. 
+ # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # uid_mappings = "" + + # The GID mappings for the user namespace of each container. A range is + # specified in the form containerGID:HostGID:Size. Multiple ranges must be + # separated by comma. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # gid_mappings = "" + + # If set, CRI-O will reject any attempt to map host UIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_uid = -1 + + # If set, CRI-O will reject any attempt to map host GIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_gid = -1 + + # The minimal amount of time in seconds to wait before issuing a timeout + # regarding the proper termination of the container. The lowest possible + # value is 30s, whereas lower values are not considered by CRI-O. + # ctr_stop_timeout = 30 + + # drop_infra_ctr determines whether CRI-O drops the infra container + # when a pod does not have a private PID namespace, and does not use + # a kernel separating runtime (like kata). + # It requires manage_ns_lifecycle to be true. + # drop_infra_ctr = true + + # infra_ctr_cpuset determines what CPUs will be used to run infra containers. + # You can use linux CPU list format to specify desired CPUs. + # To get better isolation for guaranteed pods, set this parameter to be equal to kubelet reserved-cpus. + # infra_ctr_cpuset = "" + + # shared_cpuset determines the CPU set which is allowed to be shared between guaranteed containers, + # regardless of, and in addition to, the exclusiveness of their CPUs. + # This field is optional and would not be used if not specified. + # You can specify CPUs in the Linux CPU list format. + # shared_cpuset = "" + + # The directory where the state of the managed namespaces gets tracked. + # Only used when manage_ns_lifecycle is true. + # namespaces_dir = "/var/run" + + # pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle + # pinns_path = "" + + # Globally enable/disable CRIU support which is necessary to + # checkpoint and restore container or pods (even if CRIU is found in $PATH). + # enable_criu_support = true + + # Enable/disable the generation of the container, + # sandbox lifecycle events to be sent to the Kubelet to optimize the PLEG + # enable_pod_events = false + + # default_runtime is the _name_ of the OCI runtime to be used as the default. + # The name is matched against the runtimes map below. + # default_runtime = "crun" + + # A list of paths that, when absent from the host, + # will cause a container creation to fail (as opposed to the current behavior being created as a directory). + # This option is to protect from source locations whose existence as a directory could jeopardize the health of the node, and whose + # creation as a file is not desired either. 
+ # An example is /etc/hostname, which will cause failures on reboot if it's created as a directory, but often doesn't exist because + # the hostname is being managed dynamically. + # absent_mount_sources_to_reject = [ + # ] + + # The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes. + # The runtime to use is picked based on the runtime handler provided by the CRI. + # If no runtime handler is provided, the "default_runtime" will be used. + # Each entry in the table should follow the format: + # + # [crio.runtime.runtimes.runtime-handler] + # runtime_path = "/path/to/the/executable" + # runtime_type = "oci" + # runtime_root = "/path/to/the/root" + # inherit_default_runtime = false + # monitor_path = "/path/to/container/monitor" + # monitor_cgroup = "/cgroup/path" + # monitor_exec_cgroup = "/cgroup/path" + # monitor_env = [] + # privileged_without_host_devices = false + # allowed_annotations = [] + # platform_runtime_paths = { "os/arch" = "/path/to/binary" } + # no_sync_log = false + # default_annotations = {} + # stream_websockets = false + # seccomp_profile = "" + # Where: + # - runtime-handler: Name used to identify the runtime. + # - runtime_path (optional, string): Absolute path to the runtime executable in + # the host filesystem. If omitted, the runtime-handler identifier should match + # the runtime executable name, and the runtime executable should be placed + # in $PATH. + # - runtime_type (optional, string): Type of runtime, one of: "oci", "vm". If + # omitted, an "oci" runtime is assumed. + # - runtime_root (optional, string): Root directory for storage of containers + # state. + # - runtime_config_path (optional, string): the path for the runtime configuration + # file. This can only be used with when using the VM runtime_type. + # - inherit_default_runtime (optional, bool): when true the runtime_path, + # runtime_type, runtime_root and runtime_config_path will be replaced by + # the values from the default runtime on load time. + # - privileged_without_host_devices (optional, bool): an option for restricting + # host devices from being passed to privileged containers. + # - allowed_annotations (optional, array of strings): an option for specifying + # a list of experimental annotations that this runtime handler is allowed to process. + # The currently recognized values are: + # "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod. + # "io.kubernetes.cri-o.cgroup2-mount-hierarchy-rw" for mounting cgroups writably when set to "true". + # "io.kubernetes.cri-o.Devices" for configuring devices for the pod. + # "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm. + # "io.kubernetes.cri-o.UnifiedCgroup.$CTR_NAME" for configuring the cgroup v2 unified block for a container. + # "io.containers.trace-syscall" for tracing syscalls via the OCI seccomp BPF hook. + # "io.kubernetes.cri-o.seccompNotifierAction" for enabling the seccomp notifier feature. + # "io.kubernetes.cri-o.umask" for setting the umask for container init process. + # "io.kubernetes.cri.rdt-class" for setting the RDT class of a container + # "seccomp-profile.kubernetes.cri-o.io" for setting the seccomp profile for: + # - a specific container by using: "seccomp-profile.kubernetes.cri-o.io/" + # - a whole pod by using: "seccomp-profile.kubernetes.cri-o.io/POD" + # Note that the annotation works on containers as well as on images. 
+ # For images, the plain annotation "seccomp-profile.kubernetes.cri-o.io" + # can be used without the required "/POD" suffix or a container name. + # "io.kubernetes.cri-o.DisableFIPS" for disabling FIPS mode in a Kubernetes pod within a FIPS-enabled cluster. + # - monitor_path (optional, string): The path of the monitor binary. Replaces + # deprecated option "conmon". + # - monitor_cgroup (optional, string): The cgroup the container monitor process will be put in. + # Replaces deprecated option "conmon_cgroup". + # - monitor_exec_cgroup (optional, string): If set to "container", indicates exec probes + # should be moved to the container's cgroup + # - monitor_env (optional, array of strings): Environment variables to pass to the monitor. + # Replaces deprecated option "conmon_env". + # When using the pod runtime and conmon-rs, then the monitor_env can be used to further configure + # conmon-rs by using: + # - LOG_DRIVER=[none,systemd,stdout] - Enable logging to the configured target, defaults to none. + # - HEAPTRACK_OUTPUT_PATH=/path/to/dir - Enable heaptrack profiling and save the files to the set directory. + # - HEAPTRACK_BINARY_PATH=/path/to/heaptrack - Enable heaptrack profiling and use set heaptrack binary. + # - platform_runtime_paths (optional, map): A mapping of platforms to the corresponding + # runtime executable paths for the runtime handler. + # - container_min_memory (optional, string): The minimum memory that must be set for a container. + # This value can be used to override the currently set global value for a specific runtime. If not set, + # a global default value of "12 MiB" will be used. + # - no_sync_log (optional, bool): If set to true, the runtime will not sync the log file on rotate or container exit. + # This option is only valid for the 'oci' runtime type. Setting this option to true can cause data loss, e.g. + # when a machine crash happens. + # - default_annotations (optional, map): Default annotations if not overridden by the pod spec. + # - stream_websockets (optional, bool): Enable the WebSocket protocol for container exec, attach and port forward. + # - seccomp_profile (optional, string): The absolute path of the seccomp.json profile which is used as the default + # seccomp profile for the runtime. + # If not specified or set to "", the runtime seccomp_profile will be used. + # If that is also not specified or set to "", the internal default seccomp profile will be applied. + # + # Using the seccomp notifier feature: + # + # This feature can help you to debug seccomp related issues, for example if + # blocked syscalls (permission denied errors) have negative impact on the workload. + # + # To be able to use this feature, configure a runtime which has the annotation + # "io.kubernetes.cri-o.seccompNotifierAction" in the allowed_annotations array. + # + # It also requires at least runc 1.1.0 or crun 0.19 which support the notifier + # feature. + # + # If everything is setup, CRI-O will modify chosen seccomp profiles for + # containers if the annotation "io.kubernetes.cri-o.seccompNotifierAction" is + # set on the Pod sandbox. CRI-O will then get notified if a container is using + # a blocked syscall and then terminate the workload after a timeout of 5 + # seconds if the value of "io.kubernetes.cri-o.seccompNotifierAction=stop". + # + # This also means that multiple syscalls can be captured during that period, + # while the timeout will get reset once a new syscall has been discovered. 
+ # + # This also means that the Pods "restartPolicy" has to be set to "Never", + # otherwise the kubelet will restart the container immediately. + # + # Please be aware that CRI-O is not able to get notified if a syscall gets + # blocked based on the seccomp defaultAction, which is a general runtime + # limitation. + + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_type = "" + runtime_root = "/run/crun" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + allowed_annotations = [ + "io.containers.trace-syscall", + ] + privileged_without_host_devices = false + + + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_type = "" + runtime_root = "/run/runc" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + + privileged_without_host_devices = false + + + + # The workloads table defines ways to customize containers with different resources + # that work based on annotations, rather than the CRI. + # Note, the behavior of this table is EXPERIMENTAL and may change at any time. + # Each workload, has a name, activation_annotation, annotation_prefix and set of resources it supports mutating. + # The currently supported resources are "cpuperiod" "cpuquota", "cpushares", "cpulimit" and "cpuset". The values for "cpuperiod" and "cpuquota" are denoted in microseconds. + # The value for "cpulimit" is denoted in millicores, this value is used to calculate the "cpuquota" with the supplied "cpuperiod" or the default "cpuperiod". + # Note that the "cpulimit" field overrides the "cpuquota" value supplied in this configuration. + # Each resource can have a default value specified, or be empty. + # For a container to opt-into this workload, the pod should be configured with the annotation $activation_annotation (key only, value is ignored). + # To customize per-container, an annotation of the form $annotation_prefix.$resource/$ctrName = "value" can be specified + # signifying for that resource type to override the default value. + # If the annotation_prefix is not present, every container in the pod will be given the default values. + # Example: + # [crio.runtime.workloads.workload-type] + # activation_annotation = "io.crio/workload" + # annotation_prefix = "io.crio.workload-type" + # [crio.runtime.workloads.workload-type.resources] + # cpuset = "0-1" + # cpushares = "5" + # cpuquota = "1000" + # cpuperiod = "100000" + # cpulimit = "35" + # Where: + # The workload name is workload-type. + # To specify, the pod must have the "io.crio.workload" annotation (this is a precise string match). + # This workload supports setting cpuset and cpu resources. + # annotation_prefix is used to customize the different resources. + # To configure the cpu shares a container gets in the example above, the pod would have to have the following annotation: + # "io.crio.workload-type/$container_name = {"cpushares": "value"}" + + # hostnetwork_disable_selinux determines whether + # SELinux should be disabled within a pod when it is running in the host network namespace + # Default value is set to true + # hostnetwork_disable_selinux = true + + # disable_hostport_mapping determines whether to enable/disable + # the container hostport mapping in CRI-O. 
+ # Default value is set to 'false' + # disable_hostport_mapping = false + + # timezone To set the timezone for a container in CRI-O. + # If an empty string is provided, CRI-O retains its default behavior. Use 'Local' to match the timezone of the host machine. + # timezone = "" + + # The crio.image table contains settings pertaining to the management of OCI images. + # + # CRI-O reads its configured registries defaults from the system wide + # containers-registries.conf(5) located in /etc/containers/registries.conf. + [crio.image] + + # Default transport for pulling images from a remote container storage. + # default_transport = "docker://" + + # The path to a file containing credentials necessary for pulling images from + # secure registries. The file is similar to that of /var/lib/kubelet/config.json + # global_auth_file = "" + + # The image used to instantiate infra containers. + # This option supports live configuration reload. + # pause_image = "registry.k8s.io/pause:3.10.1" + + # The path to a file containing credentials specific for pulling the pause_image from + # above. The file is similar to that of /var/lib/kubelet/config.json + # This option supports live configuration reload. + # pause_image_auth_file = "" + + # The command to run to have a container stay in the paused state. + # When explicitly set to "", it will fallback to the entrypoint and command + # specified in the pause image. When commented out, it will fallback to the + # default: "/pause". This option supports live configuration reload. + # pause_command = "/pause" + + # List of images to be excluded from the kubelet's garbage collection. + # It allows specifying image names using either exact, glob, or keyword + # patterns. Exact matches must match the entire name, glob matches can + # have a wildcard * at the end, and keyword matches can have wildcards + # on both ends. By default, this list includes the "pause" image if + # configured by the user, which is used as a placeholder in Kubernetes pods. + # pinned_images = [ + # ] + + # Path to the file which decides what sort of policy we use when deciding + # whether or not to trust an image that we've pulled. It is not recommended that + # this option be used, as the default behavior of using the system-wide default + # policy (i.e., /etc/containers/policy.json) is most often preferred. Please + # refer to containers-policy.json(5) for more details. + signature_policy = "/etc/crio/policy.json" + + # Root path for pod namespace-separated signature policies. + # The final policy to be used on image pull will be /.json. + # If no pod namespace is being provided on image pull (via the sandbox config), + # or the concatenated path is non existent, then the signature_policy or system + # wide policy will be used as fallback. Must be an absolute path. + # signature_policy_dir = "/etc/crio/policies" + + # List of registries to skip TLS verification for pulling images. Please + # consider configuring the registries via /etc/containers/registries.conf before + # changing them here. + # This option is deprecated. Use registries.conf file instead. + # insecure_registries = [ + # ] + + # Controls how image volumes are handled. The valid values are mkdir, bind and + # ignore; the latter will ignore volumes entirely. + # image_volumes = "mkdir" + + # Temporary directory to use for storing big files + # big_files_temporary_dir = "" + + # If true, CRI-O will automatically reload the mirror registry when + # there is an update to the 'registries.conf.d' directory. 
Default value is set to 'false'. + # auto_reload_registries = false + + # The timeout for an image pull to make progress until the pull operation + # gets canceled. This value will be also used for calculating the pull progress interval to pull_progress_timeout / 10. + # Can be set to 0 to disable the timeout as well as the progress output. + # pull_progress_timeout = "0s" + + # The mode of short name resolution. + # The valid values are "enforcing" and "disabled", and the default is "enforcing". + # If "enforcing", an image pull will fail if a short name is used, but the results are ambiguous. + # If "disabled", the first result will be chosen. + # short_name_mode = "enforcing" + + # OCIArtifactMountSupport is whether CRI-O should support OCI artifacts. + # If set to false, mounting OCI Artifacts will result in an error. + # oci_artifact_mount_support = true + # The crio.network table containers settings pertaining to the management of + # CNI plugins. + [crio.network] + + # The default CNI network name to be selected. If not set or "", then + # CRI-O will pick-up the first one found in network_dir. + # cni_default_network = "" + + # Path to the directory where CNI configuration files are located. + # network_dir = "/etc/cni/net.d/" + + # Paths to directories where CNI plugin binaries are located. + # plugin_dirs = [ + # "/opt/cni/bin/", + # ] + + # List of included pod metrics. + # included_pod_metrics = [ + # ] + + # A necessary configuration for Prometheus based metrics retrieval + [crio.metrics] + + # Globally enable or disable metrics support. + # enable_metrics = false + + # Specify enabled metrics collectors. + # Per default all metrics are enabled. + # It is possible, to prefix the metrics with "container_runtime_" and "crio_". + # For example, the metrics collector "operations" would be treated in the same + # way as "crio_operations" and "container_runtime_crio_operations". + # metrics_collectors = [ + # "image_pulls_layer_size", + # "containers_events_dropped_total", + # "containers_oom_total", + # "processes_defunct", + # "operations_total", + # "operations_latency_seconds", + # "operations_latency_seconds_total", + # "operations_errors_total", + # "image_pulls_bytes_total", + # "image_pulls_skipped_bytes_total", + # "image_pulls_failure_total", + # "image_pulls_success_total", + # "image_layer_reuse_total", + # "containers_oom_count_total", + # "containers_seccomp_notifier_count_total", + # "resources_stalled_at_stage", + # "containers_stopped_monitor_count", + # ] + # The IP address or hostname on which the metrics server will listen. + # metrics_host = "127.0.0.1" + + # The port on which the metrics server will listen. + # metrics_port = 9090 + + # Local socket path to bind the metrics server to + # metrics_socket = "" + + # The certificate for the secure metrics server. + # If the certificate is not available on disk, then CRI-O will generate a + # self-signed one. CRI-O also watches for changes of this path and reloads the + # certificate on any modification event. + # metrics_cert = "" + + # The certificate key for the secure metrics server. + # Behaves in the same way as the metrics_cert. + # metrics_key = "" + + # A necessary configuration for OpenTelemetry trace data exporting + [crio.tracing] + + # Globally enable or disable exporting OpenTelemetry traces. + # enable_tracing = false + + # Address on which the gRPC trace collector listens on. + # tracing_endpoint = "127.0.0.1:4317" + + # Number of samples to collect per million spans. Set to 1000000 to always sample. 
+ # tracing_sampling_rate_per_million = 0 + + # CRI-O NRI configuration. + [crio.nri] + + # Globally enable or disable NRI. + # enable_nri = true + + # NRI socket to listen on. + # nri_listen = "/var/run/nri/nri.sock" + + # NRI plugin directory to use. + # nri_plugin_dir = "/opt/nri/plugins" + + # NRI plugin configuration directory to use. + # nri_plugin_config_dir = "/etc/nri/conf.d" + + # Disable connections from externally launched NRI plugins. + # nri_disable_connections = false + + # Timeout for a plugin to register itself with NRI. + # nri_plugin_registration_timeout = "5s" + + # Timeout for a plugin to handle an NRI request. + # nri_plugin_request_timeout = "2s" + + # NRI default validator configuration. + # If enabled, the builtin default validator can be used to reject a container if some + # NRI plugin requested a restricted adjustment. Currently the following adjustments + # can be restricted/rejected: + # - OCI hook injection + # - adjustment of runtime default seccomp profile + # - adjustment of unconfied seccomp profile + # - adjustment of a custom seccomp profile + # - adjustment of linux namespaces + # Additionally, the default validator can be used to reject container creation if any + # of a required set of plugins has not processed a container creation request, unless + # the container has been annotated to tolerate a missing plugin. + # + # [crio.nri.default_validator] + # nri_enable_default_validator = false + # nri_validator_reject_oci_hook_adjustment = false + # nri_validator_reject_runtime_default_seccomp_adjustment = false + # nri_validator_reject_unconfined_seccomp_adjustment = false + # nri_validator_reject_custom_seccomp_adjustment = false + # nri_validator_reject_namespace_adjustment = false + # nri_validator_required_plugins = [ + # ] + # nri_validator_tolerate_missing_plugins_annotation = "" + + # Necessary information pertaining to container and pod stats reporting. + [crio.stats] + + # The number of seconds between collecting pod and container stats. + # If set to 0, the stats are collected on-demand instead. + # stats_collection_period = 0 + + # The number of seconds between collecting pod/container stats and pod + # sandbox metrics. If set to 0, the metrics/stats are collected on-demand instead. + # collection_period = 0 + + + ----------------------- debugLogs end: calico-999044 [took: 12.948560032s] -------------------------------- + helpers_test.go:175: Cleaning up "calico-999044" profile ... 
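For reference, the per-profile runtime dump that ends above (systemd units, /etc/containerd/config.toml, containerd config dump, CRI-O status and crio config) can be gathered by hand from a minikube profile. The sketch below is illustrative only: it borrows the profile name calico-999044 from this log, assumes the profile is still running, and mirrors the sections the debug output prints rather than reproducing the test helper's exact implementation.

# hedged sketch: collect the same runtime-config sections from a running minikube profile
# (exact minikube ssh invocation may vary slightly between versions)
minikube ssh -p calico-999044 -- sudo systemctl cat containerd
minikube ssh -p calico-999044 -- sudo cat /etc/containerd/config.toml
minikube ssh -p calico-999044 -- sudo containerd config dump
minikube ssh -p calico-999044 -- sudo systemctl status crio
minikube ssh -p calico-999044 -- sudo crio config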
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p calico-999044 +=== RUN TestNetworkPlugins/group/false/HairPin + net_test.go:264: (dbg) Run: kubectl --context false-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080" + net_test.go:210: "false" test finished in 4m48.613225288s, failed=false + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p calico-999044: (1.844036312s) +=== CONT TestNetworkPlugins/group/bridge +=== RUN TestNetworkPlugins/group/bridge/Start + net_test.go:112: (dbg) Run: out/minikube-linux-amd64 start -p bridge-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=bridge --driver=docker --container-runtime=docker + net_test.go:211: + ----------------------- debugLogs start: custom-flannel-999044 [pass: true] -------------------------------- + >>> netcat: nslookup kubernetes.default: + Server: 10.96.0.10 + Address: 10.96.0.10#53 + + Name: kubernetes.default.svc.cluster.local + Address: 10.96.0.1 + + + + >>> netcat: nc 10.96.0.10 udp/53: + Connection to 10.96.0.10 53 port [udp/*] succeeded! + + + >>> netcat: nc 10.96.0.10 tcp/53: + Connection to 10.96.0.10 53 port [tcp/*] succeeded! + + + >>> netcat: /etc/nsswitch.conf: + cat: can't open '/etc/nsswitch.conf': No such file or directory + command terminated with exit code 1 + + + >>> netcat: /etc/hosts: + # Kubernetes-managed hosts file. + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + fe00::0 ip6-localnet + fe00::0 ip6-mcastprefix + fe00::1 ip6-allnodes + fe00::2 ip6-allrouters + 10.244.0.3 netcat-cd4db9dbf-ntqvl + + + >>> netcat: /etc/resolv.conf: + nameserver 10.96.0.10 + search default.svc.cluster.local svc.cluster.local cluster.local test-pods.svc.cluster.local c.k8s-infra-prow-build.internal google.internal + options ndots:5 + + + >>> host: /etc/nsswitch.conf: + # /etc/nsswitch.conf + # + # Example configuration of GNU Name Service Switch functionality. + # If you have the `glibc-doc-reference' and `info' packages installed, try: + # `info libc "Name Service Switch"' for information about this file. + + passwd: files + group: files + shadow: files + gshadow: files + + hosts: files dns + networks: files + + protocols: db files + services: db files + ethers: db files + rpc: db files + + netgroup: nis + + + >>> host: /etc/hosts: + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + fe00:: ip6-localnet + ff00:: ip6-mcastprefix + ff02::1 ip6-allnodes + ff02::2 ip6-allrouters + 192.168.85.2 custom-flannel-999044 + 192.168.85.1 host.minikube.internal + 192.168.85.2 control-plane.minikube.internal + + + >>> host: /etc/resolv.conf: + # Generated by Docker Engine. + # This file can be edited; Docker Engine will not make further changes once it + # has been modified. 
+ + nameserver 192.168.85.1 + search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal + options ndots:5 + + # Based on host file: '/etc/resolv.conf' (internal resolver) + # ExtServers: [host(10.35.240.10)] + # Overrides: [] + # Option ndots from: host + + + >>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, : + Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice + NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME + node/custom-flannel-999044 Ready control-plane 47s v1.34.1 192.168.85.2 Debian GNU/Linux 12 (bookworm) 6.6.97+ docker://28.5.1 + + NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR + default service/kubernetes ClusterIP 10.96.0.1 443/TCP 45s + default service/netcat ClusterIP 10.98.198.69 8080/TCP 14s app=netcat + kube-system service/kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP,9153/TCP 44s k8s-app=kube-dns + + NAMESPACE NAME ENDPOINTS AGE + default endpoints/kubernetes 192.168.85.2:8443 45s + default endpoints/netcat 10.244.0.3:8080 14s + kube-system endpoints/k8s.io-minikube-hostpath 39s + kube-system endpoints/kube-dns 10.244.0.2:53,10.244.0.2:53,10.244.0.2:9153 40s + + NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR + kube-flannel daemonset.apps/kube-flannel-ds 1 1 1 1 1 44s kube-flannel docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2 app=flannel + kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 44s kube-proxy registry.k8s.io/kube-proxy:v1.34.1 k8s-app=kube-proxy + + NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR + default deployment.apps/netcat 1/1 1 1 14s dnsutils registry.k8s.io/e2e-test-images/agnhost:2.40 app=netcat + kube-system deployment.apps/coredns 1/1 1 1 44s coredns registry.k8s.io/coredns/coredns:v1.12.1 k8s-app=kube-dns + + NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES + default pod/netcat-cd4db9dbf-ntqvl 1/1 Running 0 14s 10.244.0.3 custom-flannel-999044 + kube-flannel pod/kube-flannel-ds-7psrd 1/1 Running 0 39s 192.168.85.2 custom-flannel-999044 + kube-system pod/coredns-66bc5c9577-hcgf7 1/1 Running 0 38s 10.244.0.2 custom-flannel-999044 + kube-system pod/etcd-custom-flannel-999044 1/1 Running 0 45s 192.168.85.2 custom-flannel-999044 + kube-system pod/kube-apiserver-custom-flannel-999044 1/1 Running 0 45s 192.168.85.2 custom-flannel-999044 + kube-system pod/kube-controller-manager-custom-flannel-999044 1/1 Running 0 45s 192.168.85.2 custom-flannel-999044 + kube-system pod/kube-proxy-zqtl2 1/1 Running 0 39s 192.168.85.2 custom-flannel-999044 + kube-system pod/kube-scheduler-custom-flannel-999044 1/1 Running 0 45s 192.168.85.2 custom-flannel-999044 + kube-system pod/storage-provisioner 1/1 Running 0 39s 192.168.85.2 custom-flannel-999044 + + + >>> host: crictl pods: + POD ID CREATED STATE NAME NAMESPACE ATTEMPT RUNTIME + 55a0ede2ac7de 14 seconds ago Ready netcat-cd4db9dbf-ntqvl default 0 (default) + 22b021222b543 17 seconds ago Ready coredns-66bc5c9577-hcgf7 kube-system 0 (default) + 7ddd9f8d25633 18 seconds ago Ready storage-provisioner kube-system 0 (default) + 8f89e7d7960fb 38 seconds ago Ready kube-flannel-ds-7psrd kube-flannel 0 (default) + 03f2f27f2ddfd 38 seconds ago Ready kube-proxy-zqtl2 kube-system 0 (default) + d6ba6f1e0ab0b 49 seconds ago Ready kube-scheduler-custom-flannel-999044 kube-system 0 (default) + 08892eacbe9a4 49 seconds ago 
Ready kube-controller-manager-custom-flannel-999044 kube-system 0 (default) + ba38a261a975f 49 seconds ago Ready kube-apiserver-custom-flannel-999044 kube-system 0 (default) + 5251a8cf81038 49 seconds ago Ready etcd-custom-flannel-999044 kube-system 0 (default) + + + >>> host: crictl containers: + CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE + 04c6751b67eb2 registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 13 seconds ago Running dnsutils 0 55a0ede2ac7de netcat-cd4db9dbf-ntqvl default + 9282c71332d48 52546a367cc9e 17 seconds ago Running coredns 0 22b021222b543 coredns-66bc5c9577-hcgf7 kube-system + deca544d98354 6e38f40d628db 17 seconds ago Running storage-provisioner 0 7ddd9f8d25633 storage-provisioner kube-system + 4aa184bfd0beb b5c6c9203f83e 20 seconds ago Running kube-flannel 0 8f89e7d7960fb kube-flannel-ds-7psrd kube-flannel + a3c39da6d3ded rancher/mirrored-flannelcni-flannel@sha256:ec0f0b7430c8370c9f33fe76eb0392c1ad2ddf4ccaf2b9f43995cca6c94d3832 20 seconds ago Exited install-cni 0 8f89e7d7960fb kube-flannel-ds-7psrd kube-flannel + 3044066f312b2 rancher/mirrored-flannelcni-flannel-cni-plugin@sha256:28d3a6be9f450282bf42e4dad143d41da23e3d91f66f19c01ee7fd21fd17cb2b 32 seconds ago Exited install-cni-plugin 0 8f89e7d7960fb kube-flannel-ds-7psrd kube-flannel + b0687d9a66f57 fc25172553d79 36 seconds ago Running kube-proxy 0 03f2f27f2ddfd kube-proxy-zqtl2 kube-system + 68ba2ab023025 c80c8dbafe7dd 48 seconds ago Running kube-controller-manager 0 08892eacbe9a4 kube-controller-manager-custom-flannel-999044 kube-system + 05259b19afcc1 7dd6aaa1717ab 48 seconds ago Running kube-scheduler 0 d6ba6f1e0ab0b kube-scheduler-custom-flannel-999044 kube-system + 9eb2c19505351 5f1f5298c888d 48 seconds ago Running etcd 0 5251a8cf81038 etcd-custom-flannel-999044 kube-system + bce100d50209a c3994bc696102 48 seconds ago Running kube-apiserver 0 ba38a261a975f kube-apiserver-custom-flannel-999044 kube-system + + + >>> k8s: describe netcat deployment: + Name: netcat + Namespace: default + CreationTimestamp: Sun, 02 Nov 2025 23:24:30 +0000 + Labels: app=netcat + Annotations: deployment.kubernetes.io/revision: 1 + Selector: app=netcat + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 25% max unavailable, 25% max surge + Pod Template: + Labels: app=netcat + Containers: + dnsutils: + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + Environment: + Mounts: + Volumes: + Node-Selectors: + Tolerations: + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: netcat-cd4db9dbf (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 14s deployment-controller Scaled up replica set netcat-cd4db9dbf from 0 to 1 + + + >>> k8s: describe netcat pod(s): + Name: netcat-cd4db9dbf-ntqvl + Namespace: default + Priority: 0 + Service Account: default + Node: custom-flannel-999044/192.168.85.2 + Start Time: Sun, 02 Nov 2025 23:24:30 +0000 + Labels: app=netcat + pod-template-hash=cd4db9dbf + Annotations: + Status: Running + IP: 10.244.0.3 + IPs: + IP: 10.244.0.3 + Controlled By: ReplicaSet/netcat-cd4db9dbf + Containers: + dnsutils: + Container ID: 
docker://04c6751b67eb2396b5295a5d25cfcf781e497ff21fde6c58a9c509b287e8c07f + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Image ID: docker-pullable://registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + State: Running + Started: Sun, 02 Nov 2025 23:24:31 +0000 + Ready: True + Restart Count: 0 + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-kfjmt (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-api-access-kfjmt: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: + Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 15s default-scheduler Successfully assigned default/netcat-cd4db9dbf-ntqvl to custom-flannel-999044 + Normal Pulling 15s kubelet Pulling image "registry.k8s.io/e2e-test-images/agnhost:2.40" + Normal Pulled 14s kubelet Successfully pulled image "registry.k8s.io/e2e-test-images/agnhost:2.40" in 845ms (845ms including waiting). Image size: 127004766 bytes. + Normal Created 14s kubelet Created container: dnsutils + Normal Started 14s kubelet Started container dnsutils + + + >>> k8s: netcat logs: + + + >>> k8s: describe coredns deployment: + Name: coredns + Namespace: kube-system + CreationTimestamp: Sun, 02 Nov 2025 23:24:00 +0000 + Labels: k8s-app=kube-dns + Annotations: deployment.kubernetes.io/revision: 1 + Selector: k8s-app=kube-dns + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 1 max unavailable, 25% max surge + Pod Template: + Labels: k8s-app=kube-dns + Service Account: coredns + Containers: + coredns: + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Ports: 53/UDP (dns), 53/TCP (dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from config-volume (ro) + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + Priority Class Name: system-cluster-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: coredns-66bc5c9577 (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 39s deployment-controller Scaled up replica set coredns-66bc5c9577 from 0 to 
2 + Normal ScalingReplicaSet 37s deployment-controller Scaled down replica set coredns-66bc5c9577 from 2 to 1 + + + >>> k8s: describe coredns pods: + Name: coredns-66bc5c9577-hcgf7 + Namespace: kube-system + Priority: 2000000000 + Priority Class Name: system-cluster-critical + Service Account: coredns + Node: custom-flannel-999044/192.168.85.2 + Start Time: Sun, 02 Nov 2025 23:24:26 +0000 + Labels: k8s-app=kube-dns + pod-template-hash=66bc5c9577 + Annotations: + Status: Running + IP: 10.244.0.2 + IPs: + IP: 10.244.0.2 + Controlled By: ReplicaSet/coredns-66bc5c9577 + Containers: + coredns: + Container ID: docker://9282c71332d4863b516970b512bbde3b25ed1715d6f256670356acfbb11658a4 + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Image ID: docker-pullable://registry.k8s.io/coredns/coredns@sha256:e8c262566636e6bc340ece6473b0eed193cad045384401529721ddbe6463d31c + Ports: 53/UDP (dns), 53/TCP (dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + State: Running + Started: Sun, 02 Nov 2025 23:24:27 +0000 + Ready: True + Restart Count: 0 + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from config-volume (ro) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-96frb (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + kube-api-access-96frb: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: Burstable + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning FailedScheduling 38s default-scheduler 0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. no new claims to deallocate, preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling. + Normal Scheduled 19s default-scheduler Successfully assigned kube-system/coredns-66bc5c9577-hcgf7 to custom-flannel-999044 + Normal Pulled 18s kubelet Container image "registry.k8s.io/coredns/coredns:v1.12.1" already present on machine + Normal Created 18s kubelet Created container: coredns + Normal Started 18s kubelet Started container coredns + + + >>> k8s: coredns logs: + maxprocs: Leaving GOMAXPROCS=8: CPU quota undefined + .:53 + [INFO] plugin/reload: Running configuration SHA512 = fa9a0cdcdddcb4be74a0eaf7cfcb211c40e29ddf5507e03bbfc0065bade31f0f2641a2513136e246f32328dd126fc93236fb5c595246f0763926a524386705e8 + CoreDNS-1.12.1 + linux/amd64, go1.24.1, 707c7c1 + [INFO] 127.0.0.1:49129 - 43892 "HINFO IN 7899031585561935370.3891723458350664836. 
udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.885646218s + [INFO] 10.244.0.3:40776 - 28753 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000311783s + [INFO] 10.244.0.3:49301 - 52162 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000149877s + [INFO] 10.244.0.3:54564 - 494 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000080606s + [INFO] 10.244.0.3:42898 - 3787 "AAAA IN netcat.default.svc.cluster.local. udp 50 false 512" NOERROR qr,aa,rd 143 0.000119368s + [INFO] 10.244.0.3:42898 - 3569 "A IN netcat.default.svc.cluster.local. udp 50 false 512" NOERROR qr,aa,rd 98 0.000129739s + [INFO] 10.244.0.3:39901 - 32368 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000086765s + [INFO] 10.244.0.3:39226 - 27477 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000092184s + [INFO] 10.244.0.3:60089 - 20319 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000068107s + + + >>> k8s: describe api server pod(s): + Name: kube-apiserver-custom-flannel-999044 + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Node: custom-flannel-999044/192.168.85.2 + Start Time: Sun, 02 Nov 2025 23:23:59 +0000 + Labels: component=kube-apiserver + tier=control-plane + Annotations: kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 192.168.85.2:8443 + kubernetes.io/config.hash: 71b296ad899e92ac3db44ae7df3030a9 + kubernetes.io/config.mirror: 71b296ad899e92ac3db44ae7df3030a9 + kubernetes.io/config.seen: 2025-11-02T23:23:59.852192128Z + kubernetes.io/config.source: file + Status: Running + SeccompProfile: RuntimeDefault + IP: 192.168.85.2 + IPs: + IP: 192.168.85.2 + Controlled By: Node/custom-flannel-999044 + Containers: + kube-apiserver: + Container ID: docker://bce100d50209a3e6d902733bd95c02c0d048d817a2544465fa1d818822479d1e + Image: registry.k8s.io/kube-apiserver:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-apiserver@sha256:b9d7c117f8ac52bed4b13aeed973dc5198f9d93a926e6fe9e0b384f155baa902 + Port: 8443/TCP (probe-port) + Host Port: 8443/TCP (probe-port) + Command: + kube-apiserver + --advertise-address=192.168.85.2 + --allow-privileged=true + --authorization-mode=Node,RBAC + --client-ca-file=/var/lib/minikube/certs/ca.crt + --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + --enable-bootstrap-token-auth=true + --etcd-cafile=/var/lib/minikube/certs/etcd/ca.crt + --etcd-certfile=/var/lib/minikube/certs/apiserver-etcd-client.crt + --etcd-keyfile=/var/lib/minikube/certs/apiserver-etcd-client.key + --etcd-servers=https://127.0.0.1:2379 + --kubelet-client-certificate=/var/lib/minikube/certs/apiserver-kubelet-client.crt + --kubelet-client-key=/var/lib/minikube/certs/apiserver-kubelet-client.key + --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + --proxy-client-cert-file=/var/lib/minikube/certs/front-proxy-client.crt + --proxy-client-key-file=/var/lib/minikube/certs/front-proxy-client.key + --requestheader-allowed-names=front-proxy-client + --requestheader-client-ca-file=/var/lib/minikube/certs/front-proxy-ca.crt + --requestheader-extra-headers-prefix=X-Remote-Extra- + --requestheader-group-headers=X-Remote-Group + --requestheader-username-headers=X-Remote-User + 
--secure-port=8443 + --service-account-issuer=https://kubernetes.default.svc.cluster.local + --service-account-key-file=/var/lib/minikube/certs/sa.pub + --service-account-signing-key-file=/var/lib/minikube/certs/sa.key + --service-cluster-ip-range=10.96.0.0/12 + --tls-cert-file=/var/lib/minikube/certs/apiserver.crt + --tls-private-key-file=/var/lib/minikube/certs/apiserver.key + State: Running + Started: Sun, 02 Nov 2025 23:23:56 +0000 + Ready: True + Restart Count: 0 + Requests: + cpu: 250m + Liveness: http-get https://192.168.85.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=8 + Readiness: http-get https://192.168.85.2:probe-port/readyz delay=0s timeout=15s period=1s #success=1 #failure=3 + Startup: http-get https://192.168.85.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=24 + Environment: + Mounts: + /etc/ca-certificates from etc-ca-certificates (ro) + /etc/ssl/certs from ca-certs (ro) + /usr/local/share/ca-certificates from usr-local-share-ca-certificates (ro) + /usr/share/ca-certificates from usr-share-ca-certificates (ro) + /var/lib/minikube/certs from k8s-certs (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + ca-certs: + Type: HostPath (bare host directory volume) + Path: /etc/ssl/certs + HostPathType: DirectoryOrCreate + etc-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /etc/ca-certificates + HostPathType: DirectoryOrCreate + k8s-certs: + Type: HostPath (bare host directory volume) + Path: /var/lib/minikube/certs + HostPathType: DirectoryOrCreate + usr-local-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/local/share/ca-certificates + HostPathType: DirectoryOrCreate + usr-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/share/ca-certificates + HostPathType: DirectoryOrCreate + QoS Class: Burstable + Node-Selectors: + Tolerations: :NoExecute op=Exists + Events: + + + >>> k8s: api server logs: + I1102 23:23:56.165311 1 options.go:263] external host was not specified, using 192.168.85.2 + I1102 23:23:56.166634 1 server.go:150] Version: v1.34.1 + I1102 23:23:56.166707 1 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + W1102 23:23:56.456033 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=coordination.k8s.io/v1alpha2 + W1102 23:23:56.456048 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=imagepolicy.k8s.io/v1alpha1 + W1102 23:23:56.456053 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=resource.k8s.io/v1alpha3 + W1102 23:23:56.456055 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=scheduling.k8s.io/v1alpha1 + W1102 23:23:56.456057 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=authentication.k8s.io/v1alpha1 + W1102 23:23:56.456059 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed 
at your own risk: api=storagemigration.k8s.io/v1alpha1 + W1102 23:23:56.456061 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=admissionregistration.k8s.io/v1alpha1 + W1102 23:23:56.456064 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=internal.apiserver.k8s.io/v1alpha1 + W1102 23:23:56.456066 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=certificates.k8s.io/v1alpha1 + W1102 23:23:56.456068 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=rbac.authorization.k8s.io/v1alpha1 + W1102 23:23:56.456070 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=storage.k8s.io/v1alpha1 + W1102 23:23:56.456073 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=node.k8s.io/v1alpha1 + W1102 23:23:56.464334 1 logging.go:55] [core] [Channel #2 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:56.464503 1 logging.go:55] [core] [Channel #1 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:23:56.465106 1 shared_informer.go:349] "Waiting for caches to sync" controller="node_authorizer" + I1102 23:23:56.468924 1 shared_informer.go:349] "Waiting for caches to sync" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:23:56.474357 1 plugins.go:157] Loaded 14 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,RuntimeClass,DefaultIngressClass,PodTopologyLabels,MutatingAdmissionPolicy,MutatingAdmissionWebhook. + I1102 23:23:56.474369 1 plugins.go:160] Loaded 13 validating admission controller(s) successfully in the following order: LimitRanger,ServiceAccount,PodSecurity,Priority,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,ClusterTrustBundleAttest,CertificateSubjectRestriction,ValidatingAdmissionPolicy,ValidatingAdmissionWebhook,ResourceQuota. 
+ I1102 23:23:56.474527 1 instance.go:239] Using reconciler: lease + W1102 23:23:56.475156 1 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:57.060368 1 logging.go:55] [core] [Channel #13 SubChannel #14]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.065703 1 logging.go:55] [core] [Channel #21 SubChannel #22]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:23:57.071080 1 handler.go:285] Adding GroupVersion apiextensions.k8s.io v1 to ResourceManager + W1102 23:23:57.071096 1 genericapiserver.go:784] Skipping API apiextensions.k8s.io/v1beta1 because it has no resources. + I1102 23:23:57.073292 1 cidrallocator.go:197] starting ServiceCIDR Allocator Controller + W1102 23:23:57.073757 1 logging.go:55] [core] [Channel #27 SubChannel #28]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.077595 1 logging.go:55] [core] [Channel #31 SubChannel #32]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.081146 1 logging.go:55] [core] [Channel #35 SubChannel #36]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.084513 1 logging.go:55] [core] [Channel #39 SubChannel #40]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.088252 1 logging.go:55] [core] [Channel #43 SubChannel #44]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:57.092295 1 logging.go:55] [core] [Channel #47 SubChannel #48]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:57.096680 1 logging.go:55] [core] [Channel #51 SubChannel #52]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.101134 1 logging.go:55] [core] [Channel #55 SubChannel #56]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:57.107049 1 logging.go:55] [core] [Channel #59 SubChannel #60]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.110253 1 logging.go:55] [core] [Channel #63 SubChannel #64]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.113657 1 logging.go:55] [core] [Channel #67 SubChannel #68]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.117496 1 logging.go:55] [core] [Channel #71 SubChannel #72]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.120757 1 logging.go:55] [core] [Channel #75 SubChannel #76]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.124615 1 logging.go:55] [core] [Channel #79 SubChannel #80]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:57.128008 1 logging.go:55] [core] [Channel #83 SubChannel #84]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.131408 1 logging.go:55] [core] [Channel #87 SubChannel #88]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.135615 1 logging.go:55] [core] [Channel #91 SubChannel #92]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + I1102 23:23:57.151347 1 handler.go:285] Adding GroupVersion v1 to ResourceManager + I1102 23:23:57.151519 1 apis.go:112] API group "internal.apiserver.k8s.io" is not enabled, skipping. + W1102 23:23:57.152206 1 logging.go:55] [core] [Channel #95 SubChannel #96]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.156004 1 logging.go:55] [core] [Channel #99 SubChannel #100]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.159761 1 logging.go:55] [core] [Channel #103 SubChannel #104]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.163177 1 logging.go:55] [core] [Channel #107 SubChannel #108]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.166766 1 logging.go:55] [core] [Channel #111 SubChannel #112]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.170694 1 logging.go:55] [core] [Channel #115 SubChannel #116]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.174538 1 logging.go:55] [core] [Channel #119 SubChannel #120]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:57.178400 1 logging.go:55] [core] [Channel #123 SubChannel #124]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.182229 1 logging.go:55] [core] [Channel #127 SubChannel #128]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.185307 1 logging.go:55] [core] [Channel #131 SubChannel #132]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:57.188658 1 logging.go:55] [core] [Channel #135 SubChannel #136]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.191882 1 logging.go:55] [core] [Channel #139 SubChannel #140]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.195318 1 logging.go:55] [core] [Channel #143 SubChannel #144]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.199149 1 logging.go:55] [core] [Channel #147 SubChannel #148]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.202615 1 logging.go:55] [core] [Channel #151 SubChannel #152]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:57.206270 1 logging.go:55] [core] [Channel #155 SubChannel #156]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.209639 1 logging.go:55] [core] [Channel #159 SubChannel #160]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.213818 1 logging.go:55] [core] [Channel #163 SubChannel #164]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.217610 1 logging.go:55] [core] [Channel #167 SubChannel #168]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:57.221095 1 logging.go:55] [core] [Channel #171 SubChannel #172]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.225071 1 logging.go:55] [core] [Channel #175 SubChannel #176]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.228887 1 logging.go:55] [core] [Channel #179 SubChannel #180]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.232670 1 logging.go:55] [core] [Channel #183 SubChannel #184]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.237142 1 logging.go:55] [core] [Channel #187 SubChannel #188]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:23:57.240236 1 apis.go:112] API group "storagemigration.k8s.io" is not enabled, skipping. + W1102 23:23:57.240786 1 logging.go:55] [core] [Channel #191 SubChannel #192]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.244659 1 logging.go:55] [core] [Channel #195 SubChannel #196]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.253300 1 logging.go:55] [core] [Channel #199 SubChannel #200]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:57.257366 1 logging.go:55] [core] [Channel #203 SubChannel #204]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.261226 1 logging.go:55] [core] [Channel #207 SubChannel #208]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:57.264964 1 logging.go:55] [core] [Channel #211 SubChannel #212]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.269433 1 logging.go:55] [core] [Channel #215 SubChannel #216]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.273281 1 logging.go:55] [core] [Channel #219 SubChannel #220]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:57.276728 1 logging.go:55] [core] [Channel #223 SubChannel #224]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.280208 1 logging.go:55] [core] [Channel #227 SubChannel #228]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:57.284562 1 logging.go:55] [core] [Channel #231 SubChannel #232]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.290441 1 logging.go:55] [core] [Channel #235 SubChannel #236]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.296180 1 logging.go:55] [core] [Channel #239 SubChannel #240]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.308310 1 logging.go:55] [core] [Channel #243 SubChannel #244]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.311582 1 logging.go:55] [core] [Channel #247 SubChannel #248]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:57.314821 1 logging.go:55] [core] [Channel #251 SubChannel #252]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:23:57.329202 1 handler.go:285] Adding GroupVersion authentication.k8s.io v1 to ResourceManager + W1102 23:23:57.329215 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1beta1 because it has no resources. + W1102 23:23:57.329219 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:57.329493 1 handler.go:285] Adding GroupVersion authorization.k8s.io v1 to ResourceManager + W1102 23:23:57.329498 1 genericapiserver.go:784] Skipping API authorization.k8s.io/v1beta1 because it has no resources. + I1102 23:23:57.330056 1 handler.go:285] Adding GroupVersion autoscaling v2 to ResourceManager + I1102 23:23:57.330488 1 handler.go:285] Adding GroupVersion autoscaling v1 to ResourceManager + W1102 23:23:57.330496 1 genericapiserver.go:784] Skipping API autoscaling/v2beta1 because it has no resources. + W1102 23:23:57.330500 1 genericapiserver.go:784] Skipping API autoscaling/v2beta2 because it has no resources. + I1102 23:23:57.331248 1 handler.go:285] Adding GroupVersion batch v1 to ResourceManager + W1102 23:23:57.331258 1 genericapiserver.go:784] Skipping API batch/v1beta1 because it has no resources. + I1102 23:23:57.331661 1 handler.go:285] Adding GroupVersion certificates.k8s.io v1 to ResourceManager + W1102 23:23:57.331666 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1beta1 because it has no resources. + W1102 23:23:57.331668 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:57.331977 1 handler.go:285] Adding GroupVersion coordination.k8s.io v1 to ResourceManager + W1102 23:23:57.331991 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1beta1 because it has no resources. + W1102 23:23:57.331993 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1alpha2 because it has no resources. + I1102 23:23:57.332246 1 handler.go:285] Adding GroupVersion discovery.k8s.io v1 to ResourceManager + W1102 23:23:57.332250 1 genericapiserver.go:784] Skipping API discovery.k8s.io/v1beta1 because it has no resources. + I1102 23:23:57.333386 1 handler.go:285] Adding GroupVersion networking.k8s.io v1 to ResourceManager + W1102 23:23:57.333395 1 genericapiserver.go:784] Skipping API networking.k8s.io/v1beta1 because it has no resources. + I1102 23:23:57.333634 1 handler.go:285] Adding GroupVersion node.k8s.io v1 to ResourceManager + W1102 23:23:57.333644 1 genericapiserver.go:784] Skipping API node.k8s.io/v1beta1 because it has no resources. + W1102 23:23:57.333647 1 genericapiserver.go:784] Skipping API node.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:57.334174 1 handler.go:285] Adding GroupVersion policy v1 to ResourceManager + W1102 23:23:57.334184 1 genericapiserver.go:784] Skipping API policy/v1beta1 because it has no resources. + I1102 23:23:57.335171 1 handler.go:285] Adding GroupVersion rbac.authorization.k8s.io v1 to ResourceManager + W1102 23:23:57.335179 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1beta1 because it has no resources. + W1102 23:23:57.335185 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:57.335460 1 handler.go:285] Adding GroupVersion scheduling.k8s.io v1 to ResourceManager + W1102 23:23:57.335465 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1beta1 because it has no resources. 
+ W1102 23:23:57.335468 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:57.337036 1 handler.go:285] Adding GroupVersion storage.k8s.io v1 to ResourceManager + W1102 23:23:57.337070 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1beta1 because it has no resources. + W1102 23:23:57.337074 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:57.337664 1 handler.go:285] Adding GroupVersion flowcontrol.apiserver.k8s.io v1 to ResourceManager + W1102 23:23:57.337670 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta3 because it has no resources. + W1102 23:23:57.337673 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta2 because it has no resources. + W1102 23:23:57.337677 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta1 because it has no resources. + I1102 23:23:57.339528 1 handler.go:285] Adding GroupVersion apps v1 to ResourceManager + W1102 23:23:57.339538 1 genericapiserver.go:784] Skipping API apps/v1beta2 because it has no resources. + W1102 23:23:57.339541 1 genericapiserver.go:784] Skipping API apps/v1beta1 because it has no resources. + I1102 23:23:57.340435 1 handler.go:285] Adding GroupVersion admissionregistration.k8s.io v1 to ResourceManager + W1102 23:23:57.340442 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1beta1 because it has no resources. + W1102 23:23:57.340445 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:57.340706 1 handler.go:285] Adding GroupVersion events.k8s.io v1 to ResourceManager + W1102 23:23:57.340712 1 genericapiserver.go:784] Skipping API events.k8s.io/v1beta1 because it has no resources. + W1102 23:23:57.340735 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta2 because it has no resources. + I1102 23:23:57.341594 1 handler.go:285] Adding GroupVersion resource.k8s.io v1 to ResourceManager + W1102 23:23:57.341600 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta1 because it has no resources. + W1102 23:23:57.341602 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1alpha3 because it has no resources. + W1102 23:23:57.343068 1 logging.go:55] [core] [Channel #255 SubChannel #256]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:23:57.346669 1 handler.go:285] Adding GroupVersion apiregistration.k8s.io v1 to ResourceManager + W1102 23:23:57.346678 1 genericapiserver.go:784] Skipping API apiregistration.k8s.io/v1beta1 because it has no resources. 
+ I1102 23:23:57.658250 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:23:57.658256 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:23:57.658455 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/lib/minikube/certs/apiserver.crt::/var/lib/minikube/certs/apiserver.key" + I1102 23:23:57.658664 1 secure_serving.go:211] Serving securely on [::]:8443 + I1102 23:23:57.658707 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" + I1102 23:23:57.658845 1 dynamic_serving_content.go:135] "Starting controller" name="aggregator-proxy-cert::/var/lib/minikube/certs/front-proxy-client.crt::/var/lib/minikube/certs/front-proxy-client.key" + I1102 23:23:57.658899 1 cluster_authentication_trust_controller.go:459] Starting cluster_authentication_trust_controller controller + I1102 23:23:57.658909 1 local_available_controller.go:156] Starting LocalAvailability controller + I1102 23:23:57.658929 1 shared_informer.go:349] "Waiting for caches to sync" controller="cluster_authentication_trust_controller" + I1102 23:23:57.660698 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:23:57.660864 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:23:57.658930 1 cache.go:32] Waiting for caches to sync for LocalAvailability controller + I1102 23:23:57.671828 1 crdregistration_controller.go:114] Starting crd-autoregister controller + I1102 23:23:57.671845 1 shared_informer.go:349] "Waiting for caches to sync" controller="crd-autoregister" + I1102 23:23:57.671884 1 default_servicecidr_controller.go:111] Starting kubernetes-service-cidr-controller + I1102 23:23:57.671891 1 shared_informer.go:349] "Waiting for caches to sync" controller="kubernetes-service-cidr-controller" + I1102 23:23:57.658948 1 apiservice_controller.go:100] Starting APIServiceRegistrationController + I1102 23:23:57.673287 1 cache.go:32] Waiting for caches to sync for APIServiceRegistrationController controller + I1102 23:23:57.659144 1 remote_available_controller.go:425] Starting RemoteAvailability controller + I1102 23:23:57.673419 1 cache.go:32] Waiting for caches to sync for RemoteAvailability controller + I1102 23:23:57.659154 1 controller.go:80] Starting OpenAPI V3 AggregationController + I1102 23:23:57.659371 1 customresource_discovery_controller.go:294] Starting DiscoveryController + I1102 23:23:57.659405 1 system_namespaces_controller.go:66] Starting system namespaces controller + I1102 23:23:57.659446 1 controller.go:142] Starting OpenAPI controller + I1102 23:23:57.659456 1 controller.go:90] Starting OpenAPI V3 controller + I1102 23:23:57.659462 1 naming_controller.go:299] Starting NamingConditionController + I1102 23:23:57.659468 1 establishing_controller.go:81] Starting EstablishingController + I1102 23:23:57.659478 1 nonstructuralschema_controller.go:195] Starting NonStructuralSchemaConditionController + I1102 23:23:57.659484 1 apiapproval_controller.go:189] Starting KubernetesAPIApprovalPolicyConformantConditionController + I1102 23:23:57.659490 1 crd_finalizer.go:269] Starting CRDFinalizer + I1102 23:23:57.660044 1 controller.go:119] Starting legacy_token_tracking_controller + I1102 23:23:57.674378 1 shared_informer.go:349] "Waiting for caches to sync" controller="configmaps" + I1102 
23:23:57.660515 1 aggregator.go:169] waiting for initial CRD sync... + I1102 23:23:57.660571 1 apf_controller.go:377] Starting API Priority and Fairness config controller + I1102 23:23:57.660646 1 controller.go:78] Starting OpenAPI AggregationController + I1102 23:23:57.660680 1 gc_controller.go:78] Starting apiserver lease garbage collector + I1102 23:23:57.678359 1 repairip.go:210] Starting ipallocator-repair-controller + I1102 23:23:57.678420 1 shared_informer.go:349] "Waiting for caches to sync" controller="ipallocator-repair-controller" + I1102 23:23:57.760078 1 shared_informer.go:356] "Caches are synced" controller="cluster_authentication_trust_controller" + I1102 23:23:57.761898 1 cache.go:39] Caches are synced for LocalAvailability controller + I1102 23:23:57.765157 1 shared_informer.go:356] "Caches are synced" controller="node_authorizer" + I1102 23:23:57.769335 1 shared_informer.go:356] "Caches are synced" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:23:57.769354 1 policy_source.go:240] refreshing policies + I1102 23:23:57.772545 1 shared_informer.go:356] "Caches are synced" controller="kubernetes-service-cidr-controller" + I1102 23:23:57.772564 1 default_servicecidr_controller.go:166] Creating default ServiceCIDR with CIDRs: [10.96.0.0/12] + I1102 23:23:57.772627 1 shared_informer.go:356] "Caches are synced" controller="crd-autoregister" + I1102 23:23:57.772652 1 aggregator.go:171] initial CRD sync complete... + I1102 23:23:57.772657 1 autoregister_controller.go:144] Starting autoregister controller + I1102 23:23:57.772663 1 cache.go:32] Waiting for caches to sync for autoregister controller + I1102 23:23:57.772667 1 cache.go:39] Caches are synced for autoregister controller + I1102 23:23:57.773425 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller + I1102 23:23:57.773449 1 handler_discovery.go:451] Starting ResourceDiscoveryManager + I1102 23:23:57.773521 1 cache.go:39] Caches are synced for RemoteAvailability controller + I1102 23:23:57.775054 1 apf_controller.go:382] Running API Priority and Fairness config worker + I1102 23:23:57.775063 1 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process + I1102 23:23:57.775216 1 shared_informer.go:356] "Caches are synced" controller="configmaps" + I1102 23:23:57.775277 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True + I1102 23:23:57.775816 1 controller.go:667] quota admission added evaluator for: namespaces + I1102 23:23:57.776404 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:23:57.779031 1 shared_informer.go:356] "Caches are synced" controller="ipallocator-repair-controller" + I1102 23:23:57.780000 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:23:57.780453 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller + I1102 23:23:57.784834 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io + I1102 23:23:58.662906 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000 + I1102 23:23:58.665345 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000 + I1102 23:23:58.665357 1 storage_scheduling.go:111] all 
system priority classes are created successfully or already exist. + I1102 23:23:58.943652 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io + I1102 23:23:58.977392 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io + I1102 23:23:59.064142 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"} + W1102 23:23:59.067222 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.85.2] + I1102 23:23:59.067771 1 controller.go:667] quota admission added evaluator for: endpoints + I1102 23:23:59.070126 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io + I1102 23:23:59.691823 1 controller.go:667] quota admission added evaluator for: serviceaccounts + I1102 23:24:00.098349 1 controller.go:667] quota admission added evaluator for: deployments.apps + I1102 23:24:00.102687 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"} + I1102 23:24:00.107204 1 controller.go:667] quota admission added evaluator for: daemonsets.apps + I1102 23:24:05.442956 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:24:05.445061 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:24:05.491560 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps + I1102 23:24:06.607780 1 controller.go:667] quota admission added evaluator for: replicasets.apps + I1102 23:24:30.306336 1 alloc.go:328] "allocated clusterIPs" service="default/netcat" clusterIPs={"IPv4":"10.98.198.69"} + E1102 23:24:38.403471 1 conn.go:339] Error on socket receive: read tcp 192.168.85.2:8443->192.168.85.1:56956: use of closed network connection + E1102 23:24:38.470077 1 conn.go:339] Error on socket receive: read tcp 192.168.85.2:8443->192.168.85.1:56968: use of closed network connection + E1102 23:24:38.534459 1 conn.go:339] Error on socket receive: read tcp 192.168.85.2:8443->192.168.85.1:56992: use of closed network connection + E1102 23:24:38.611587 1 conn.go:339] Error on socket receive: read tcp 192.168.85.2:8443->192.168.85.1:57010: use of closed network connection + E1102 23:24:43.678344 1 conn.go:339] Error on socket receive: read tcp 192.168.85.2:8443->192.168.85.1:57026: use of closed network connection + E1102 23:24:43.749474 1 conn.go:339] Error on socket receive: read tcp 192.168.85.2:8443->192.168.85.1:42944: use of closed network connection + E1102 23:24:43.816024 1 conn.go:339] Error on socket receive: read tcp 192.168.85.2:8443->192.168.85.1:42976: use of closed network connection + E1102 23:24:43.881468 1 conn.go:339] Error on socket receive: read tcp 192.168.85.2:8443->192.168.85.1:42998: use of closed network connection + E1102 23:24:43.950591 1 conn.go:339] Error on socket receive: read tcp 192.168.85.2:8443->192.168.85.1:43012: use of closed network connection + + + >>> host: /etc/cni: + /etc/cni/net.d/cni.lock + /etc/cni/net.d/87-podman-bridge.conflist.mk_disabled + { + "cniVersion": "0.4.0", + "name": "podman", + "plugins": [ + { + "type": "bridge", + "bridge": "cni-podman0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [{ "dst": "0.0.0.0/0" }], + "ranges": [ + [ + { + "subnet": "10.88.0.0/16", + "gateway": "10.88.0.1" + } + ] + ] + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type": 
"firewall" + }, + { + "type": "tuning" + } + ] + } + /etc/cni/net.d/10-flannel.conflist + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + /etc/cni/net.d/10-crio-bridge.conflist.disabled.mk_disabled + { + "cniVersion": "1.0.0", + "name": "crio", + "plugins": [ + { + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [ + { "dst": "0.0.0.0/0" }, + { "dst": "::/0" } + ], + "ranges": [ + [{ "subnet": "10.85.0.0/16" }], + [{ "subnet": "1100:200::/24" }] + ] + } + } + ] + } + + + >>> host: ip a s: + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: eth0@if339: mtu 1500 qdisc noqueue state UP group default + link/ether 82:6a:a6:6f:b5:cb brd ff:ff:ff:ff:ff:ff link-netnsid 0 + inet 192.168.85.2/24 brd 192.168.85.255 scope global eth0 + valid_lft forever preferred_lft forever + 3: docker0: mtu 1500 qdisc noqueue state DOWN group default + link/ether b6:d9:71:55:92:45 brd ff:ff:ff:ff:ff:ff + inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0 + valid_lft forever preferred_lft forever + 4: tunl0@NONE: mtu 1480 qdisc noop state DOWN group default qlen 1000 + link/ipip 0.0.0.0 brd 0.0.0.0 + 5: flannel.1: mtu 1450 qdisc noqueue state UNKNOWN group default + link/ether 9a:e9:46:03:8f:d4 brd ff:ff:ff:ff:ff:ff + inet 10.244.0.0/32 scope global flannel.1 + valid_lft forever preferred_lft forever + inet6 fe80::98e9:46ff:fe03:8fd4/64 scope link + valid_lft forever preferred_lft forever + 6: cni0: mtu 1450 qdisc noqueue state UP group default qlen 1000 + link/ether 02:98:a2:47:77:98 brd ff:ff:ff:ff:ff:ff + inet 10.244.0.1/24 brd 10.244.0.255 scope global cni0 + valid_lft forever preferred_lft forever + inet6 fe80::98:a2ff:fe47:7798/64 scope link + valid_lft forever preferred_lft forever + 7: veth4c220ccf@if3: mtu 1450 qdisc noqueue master cni0 state UP group default + link/ether f6:da:00:1d:da:9f brd ff:ff:ff:ff:ff:ff link-netnsid 1 + inet6 fe80::f4da:ff:fe1d:da9f/64 scope link + valid_lft forever preferred_lft forever + 8: veth17315b54@if3: mtu 1450 qdisc noqueue master cni0 state UP group default + link/ether 8a:d9:17:fa:d7:f0 brd ff:ff:ff:ff:ff:ff link-netnsid 2 + inet6 fe80::88d9:17ff:fefa:d7f0/64 scope link + valid_lft forever preferred_lft forever + + + >>> host: ip r s: + default via 192.168.85.1 dev eth0 + 10.244.0.0/24 dev cni0 proto kernel scope link src 10.244.0.1 + 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown + 192.168.85.0/24 dev eth0 proto kernel scope link src 192.168.85.2 + + + >>> host: iptables-save: + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:24:45 2025 + *mangle + :PREROUTING ACCEPT [26001:81190534] + :INPUT ACCEPT [25966:81187736] + :FORWARD ACCEPT [35:2798] + :OUTPUT ACCEPT [18851:7569832] + :POSTROUTING ACCEPT [18886:7572630] + :KUBE-IPTABLES-HINT - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-PROXY-CANARY - [0:0] + COMMIT + # Completed on Sun Nov 2 23:24:45 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:24:45 2025 + *filter + :INPUT ACCEPT [4411:1075781] + :FORWARD ACCEPT [0:0] + :OUTPUT ACCEPT [4365:1389669] + :DOCKER - 
[0:0] + :DOCKER-BRIDGE - [0:0] + :DOCKER-CT - [0:0] + :DOCKER-FORWARD - [0:0] + :DOCKER-ISOLATION-STAGE-1 - [0:0] + :DOCKER-ISOLATION-STAGE-2 - [0:0] + :DOCKER-USER - [0:0] + :FLANNEL-FWD - [0:0] + :KUBE-EXTERNAL-SERVICES - [0:0] + :KUBE-FIREWALL - [0:0] + :KUBE-FORWARD - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-PROXY-FIREWALL - [0:0] + :KUBE-SERVICES - [0:0] + -A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A INPUT -m comment --comment "kubernetes health check service ports" -j KUBE-NODEPORTS + -A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A INPUT -j KUBE-FIREWALL + -A FORWARD -m comment --comment "flanneld forward" -j FLANNEL-FWD + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A FORWARD -j DOCKER-USER + -A FORWARD -j DOCKER-FORWARD + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -j KUBE-FIREWALL + -A DOCKER ! -i docker0 -o docker0 -j DROP + -A DOCKER-BRIDGE -o docker0 -j DOCKER + -A DOCKER-CT -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A DOCKER-FORWARD -j DOCKER-CT + -A DOCKER-FORWARD -j DOCKER-ISOLATION-STAGE-1 + -A DOCKER-FORWARD -j DOCKER-BRIDGE + -A DOCKER-FORWARD -i docker0 -j ACCEPT + -A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2 + -A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP + -A FLANNEL-FWD -s 10.244.0.0/16 -m comment --comment "flanneld forward" -j ACCEPT + -A FLANNEL-FWD -d 10.244.0.0/16 -m comment --comment "flanneld forward" -j ACCEPT + -A KUBE-FIREWALL ! -s 127.0.0.0/8 -d 127.0.0.0/8 -m comment --comment "block incoming localnet connections" -m conntrack ! 
--ctstate RELATED,ESTABLISHED,DNAT -j DROP + -A KUBE-FORWARD -m conntrack --ctstate INVALID -m nfacct --nfacct-name ct_state_invalid_dropped_pkts -j DROP + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + COMMIT + # Completed on Sun Nov 2 23:24:45 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:24:45 2025 + *nat + :PREROUTING ACCEPT [36:2160] + :INPUT ACCEPT [36:2160] + :OUTPUT ACCEPT [59:3540] + :POSTROUTING ACCEPT [68:4215] + :DOCKER - [0:0] + :DOCKER_OUTPUT - [0:0] + :DOCKER_POSTROUTING - [0:0] + :FLANNEL-POSTRTG - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-MARK-MASQ - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-POSTROUTING - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-SEP-BL4BN7UU5M2OIW4P - [0:0] + :KUBE-SEP-IT2ZTR26TO4XFPTO - [0:0] + :KUBE-SEP-N4G2XR5TDX7PQE7P - [0:0] + :KUBE-SEP-UTWFOSUDHOCXYA2F - [0:0] + :KUBE-SEP-YIL6JZP7A3QYXJU2 - [0:0] + :KUBE-SERVICES - [0:0] + :KUBE-SVC-ERIFXISQEP7F7OF4 - [0:0] + :KUBE-SVC-JD5MR3NA4I4DYORP - [0:0] + :KUBE-SVC-NPX46M4PTMTKRN6Y - [0:0] + :KUBE-SVC-TCOU7JCQXEZGVUNU - [0:0] + :KUBE-SVC-WDP22YZC5S6MZWYX - [0:0] + -A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A PREROUTING -d 192.168.85.1/32 -j DOCKER_OUTPUT + -A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER + -A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -d 192.168.85.1/32 -j DOCKER_OUTPUT + -A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER + -A POSTROUTING -m comment --comment "flanneld masq" -j FLANNEL-POSTRTG + -A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING + -A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE + -A POSTROUTING -d 192.168.85.1/32 -j DOCKER_POSTROUTING + -A DOCKER -i docker0 -j RETURN + -A DOCKER_OUTPUT -d 192.168.85.1/32 -p tcp -m tcp --dport 53 -j DNAT --to-destination 127.0.0.11:37659 + -A DOCKER_OUTPUT -d 192.168.85.1/32 -p udp -m udp --dport 53 -j DNAT --to-destination 127.0.0.11:49613 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p tcp -m tcp --sport 37659 -j SNAT --to-source 192.168.85.1:53 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p udp -m udp --sport 49613 -j SNAT --to-source 192.168.85.1:53 + -A FLANNEL-POSTRTG -m mark --mark 0x4000/0x4000 -m comment --comment "flanneld masq" -j RETURN + -A FLANNEL-POSTRTG -s 10.244.0.0/16 -d 10.244.0.0/16 -m comment --comment "flanneld masq" -j RETURN + -A FLANNEL-POSTRTG -s 10.244.0.0/16 ! -d 224.0.0.0/4 -m comment --comment "flanneld masq" -j MASQUERADE --random-fully + -A FLANNEL-POSTRTG ! -s 10.244.0.0/16 -d 10.244.0.0/24 -m comment --comment "flanneld masq" -j RETURN + -A FLANNEL-POSTRTG ! -s 10.244.0.0/16 -d 10.244.0.0/16 -m comment --comment "flanneld masq" -j MASQUERADE --random-fully + -A KUBE-MARK-MASQ -j MARK --set-xmark 0x4000/0x4000 + -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN + -A KUBE-POSTROUTING -j MARK --set-xmark 0x4000/0x0 + -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE --random-fully + -A KUBE-SEP-BL4BN7UU5M2OIW4P -s 192.168.85.2/32 -m comment --comment "default/kubernetes:https" -j KUBE-MARK-MASQ + -A KUBE-SEP-BL4BN7UU5M2OIW4P -p tcp -m comment --comment "default/kubernetes:https" -m tcp -j DNAT --to-destination 192.168.85.2:8443 + -A KUBE-SEP-IT2ZTR26TO4XFPTO -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-MARK-MASQ + -A KUBE-SEP-IT2ZTR26TO4XFPTO -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m tcp -j DNAT --to-destination 10.244.0.2:53 + -A KUBE-SEP-N4G2XR5TDX7PQE7P -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:metrics" -j KUBE-MARK-MASQ + -A KUBE-SEP-N4G2XR5TDX7PQE7P -p tcp -m comment --comment "kube-system/kube-dns:metrics" -m tcp -j DNAT --to-destination 10.244.0.2:9153 + -A KUBE-SEP-UTWFOSUDHOCXYA2F -s 10.244.0.3/32 -m comment --comment "default/netcat" -j KUBE-MARK-MASQ + -A KUBE-SEP-UTWFOSUDHOCXYA2F -p tcp -m comment --comment "default/netcat" -m tcp -j DNAT --to-destination 10.244.0.3:8080 + -A KUBE-SEP-YIL6JZP7A3QYXJU2 -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-MARK-MASQ + -A KUBE-SEP-YIL6JZP7A3QYXJU2 -p udp -m comment --comment "kube-system/kube-dns:dns" -m udp -j DNAT --to-destination 10.244.0.2:53 + -A KUBE-SERVICES -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-NPX46M4PTMTKRN6Y + -A KUBE-SERVICES -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-SVC-TCOU7JCQXEZGVUNU + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-SVC-ERIFXISQEP7F7OF4 + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-SVC-JD5MR3NA4I4DYORP + -A KUBE-SERVICES -d 10.98.198.69/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-SVC-WDP22YZC5S6MZWYX + -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS + -A KUBE-SVC-ERIFXISQEP7F7OF4 ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-ERIFXISQEP7F7OF4 -m comment --comment "kube-system/kube-dns:dns-tcp -> 10.244.0.2:53" -j KUBE-SEP-IT2ZTR26TO4XFPTO + -A KUBE-SVC-JD5MR3NA4I4DYORP ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-MARK-MASQ + -A KUBE-SVC-JD5MR3NA4I4DYORP -m comment --comment "kube-system/kube-dns:metrics -> 10.244.0.2:9153" -j KUBE-SEP-N4G2XR5TDX7PQE7P + -A KUBE-SVC-NPX46M4PTMTKRN6Y ! -s 10.244.0.0/16 -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-MARK-MASQ + -A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https -> 192.168.85.2:8443" -j KUBE-SEP-BL4BN7UU5M2OIW4P + -A KUBE-SVC-TCOU7JCQXEZGVUNU ! 
-s 10.244.0.0/16 -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-TCOU7JCQXEZGVUNU -m comment --comment "kube-system/kube-dns:dns -> 10.244.0.2:53" -j KUBE-SEP-YIL6JZP7A3QYXJU2 + -A KUBE-SVC-WDP22YZC5S6MZWYX ! -s 10.244.0.0/16 -d 10.98.198.69/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-MARK-MASQ + -A KUBE-SVC-WDP22YZC5S6MZWYX -m comment --comment "default/netcat -> 10.244.0.3:8080" -j KUBE-SEP-UTWFOSUDHOCXYA2F + COMMIT + # Completed on Sun Nov 2 23:24:45 2025 + + + >>> host: iptables table nat: + Chain PREROUTING (policy ACCEPT 37 packets, 2220 bytes) + pkts bytes target prot opt in out source destination + 55 3460 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 1 85 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.85.1 + 45 2700 DOCKER 0 -- * * 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type LOCAL + + Chain INPUT (policy ACCEPT 37 packets, 2220 bytes) + pkts bytes target prot opt in out source destination + + Chain OUTPUT (policy ACCEPT 59 packets, 3540 bytes) + pkts bytes target prot opt in out source destination + 778 68670 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 646 62915 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.85.1 + 86 5160 DOCKER 0 -- * * 0.0.0.0/0 !127.0.0.0/8 ADDRTYPE match dst-type LOCAL + + Chain POSTROUTING (policy ACCEPT 68 packets, 4215 bytes) + pkts bytes target prot opt in out source destination + 469 40690 FLANNEL-POSTRTG 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* flanneld masq */ + 788 69405 KUBE-POSTROUTING 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */ + 0 0 MASQUERADE 0 -- * !docker0 172.17.0.0/16 0.0.0.0/0 + 0 0 DOCKER_POSTROUTING 0 -- * * 0.0.0.0/0 192.168.85.1 + + Chain DOCKER (2 references) + pkts bytes target prot opt in out source destination + 0 0 RETURN 0 -- docker0 * 0.0.0.0/0 0.0.0.0/0 + + Chain DOCKER_OUTPUT (2 references) + pkts bytes target prot opt in out source destination + 0 0 DNAT 6 -- * * 0.0.0.0/0 192.168.85.1 tcp dpt:53 to:127.0.0.11:37659 + 647 63000 DNAT 17 -- * * 0.0.0.0/0 192.168.85.1 udp dpt:53 to:127.0.0.11:49613 + + Chain DOCKER_POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 0 0 SNAT 6 -- * * 127.0.0.11 0.0.0.0/0 tcp spt:37659 to:192.168.85.1:53 + 0 0 SNAT 17 -- * * 127.0.0.11 0.0.0.0/0 udp spt:49613 to:192.168.85.1:53 + + Chain FLANNEL-POSTRTG (1 references) + pkts bytes target prot opt in out source destination + 2 120 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0 mark match 0x4000/0x4000 /* flanneld masq */ + 11 795 RETURN 0 -- * * 10.244.0.0/16 10.244.0.0/16 /* flanneld masq */ + 0 0 MASQUERADE 0 -- * * 10.244.0.0/16 !224.0.0.0/4 /* flanneld masq */ random-fully + 0 0 RETURN 0 -- * * !10.244.0.0/16 10.244.0.0/24 /* flanneld masq */ + 0 0 MASQUERADE 0 -- * * !10.244.0.0/16 10.244.0.0/16 /* flanneld masq */ random-fully + + Chain KUBE-KUBELET-CANARY (0 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-MARK-MASQ (10 references) + pkts bytes target prot opt in out source destination + 1 60 MARK 0 -- * * 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000 + + Chain KUBE-NODEPORTS (1 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 68 4215 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0 mark match ! 
0x4000/0x4000 + 1 60 MARK 0 -- * * 0.0.0.0/0 0.0.0.0/0 MARK xor 0x4000 + 1 60 MASQUERADE 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ random-fully + + Chain KUBE-PROXY-CANARY (0 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-SEP-BL4BN7UU5M2OIW4P (1 references) + pkts bytes target prot opt in out source destination + 2 120 KUBE-MARK-MASQ 0 -- * * 192.168.85.2 0.0.0.0/0 /* default/kubernetes:https */ + 5 300 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https */ tcp to:192.168.85.2:8443 + + Chain KUBE-SEP-IT2ZTR26TO4XFPTO (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ tcp to:10.244.0.2:53 + + Chain KUBE-SEP-N4G2XR5TDX7PQE7P (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:metrics */ + 0 0 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:metrics */ tcp to:10.244.0.2:9153 + + Chain KUBE-SEP-UTWFOSUDHOCXYA2F (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 0 -- * * 10.244.0.3 0.0.0.0/0 /* default/netcat */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat */ tcp to:10.244.0.3:8080 + + Chain KUBE-SEP-YIL6JZP7A3QYXJU2 (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:dns */ + 8 615 DNAT 17 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns */ udp to:10.244.0.2:53 + + Chain KUBE-SERVICES (2 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-SVC-NPX46M4PTMTKRN6Y 6 -- * * 0.0.0.0/0 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 8 615 KUBE-SVC-TCOU7JCQXEZGVUNU 17 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 1 60 KUBE-SVC-ERIFXISQEP7F7OF4 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 0 0 KUBE-SVC-JD5MR3NA4I4DYORP 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 1 60 KUBE-SVC-WDP22YZC5S6MZWYX 6 -- * * 0.0.0.0/0 10.98.198.69 /* default/netcat cluster IP */ tcp dpt:8080 + 95 5700 KUBE-NODEPORTS 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL + + Chain KUBE-SVC-ERIFXISQEP7F7OF4 (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 1 60 KUBE-SEP-IT2ZTR26TO4XFPTO 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp -> 10.244.0.2:53 */ + + Chain KUBE-SVC-JD5MR3NA4I4DYORP (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 0 0 KUBE-SEP-N4G2XR5TDX7PQE7P 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:metrics -> 10.244.0.2:9153 */ + + Chain KUBE-SVC-NPX46M4PTMTKRN6Y (1 references) + pkts bytes target prot opt in out source destination + 2 120 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 5 300 KUBE-SEP-BL4BN7UU5M2OIW4P 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https -> 192.168.85.2:8443 */ + + Chain 
KUBE-SVC-TCOU7JCQXEZGVUNU (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 17 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 8 615 KUBE-SEP-YIL6JZP7A3QYXJU2 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns -> 10.244.0.2:53 */ + + Chain KUBE-SVC-WDP22YZC5S6MZWYX (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.98.198.69 /* default/netcat cluster IP */ tcp dpt:8080 + 1 60 KUBE-SEP-UTWFOSUDHOCXYA2F 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat -> 10.244.0.3:8080 */ + + + >>> k8s: describe flannel daemon set: + Name: kube-flannel-ds + Namespace: kube-flannel + Selector: app=flannel + Node-Selector: + Labels: app=flannel + tier=node + Annotations: deprecated.daemonset.template.generation: 1 + Desired Number of Nodes Scheduled: 1 + Current Number of Nodes Scheduled: 1 + Number of Nodes Scheduled with Up-to-date Pods: 1 + Number of Nodes Scheduled with Available Pods: 1 + Number of Nodes Misscheduled: 0 + Pods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed + Pod Template: + Labels: app=flannel + tier=node + Service Account: flannel + Init Containers: + install-cni-plugin: + Image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0 + Port: + Host Port: + Command: + cp + Args: + -f + /flannel + /opt/cni/bin/flannel + Environment: + Mounts: + /opt/cni/bin from cni-plugin (rw) + install-cni: + Image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2 + Port: + Host Port: + Command: + cp + Args: + -f + /etc/kube-flannel/cni-conf.json + /etc/cni/net.d/10-flannel.conflist + Environment: + Mounts: + /etc/cni/net.d from cni (rw) + /etc/kube-flannel/ from flannel-cfg (rw) + Containers: + kube-flannel: + Image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2 + Port: + Host Port: + Command: + /opt/bin/flanneld + Args: + --ip-masq + --kube-subnet-mgr + Limits: + cpu: 100m + memory: 50Mi + Requests: + cpu: 100m + memory: 50Mi + Environment: + POD_NAME: (v1:metadata.name) + POD_NAMESPACE: (v1:metadata.namespace) + EVENT_QUEUE_DEPTH: 5000 + Mounts: + /etc/kube-flannel/ from flannel-cfg (rw) + /run/flannel from run (rw) + /run/xtables.lock from xtables-lock (rw) + Volumes: + run: + Type: HostPath (bare host directory volume) + Path: /run/flannel + HostPathType: + cni-plugin: + Type: HostPath (bare host directory volume) + Path: /opt/cni/bin + HostPathType: + cni: + Type: HostPath (bare host directory volume) + Path: /etc/cni/net.d + HostPathType: + flannel-cfg: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-flannel-cfg + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + Priority Class Name: system-node-critical + Node-Selectors: + Tolerations: :NoSchedule op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 41s daemonset-controller Created pod: kube-flannel-ds-7psrd + + + >>> k8s: describe flannel pod(s): + Name: kube-flannel-ds-7psrd + Namespace: kube-flannel + Priority: 2000001000 + Priority Class Name: system-node-critical + Service Account: flannel + Node: custom-flannel-999044/192.168.85.2 + Start Time: Sun, 02 Nov 2025 23:24:05 +0000 + Labels: app=flannel + controller-revision-hash=75cb9c56bb + pod-template-generation=1 + tier=node + Annotations: + Status: Running + IP: 192.168.85.2 + IPs: + IP: 192.168.85.2 + Controlled By: DaemonSet/kube-flannel-ds + Init 
Containers: + install-cni-plugin: + Container ID: docker://3044066f312b2f1f7b48eb421b616b837b97d779b96538b032a5d9e49d06a614 + Image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0 + Image ID: docker-pullable://rancher/mirrored-flannelcni-flannel-cni-plugin@sha256:28d3a6be9f450282bf42e4dad143d41da23e3d91f66f19c01ee7fd21fd17cb2b + Port: + Host Port: + Command: + cp + Args: + -f + /flannel + /opt/cni/bin/flannel + State: Terminated + Reason: Completed + Exit Code: 0 + Started: Sun, 02 Nov 2025 23:24:12 +0000 + Finished: Sun, 02 Nov 2025 23:24:12 +0000 + Ready: True + Restart Count: 0 + Environment: + Mounts: + /opt/cni/bin from cni-plugin (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-qz5sc (ro) + install-cni: + Container ID: docker://a3c39da6d3ded93212e61c228e169aacac05678f6f3b1d77b75b0c001ffdaa13 + Image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2 + Image ID: docker-pullable://rancher/mirrored-flannelcni-flannel@sha256:ec0f0b7430c8370c9f33fe76eb0392c1ad2ddf4ccaf2b9f43995cca6c94d3832 + Port: + Host Port: + Command: + cp + Args: + -f + /etc/kube-flannel/cni-conf.json + /etc/cni/net.d/10-flannel.conflist + State: Terminated + Reason: Completed + Exit Code: 0 + Started: Sun, 02 Nov 2025 23:24:24 +0000 + Finished: Sun, 02 Nov 2025 23:24:24 +0000 + Ready: True + Restart Count: 0 + Environment: + Mounts: + /etc/cni/net.d from cni (rw) + /etc/kube-flannel/ from flannel-cfg (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-qz5sc (ro) + Containers: + kube-flannel: + Container ID: docker://4aa184bfd0beb3338c5482f03997f0c457de114724f3f21f4d9f1d5bcdbfb292 + Image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2 + Image ID: docker-pullable://rancher/mirrored-flannelcni-flannel@sha256:ec0f0b7430c8370c9f33fe76eb0392c1ad2ddf4ccaf2b9f43995cca6c94d3832 + Port: + Host Port: + Command: + /opt/bin/flanneld + Args: + --ip-masq + --kube-subnet-mgr + State: Running + Started: Sun, 02 Nov 2025 23:24:25 +0000 + Ready: True + Restart Count: 0 + Limits: + cpu: 100m + memory: 50Mi + Requests: + cpu: 100m + memory: 50Mi + Environment: + POD_NAME: kube-flannel-ds-7psrd (v1:metadata.name) + POD_NAMESPACE: kube-flannel (v1:metadata.namespace) + EVENT_QUEUE_DEPTH: 5000 + Mounts: + /etc/kube-flannel/ from flannel-cfg (rw) + /run/flannel from run (rw) + /run/xtables.lock from xtables-lock (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-qz5sc (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + run: + Type: HostPath (bare host directory volume) + Path: /run/flannel + HostPathType: + cni-plugin: + Type: HostPath (bare host directory volume) + Path: /opt/cni/bin + HostPathType: + cni: + Type: HostPath (bare host directory volume) + Path: /etc/cni/net.d + HostPathType: + flannel-cfg: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-flannel-cfg + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + kube-api-access-qz5sc: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: Burstable + Node-Selectors: + Tolerations: :NoSchedule op=Exists + node.kubernetes.io/disk-pressure:NoSchedule op=Exists + node.kubernetes.io/memory-pressure:NoSchedule op=Exists + 
node.kubernetes.io/network-unavailable:NoSchedule op=Exists + node.kubernetes.io/not-ready:NoExecute op=Exists + node.kubernetes.io/pid-pressure:NoSchedule op=Exists + node.kubernetes.io/unreachable:NoExecute op=Exists + node.kubernetes.io/unschedulable:NoSchedule op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 41s default-scheduler Successfully assigned kube-flannel/kube-flannel-ds-7psrd to custom-flannel-999044 + Normal Pulling 38s kubelet Pulling image "docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0" + Normal Pulled 34s kubelet Successfully pulled image "docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0" in 4.216s (4.216s including waiting). Image size: 8087907 bytes. + Normal Created 34s kubelet Created container: install-cni-plugin + Normal Started 34s kubelet Started container install-cni-plugin + Normal Pulling 34s kubelet Pulling image "docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2" + Normal Pulled 22s kubelet Successfully pulled image "docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2" in 11.218s (11.218s including waiting). Image size: 59604122 bytes. + Normal Created 22s kubelet Created container: install-cni + Normal Started 22s kubelet Started container install-cni + Normal Pulled 22s kubelet Container image "docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2" already present on machine + Normal Created 21s kubelet Created container: kube-flannel + Normal Started 21s kubelet Started container kube-flannel + + + >>> k8s: flannel container(s) logs (current): + [pod/kube-flannel-ds-7psrd/kube-flannel] I1102 23:24:26.192355 1 kube.go:452] Creating the node lease for IPv4. This is the n.Spec.PodCIDRs: [10.244.0.0/24] + [pod/kube-flannel-ds-7psrd/kube-flannel] I1102 23:24:26.192634 1 main.go:416] Current network or subnet (10.244.0.0/16, 10.244.0.0/24) is not equal to previous one (0.0.0.0/0, 0.0.0.0/0), trying to recycle old iptables rules + [pod/kube-flannel-ds-7psrd/kube-flannel] I1102 23:24:26.354092 1 main.go:342] Setting up masking rules + [pod/kube-flannel-ds-7psrd/kube-flannel] I1102 23:24:26.355707 1 main.go:364] Changing default FORWARD chain policy to ACCEPT + [pod/kube-flannel-ds-7psrd/kube-flannel] I1102 23:24:26.357205 1 main.go:379] Wrote subnet file to /run/flannel/subnet.env + [pod/kube-flannel-ds-7psrd/kube-flannel] I1102 23:24:26.357220 1 main.go:383] Running backend. 
+ [pod/kube-flannel-ds-7psrd/kube-flannel] I1102 23:24:26.357304 1 vxlan_network.go:61] watching for new subnet leases + [pod/kube-flannel-ds-7psrd/kube-flannel] I1102 23:24:26.361424 1 main.go:404] Waiting for all goroutines to exit + [pod/kube-flannel-ds-7psrd/kube-flannel] I1102 23:24:26.552699 1 iptables.go:270] bootstrap done + [pod/kube-flannel-ds-7psrd/kube-flannel] I1102 23:24:26.650784 1 iptables.go:270] bootstrap done + + + >>> k8s: flannel container(s) logs (previous): + error: previous terminated container "install-cni-plugin" in pod "kube-flannel-ds-7psrd" not found + error: previous terminated container "install-cni" in pod "kube-flannel-ds-7psrd" not found + error: previous terminated container "kube-flannel" in pod "kube-flannel-ds-7psrd" not found + + + >>> host: /run/flannel/subnet.env: + FLANNEL_NETWORK=10.244.0.0/16 + FLANNEL_SUBNET=10.244.0.1/24 + FLANNEL_MTU=1450 + FLANNEL_IPMASQ=true + + + >>> host: /etc/kube-flannel/cni-conf.json: + cat: /etc/kube-flannel/cni-conf.json: No such file or directory + ssh: Process exited with status 1 + + + >>> k8s: describe kube-proxy daemon set: + Name: kube-proxy + Namespace: kube-system + Selector: k8s-app=kube-proxy + Node-Selector: kubernetes.io/os=linux + Labels: k8s-app=kube-proxy + Annotations: deprecated.daemonset.template.generation: 1 + Desired Number of Nodes Scheduled: 1 + Current Number of Nodes Scheduled: 1 + Number of Nodes Scheduled with Up-to-date Pods: 1 + Number of Nodes Scheduled with Available Pods: 1 + Number of Nodes Misscheduled: 0 + Pods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed + Pod Template: + Labels: k8s-app=kube-proxy + Service Account: kube-proxy + Containers: + kube-proxy: + Image: registry.k8s.io/kube-proxy:v1.34.1 + Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + --hostname-override=$(NODE_NAME) + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + Priority Class Name: system-node-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 41s daemonset-controller Created pod: kube-proxy-zqtl2 + + + >>> k8s: describe kube-proxy pod(s): + Name: kube-proxy-zqtl2 + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Service Account: kube-proxy + Node: custom-flannel-999044/192.168.85.2 + Start Time: Sun, 02 Nov 2025 23:24:05 +0000 + Labels: controller-revision-hash=66486579fc + k8s-app=kube-proxy + pod-template-generation=1 + Annotations: + Status: Running + IP: 192.168.85.2 + IPs: + IP: 192.168.85.2 + Controlled By: DaemonSet/kube-proxy + Containers: + kube-proxy: + Container ID: docker://b0687d9a66f579eecfe51562a01c58852ae295a0974b4a52a42b5b79c8ede5ae + Image: registry.k8s.io/kube-proxy:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-proxy@sha256:913cc83ca0b5588a81d86ce8eedeb3ed1e9c1326e81852a1ea4f622b74ff749a + Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + 
--hostname-override=$(NODE_NAME) + State: Running + Started: Sun, 02 Nov 2025 23:24:08 +0000 + Ready: True + Restart Count: 0 + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-lsjzh (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + kube-api-access-lsjzh: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + node.kubernetes.io/disk-pressure:NoSchedule op=Exists + node.kubernetes.io/memory-pressure:NoSchedule op=Exists + node.kubernetes.io/network-unavailable:NoSchedule op=Exists + node.kubernetes.io/not-ready:NoExecute op=Exists + node.kubernetes.io/pid-pressure:NoSchedule op=Exists + node.kubernetes.io/unreachable:NoExecute op=Exists + node.kubernetes.io/unschedulable:NoSchedule op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 41s default-scheduler Successfully assigned kube-system/kube-proxy-zqtl2 to custom-flannel-999044 + Normal Pulled 38s kubelet Container image "registry.k8s.io/kube-proxy:v1.34.1" already present on machine + Normal Created 38s kubelet Created container: kube-proxy + Normal Started 38s kubelet Started container kube-proxy + + + >>> k8s: kube-proxy logs: + I1102 23:24:08.541093 1 server_linux.go:53] "Using iptables proxy" + I1102 23:24:08.590678 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache" + I1102 23:24:08.691718 1 shared_informer.go:356] "Caches are synced" controller="node informer cache" + I1102 23:24:08.691738 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.85.2"] + E1102 23:24:08.691774 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. 
Consider using `--nodeport-addresses primary`" + I1102 23:24:08.707581 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4" + I1102 23:24:08.707601 1 server_linux.go:132] "Using iptables Proxier" + I1102 23:24:08.711365 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4" + I1102 23:24:08.711535 1 server.go:527] "Version info" version="v1.34.1" + I1102 23:24:08.711551 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + I1102 23:24:08.712906 1 config.go:200] "Starting service config controller" + I1102 23:24:08.712954 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config" + I1102 23:24:08.712974 1 config.go:106] "Starting endpoint slice config controller" + I1102 23:24:08.712986 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config" + I1102 23:24:08.713077 1 config.go:309] "Starting node config controller" + I1102 23:24:08.713083 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config" + I1102 23:24:08.713146 1 config.go:403] "Starting serviceCIDR config controller" + I1102 23:24:08.713151 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config" + I1102 23:24:08.813777 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config" + I1102 23:24:08.813814 1 shared_informer.go:356] "Caches are synced" controller="service config" + I1102 23:24:08.813825 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config" + I1102 23:24:08.813906 1 shared_informer.go:356] "Caches are synced" controller="node config" + + + >>> host: kubelet daemon status: + ● kubelet.service - kubelet: The Kubernetes Node Agent + Loaded: loaded (/lib/systemd/system/kubelet.service; disabled; preset: enabled) + Drop-In: /etc/systemd/system/kubelet.service.d + └─10-kubeadm.conf + Active: active (running) since Sun 2025-11-02 23:23:59 UTC; 47s ago + Docs: http://kubernetes.io/docs/ + Main PID: 2220 (kubelet) + Tasks: 16 (limit: 629145) + Memory: 33.1M + CPU: 1.138s + CGroup: /system.slice/kubelet.service + └─2220 /var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=custom-flannel-999044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2 + + Nov 02 23:24:26 custom-flannel-999044 kubelet[2220]: I1102 23:24:26.610284 2220 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-flannel/kube-flannel-ds-7psrd" podStartSLOduration=5.576483434 podStartE2EDuration="21.610271086s" podCreationTimestamp="2025-11-02 23:24:05 +0000 UTC" firstStartedPulling="2025-11-02 23:24:08.127398765 +0000 UTC m=+8.322962532" lastFinishedPulling="2025-11-02 23:24:24.161186426 +0000 UTC m=+24.356750184" observedRunningTime="2025-11-02 23:24:26.010258389 +0000 UTC m=+26.205822160" watchObservedRunningTime="2025-11-02 23:24:26.610271086 +0000 UTC m=+26.805834851" + Nov 02 23:24:26 custom-flannel-999044 kubelet[2220]: I1102 23:24:26.638553 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-96frb\" (UniqueName: \"kubernetes.io/projected/e1b7ebe2-46c6-4042-b843-2f198af0d297-kube-api-access-96frb\") pod \"coredns-66bc5c9577-hcgf7\" (UID: \"e1b7ebe2-46c6-4042-b843-2f198af0d297\") " pod="kube-system/coredns-66bc5c9577-hcgf7" + Nov 02 23:24:26 custom-flannel-999044 kubelet[2220]: I1102 23:24:26.638590 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/bccd5c30-a6d1-43b9-8fbe-c052c1ab0357-tmp\") pod \"storage-provisioner\" (UID: \"bccd5c30-a6d1-43b9-8fbe-c052c1ab0357\") " pod="kube-system/storage-provisioner" + Nov 02 23:24:26 custom-flannel-999044 kubelet[2220]: I1102 23:24:26.638608 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ltbgc\" (UniqueName: \"kubernetes.io/projected/bccd5c30-a6d1-43b9-8fbe-c052c1ab0357-kube-api-access-ltbgc\") pod \"storage-provisioner\" (UID: \"bccd5c30-a6d1-43b9-8fbe-c052c1ab0357\") " pod="kube-system/storage-provisioner" + Nov 02 23:24:26 custom-flannel-999044 kubelet[2220]: I1102 23:24:26.638623 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e1b7ebe2-46c6-4042-b843-2f198af0d297-config-volume\") pod \"coredns-66bc5c9577-hcgf7\" (UID: \"e1b7ebe2-46c6-4042-b843-2f198af0d297\") " pod="kube-system/coredns-66bc5c9577-hcgf7" + Nov 02 23:24:28 custom-flannel-999044 kubelet[2220]: I1102 23:24:28.023216 2220 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-hcgf7" podStartSLOduration=22.023202754 podStartE2EDuration="22.023202754s" podCreationTimestamp="2025-11-02 23:24:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:28.02294131 +0000 UTC m=+28.218505093" watchObservedRunningTime="2025-11-02 23:24:28.023202754 +0000 UTC m=+28.218766520" + Nov 02 23:24:28 custom-flannel-999044 kubelet[2220]: I1102 23:24:28.033008 2220 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=23.032994939 podStartE2EDuration="23.032994939s" podCreationTimestamp="2025-11-02 23:24:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:28.032857836 +0000 UTC m=+28.228421607" watchObservedRunningTime="2025-11-02 23:24:28.032994939 +0000 UTC m=+28.228558715" + Nov 02 23:24:30 custom-flannel-999044 kubelet[2220]: I1102 23:24:30.358185 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfjmt\" (UniqueName: \"kubernetes.io/projected/4e902ac7-c590-4309-bd14-7af633f026b5-kube-api-access-kfjmt\") pod \"netcat-cd4db9dbf-ntqvl\" (UID: \"4e902ac7-c590-4309-bd14-7af633f026b5\") " pod="default/netcat-cd4db9dbf-ntqvl" + Nov 02 23:24:32 custom-flannel-999044 kubelet[2220]: I1102 23:24:32.051650 2220 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/netcat-cd4db9dbf-ntqvl" podStartSLOduration=1.2060038 podStartE2EDuration="2.051637139s" podCreationTimestamp="2025-11-02 23:24:30 +0000 UTC" firstStartedPulling="2025-11-02 23:24:30.673666919 +0000 UTC m=+30.869230670" lastFinishedPulling="2025-11-02 23:24:31.519300242 +0000 UTC m=+31.714864009" 
observedRunningTime="2025-11-02 23:24:32.051450868 +0000 UTC m=+32.247014642" watchObservedRunningTime="2025-11-02 23:24:32.051637139 +0000 UTC m=+32.247200903" + Nov 02 23:24:43 custom-flannel-999044 kubelet[2220]: E1102 23:24:43.950634 2220 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp [::1]:55560->[::1]:43357: write tcp [::1]:55560->[::1]:43357: write: broken pipe + + + >>> host: kubelet daemon config: + # ]8;;file://custom-flannel-999044/lib/systemd/system/kubelet.service/lib/systemd/system/kubelet.service]8;; + [Unit] + Description=kubelet: The Kubernetes Node Agent + Documentation=http://kubernetes.io/docs/ + StartLimitIntervalSec=0 + + [Service] + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet + Restart=always + # Tuned for local dev: faster than upstream default (10s), but slower than systemd default (100ms) + RestartSec=600ms + + [Install] + WantedBy=multi-user.target + + # ]8;;file://custom-flannel-999044/etc/systemd/system/kubelet.service.d/10-kubeadm.conf/etc/systemd/system/kubelet.service.d/10-kubeadm.conf]8;; + [Unit] + Wants=docker.socket + + [Service] + ExecStart= + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=custom-flannel-999044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2 + + [Install] + + + >>> k8s: kubelet logs: + Nov 02 23:23:51 custom-flannel-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 404. + Nov 02 23:23:51 custom-flannel-999044 kubelet[1538]: E1102 23:23:51.818824 1538 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:23:51 custom-flannel-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:23:51 custom-flannel-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:23:52 custom-flannel-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 1. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:23:52 custom-flannel-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. 
+ ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 457 and the job result is done. + Nov 02 23:23:52 custom-flannel-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 457. + Nov 02 23:23:52 custom-flannel-999044 kubelet[1624]: E1102 23:23:52.525061 1624 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:23:52 custom-flannel-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:23:52 custom-flannel-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:23:53 custom-flannel-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 2. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:23:53 custom-flannel-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 510 and the job result is done. + Nov 02 23:23:53 custom-flannel-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 510. + Nov 02 23:23:53 custom-flannel-999044 kubelet[1707]: E1102 23:23:53.231319 1707 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:23:53 custom-flannel-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. 
+ ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:23:53 custom-flannel-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:23:53 custom-flannel-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 3. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:23:53 custom-flannel-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 563 and the job result is done. + Nov 02 23:23:53 custom-flannel-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 563. + Nov 02 23:23:54 custom-flannel-999044 kubelet[1718]: E1102 23:23:54.025350 1718 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:23:54 custom-flannel-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:23:54 custom-flannel-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:23:54 custom-flannel-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 4. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:23:54 custom-flannel-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 616 and the job result is done. + Nov 02 23:23:54 custom-flannel-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. 
+ ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 616. + Nov 02 23:23:54 custom-flannel-999044 kubelet[1727]: E1102 23:23:54.774522 1727 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:23:54 custom-flannel-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:23:54 custom-flannel-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:23:55 custom-flannel-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 669 and the job result is done. + Nov 02 23:23:55 custom-flannel-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 670. + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.486664 1759 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.486715 1759 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.486737 1759 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.486742 1759 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." 
+ Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.486907 1759 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.489934 1759 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.489926 1759 certificate_manager.go:596] "Failed while requesting a signed certificate from the control plane" err="cannot create certificate signing request: Post \"https://192.168.85.2:8443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 192.168.85.2:8443: connect: connection refused" logger="kubernetes.io/kube-apiserver-client-kubelet.UnhandledError" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.492586 1759 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.496057 1759 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. Defaulting to /" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.496074 1759 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.496209 1759 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.496224 1759 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"custom-flannel-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.496313 1759 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.496318 1759 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.496379 1759 container_manager_linux.go:315] "Creating Dynamic Resource Allocation (DRA) manager" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.497058 1759 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.497198 1759 kubelet.go:475] "Attempting to sync node with API server" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.497210 1759 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.497236 1759 kubelet.go:387] 
"Adding apiserver pod source" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.497278 1759 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.497867 1759 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: Get \"https://192.168.85.2:8443/api/v1/nodes?fieldSelector=metadata.name%3Dcustom-flannel-999044&limit=500&resourceVersion=0\": dial tcp 192.168.85.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.498099 1759 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: Get \"https://192.168.85.2:8443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 192.168.85.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.499168 1759 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.499488 1759 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.499503 1759 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: W1102 23:23:55.499536 1759 probe.go:272] Flexvolume plugin directory at /usr/libexec/kubernetes/kubelet-plugins/volume/exec/ does not exist. Recreating. 
+ Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.500110 1759 server.go:1262] "Started kubelet" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.500148 1759 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.500204 1759 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.500280 1759 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.500493 1759 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.500484 1759 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://192.168.85.2:8443/api/v1/namespaces/default/events\": dial tcp 192.168.85.2:8443: connect: connection refused" event="&Event{ObjectMeta:{custom-flannel-999044.1874541c7a857ffe default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:custom-flannel-999044,UID:custom-flannel-999044,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:custom-flannel-999044,},FirstTimestamp:2025-11-02 23:23:55.500093438 +0000 UTC m=+0.211430116,LastTimestamp:2025-11-02 23:23:55.500093438 +0000 UTC m=+0.211430116,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:custom-flannel-999044,}" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.500707 1759 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.500836 1759 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.500984 1759 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"custom-flannel-999044\" not found" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.501036 1759 volume_manager.go:313] "Starting Kubelet Volume Manager" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.502161 1759 server.go:310] "Adding debug handlers to kubelet server" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.502345 1759 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.502616 1759 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.502887 1759 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: Get \"https://192.168.85.2:8443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 192.168.85.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.502958 1759 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://192.168.85.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/custom-flannel-999044?timeout=10s\": dial tcp 192.168.85.2:8443: connect: connection refused" interval="200ms" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.504024 1759 factory.go:223] Registration of the systemd container factory successfully + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.504152 1759 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.505692 1759 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv6" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.506056 1759 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.511939 1759 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.512016 1759 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.512030 1759 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.512430 1759 policy_none.go:49] "None policy: Start" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.512443 1759 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.512453 1759 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.512767 1759 policy_none.go:47] "Start" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.514494 1759 kubelet_network_linux.go:54] "Initialized iptables rules." 
protocol="IPv4" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.514510 1759 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.514534 1759 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.514578 1759 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.514899 1759 reflector.go:205] "Failed to watch" err="failed to list *v1.RuntimeClass: Get \"https://192.168.85.2:8443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 192.168.85.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.542567 1759 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.542654 1759 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.542662 1759 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.542792 1759 plugin_manager.go:118] "Starting Kubelet Plugin Manager" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.543306 1759 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." 
err="no imagefs label for configured runtime" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.543344 1759 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"custom-flannel-999044\" not found" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.625437 1759 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"custom-flannel-999044\" not found" node="custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.628607 1759 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"custom-flannel-999044\" not found" node="custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.637633 1759 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"custom-flannel-999044\" not found" node="custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.640376 1759 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"custom-flannel-999044\" not found" node="custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.643835 1759 kubelet_node_status.go:75] "Attempting to register node" node="custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.644126 1759 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.85.2:8443/api/v1/nodes\": dial tcp 192.168.85.2:8443: connect: connection refused" node="custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.703427 1759 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.85.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/custom-flannel-999044?timeout=10s\": dial tcp 192.168.85.2:8443: connect: connection refused" interval="400ms" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.803954 1759 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/71b296ad899e92ac3db44ae7df3030a9-etc-ca-certificates\") pod \"kube-apiserver-custom-flannel-999044\" (UID: \"71b296ad899e92ac3db44ae7df3030a9\") " pod="kube-system/kube-apiserver-custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.803989 1759 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/71b296ad899e92ac3db44ae7df3030a9-usr-local-share-ca-certificates\") pod \"kube-apiserver-custom-flannel-999044\" (UID: \"71b296ad899e92ac3db44ae7df3030a9\") " pod="kube-system/kube-apiserver-custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.804008 1759 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/71b296ad899e92ac3db44ae7df3030a9-usr-share-ca-certificates\") pod \"kube-apiserver-custom-flannel-999044\" (UID: \"71b296ad899e92ac3db44ae7df3030a9\") " pod="kube-system/kube-apiserver-custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.804024 
1759 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/e9844022c74747ce7bef1985a3669408-flexvolume-dir\") pod \"kube-controller-manager-custom-flannel-999044\" (UID: \"e9844022c74747ce7bef1985a3669408\") " pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.804042 1759 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/e9844022c74747ce7bef1985a3669408-k8s-certs\") pod \"kube-controller-manager-custom-flannel-999044\" (UID: \"e9844022c74747ce7bef1985a3669408\") " pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.804056 1759 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/e9844022c74747ce7bef1985a3669408-kubeconfig\") pod \"kube-controller-manager-custom-flannel-999044\" (UID: \"e9844022c74747ce7bef1985a3669408\") " pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.804069 1759 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/e9844022c74747ce7bef1985a3669408-usr-share-ca-certificates\") pod \"kube-controller-manager-custom-flannel-999044\" (UID: \"e9844022c74747ce7bef1985a3669408\") " pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.804081 1759 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/bbb3f344d7fd960cd04f39c3970f08dd-etcd-certs\") pod \"etcd-custom-flannel-999044\" (UID: \"bbb3f344d7fd960cd04f39c3970f08dd\") " pod="kube-system/etcd-custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.804163 1759 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/e9844022c74747ce7bef1985a3669408-etc-ca-certificates\") pod \"kube-controller-manager-custom-flannel-999044\" (UID: \"e9844022c74747ce7bef1985a3669408\") " pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.804188 1759 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/e9844022c74747ce7bef1985a3669408-usr-local-share-ca-certificates\") pod \"kube-controller-manager-custom-flannel-999044\" (UID: \"e9844022c74747ce7bef1985a3669408\") " pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.804211 1759 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/51b61f910b18c7c6b83745f2624a12e4-kubeconfig\") pod \"kube-scheduler-custom-flannel-999044\" (UID: \"51b61f910b18c7c6b83745f2624a12e4\") " pod="kube-system/kube-scheduler-custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: 
I1102 23:23:55.804239 1759 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/71b296ad899e92ac3db44ae7df3030a9-ca-certs\") pod \"kube-apiserver-custom-flannel-999044\" (UID: \"71b296ad899e92ac3db44ae7df3030a9\") " pod="kube-system/kube-apiserver-custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.804257 1759 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/71b296ad899e92ac3db44ae7df3030a9-k8s-certs\") pod \"kube-apiserver-custom-flannel-999044\" (UID: \"71b296ad899e92ac3db44ae7df3030a9\") " pod="kube-system/kube-apiserver-custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.804291 1759 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/e9844022c74747ce7bef1985a3669408-ca-certs\") pod \"kube-controller-manager-custom-flannel-999044\" (UID: \"e9844022c74747ce7bef1985a3669408\") " pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.804313 1759 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/bbb3f344d7fd960cd04f39c3970f08dd-etcd-data\") pod \"etcd-custom-flannel-999044\" (UID: \"bbb3f344d7fd960cd04f39c3970f08dd\") " pod="kube-system/etcd-custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.830197 1759 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://192.168.85.2:8443/api/v1/namespaces/default/events\": dial tcp 192.168.85.2:8443: connect: connection refused" event="&Event{ObjectMeta:{custom-flannel-999044.1874541c7a857ffe default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:custom-flannel-999044,UID:custom-flannel-999044,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:custom-flannel-999044,},FirstTimestamp:2025-11-02 23:23:55.500093438 +0000 UTC m=+0.211430116,LastTimestamp:2025-11-02 23:23:55.500093438 +0000 UTC m=+0.211430116,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:custom-flannel-999044,}" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: I1102 23:23:55.844790 1759 kubelet_node_status.go:75] "Attempting to register node" node="custom-flannel-999044" + Nov 02 23:23:55 custom-flannel-999044 kubelet[1759]: E1102 23:23:55.845059 1759 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.85.2:8443/api/v1/nodes\": dial tcp 192.168.85.2:8443: connect: connection refused" node="custom-flannel-999044" + Nov 02 23:23:56 custom-flannel-999044 kubelet[1759]: E1102 23:23:56.104679 1759 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.85.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/custom-flannel-999044?timeout=10s\": dial tcp 192.168.85.2:8443: connect: connection refused" interval="800ms" + Nov 02 23:23:56 custom-flannel-999044 kubelet[1759]: I1102 23:23:56.245761 1759 kubelet_node_status.go:75] "Attempting to register node" 
node="custom-flannel-999044" + Nov 02 23:23:56 custom-flannel-999044 kubelet[1759]: E1102 23:23:56.529739 1759 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"custom-flannel-999044\" not found" node="custom-flannel-999044" + Nov 02 23:23:56 custom-flannel-999044 kubelet[1759]: E1102 23:23:56.537082 1759 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"custom-flannel-999044\" not found" node="custom-flannel-999044" + Nov 02 23:23:56 custom-flannel-999044 kubelet[1759]: E1102 23:23:56.542279 1759 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"custom-flannel-999044\" not found" node="custom-flannel-999044" + Nov 02 23:23:56 custom-flannel-999044 kubelet[1759]: E1102 23:23:56.546281 1759 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"custom-flannel-999044\" not found" node="custom-flannel-999044" + Nov 02 23:23:57 custom-flannel-999044 kubelet[1759]: E1102 23:23:57.551670 1759 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"custom-flannel-999044\" not found" node="custom-flannel-999044" + Nov 02 23:23:57 custom-flannel-999044 kubelet[1759]: E1102 23:23:57.552136 1759 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"custom-flannel-999044\" not found" node="custom-flannel-999044" + Nov 02 23:23:57 custom-flannel-999044 kubelet[1759]: E1102 23:23:57.552811 1759 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"custom-flannel-999044\" not found" node="custom-flannel-999044" + Nov 02 23:23:57 custom-flannel-999044 kubelet[1759]: E1102 23:23:57.691779 1759 nodelease.go:49] "Failed to get node when trying to set owner ref to the node lease" err="nodes \"custom-flannel-999044\" not found" node="custom-flannel-999044" + Nov 02 23:23:57 custom-flannel-999044 kubelet[1759]: I1102 23:23:57.780881 1759 kubelet_node_status.go:78] "Successfully registered node" node="custom-flannel-999044" + Nov 02 23:23:57 custom-flannel-999044 kubelet[1759]: E1102 23:23:57.780927 1759 kubelet_node_status.go:486] "Error updating node status, will retry" err="error getting node \"custom-flannel-999044\": node \"custom-flannel-999044\" not found" + Nov 02 23:23:57 custom-flannel-999044 kubelet[1759]: I1102 23:23:57.802255 1759 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:23:57 custom-flannel-999044 kubelet[1759]: E1102 23:23:57.805016 1759 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-custom-flannel-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:23:57 custom-flannel-999044 kubelet[1759]: I1102 23:23:57.805126 1759 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-custom-flannel-999044" + Nov 02 23:23:57 custom-flannel-999044 kubelet[1759]: E1102 23:23:57.806357 1759 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-custom-flannel-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-scheduler-custom-flannel-999044" + Nov 02 23:23:57 custom-flannel-999044 
kubelet[1759]: I1102 23:23:57.806375 1759 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-custom-flannel-999044" + Nov 02 23:23:57 custom-flannel-999044 kubelet[1759]: E1102 23:23:57.807347 1759 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-custom-flannel-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/etcd-custom-flannel-999044" + Nov 02 23:23:57 custom-flannel-999044 kubelet[1759]: I1102 23:23:57.807385 1759 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-custom-flannel-999044" + Nov 02 23:23:57 custom-flannel-999044 kubelet[1759]: E1102 23:23:57.808388 1759 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-custom-flannel-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-apiserver-custom-flannel-999044" + Nov 02 23:23:58 custom-flannel-999044 kubelet[1759]: I1102 23:23:58.499097 1759 apiserver.go:52] "Watching apiserver" + Nov 02 23:23:58 custom-flannel-999044 kubelet[1759]: I1102 23:23:58.503472 1759 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:23:58 custom-flannel-999044 kubelet[1759]: I1102 23:23:58.554270 1759 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-custom-flannel-999044" + Nov 02 23:23:58 custom-flannel-999044 kubelet[1759]: E1102 23:23:58.555801 1759 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-custom-flannel-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-apiserver-custom-flannel-999044" + Nov 02 23:23:59 custom-flannel-999044 systemd[1]: Stopping kubelet.service - kubelet: The Kubernetes Node Agent... + ░░ Subject: A stop job for unit kubelet.service has begun execution + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has begun execution. + ░░  + ░░ The job identifier is 802. + Nov 02 23:23:59 custom-flannel-999044 systemd[1]: kubelet.service: Deactivated successfully. + ░░ Subject: Unit succeeded + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has successfully entered the 'dead' state. + Nov 02 23:23:59 custom-flannel-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 802 and the job result is done. + Nov 02 23:23:59 custom-flannel-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 802. 
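[Editor's note, not part of the captured journal] The systemd stop/start above appears to be the point where minikube restarts the kubelet after bring-up; the entries that follow come from the new process (PID 2220 rather than 1759). A minimal sketch, assuming SSH access to the node and that journald still holds these entries, for pulling just this restart window when debugging a similar run:

  # show only the kubelet entries around the 23:23:59 restart seen above
  minikube -p custom-flannel-999044 ssh \
    "sudo journalctl -u kubelet --no-pager --since '2025-11-02 23:23:59' --until '2025-11-02 23:24:05'"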
+ Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.830436 2220 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.830496 2220 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.830513 2220 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.830518 2220 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.830694 2220 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.831492 2220 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.832731 2220 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.835676 2220 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.839901 2220 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. Defaulting to /" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.839943 2220 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.840142 2220 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.840157 2220 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"custom-flannel-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.840276 2220 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.840287 2220 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.840315 2220 container_manager_linux.go:315] "Creating Dynamic Resource Allocation (DRA) manager" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.841123 2220 state_mem.go:36] "Initialized new in-memory state 
store" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.841289 2220 kubelet.go:475] "Attempting to sync node with API server" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.841307 2220 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.841330 2220 kubelet.go:387] "Adding apiserver pod source" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.841345 2220 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.841999 2220 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.842501 2220 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.842526 2220 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.843113 2220 server.go:1262] "Started kubelet" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.843236 2220 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.843228 2220 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.843306 2220 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.843543 2220 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.846589 2220 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.846729 2220 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.847445 2220 volume_manager.go:313] "Starting Kubelet Volume Manager" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.847527 2220 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.847687 2220 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: E1102 23:23:59.848278 2220 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"custom-flannel-999044\" not found" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.848479 2220 server.go:310] "Adding debug handlers to kubelet server" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.851175 2220 factory.go:223] Registration of the systemd container factory successfully + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.851255 2220 factory.go:221] Registration of the crio container factory failed: Get 
"http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.852909 2220 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.860038 2220 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.861320 2220 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv6" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.861336 2220 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.861364 2220 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: E1102 23:23:59.861403 2220 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.867164 2220 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.867177 2220 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.867193 2220 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.867286 2220 state_mem.go:88] "Updated default CPUSet" cpuSet="" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.867294 2220 state_mem.go:96] "Updated CPUSet assignments" assignments={} + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.867307 2220 policy_none.go:49] "None policy: Start" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.867315 2220 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.867323 2220 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.867395 2220 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.867662 2220 policy_none.go:47] "Start" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: E1102 23:23:59.872071 2220 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.872188 2220 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.872195 2220 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.872365 2220 plugin_manager.go:118] "Starting Kubelet Plugin Manager" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: E1102 23:23:59.872783 2220 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." 
err="no imagefs label for configured runtime" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.962435 2220 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-custom-flannel-999044" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.962516 2220 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.962562 2220 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-custom-flannel-999044" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.962634 2220 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-custom-flannel-999044" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.973091 2220 kubelet_node_status.go:75] "Attempting to register node" node="custom-flannel-999044" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.976519 2220 kubelet_node_status.go:124] "Node was previously registered" node="custom-flannel-999044" + Nov 02 23:23:59 custom-flannel-999044 kubelet[2220]: I1102 23:23:59.976587 2220 kubelet_node_status.go:78] "Successfully registered node" node="custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.148990 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/e9844022c74747ce7bef1985a3669408-etc-ca-certificates\") pod \"kube-controller-manager-custom-flannel-999044\" (UID: \"e9844022c74747ce7bef1985a3669408\") " pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.149016 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/e9844022c74747ce7bef1985a3669408-k8s-certs\") pod \"kube-controller-manager-custom-flannel-999044\" (UID: \"e9844022c74747ce7bef1985a3669408\") " pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.149040 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/e9844022c74747ce7bef1985a3669408-kubeconfig\") pod \"kube-controller-manager-custom-flannel-999044\" (UID: \"e9844022c74747ce7bef1985a3669408\") " pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.149053 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/e9844022c74747ce7bef1985a3669408-usr-local-share-ca-certificates\") pod \"kube-controller-manager-custom-flannel-999044\" (UID: \"e9844022c74747ce7bef1985a3669408\") " pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.149071 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/51b61f910b18c7c6b83745f2624a12e4-kubeconfig\") pod \"kube-scheduler-custom-flannel-999044\" (UID: \"51b61f910b18c7c6b83745f2624a12e4\") " 
pod="kube-system/kube-scheduler-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.149131 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/71b296ad899e92ac3db44ae7df3030a9-ca-certs\") pod \"kube-apiserver-custom-flannel-999044\" (UID: \"71b296ad899e92ac3db44ae7df3030a9\") " pod="kube-system/kube-apiserver-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.149155 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/71b296ad899e92ac3db44ae7df3030a9-k8s-certs\") pod \"kube-apiserver-custom-flannel-999044\" (UID: \"71b296ad899e92ac3db44ae7df3030a9\") " pod="kube-system/kube-apiserver-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.149173 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/e9844022c74747ce7bef1985a3669408-ca-certs\") pod \"kube-controller-manager-custom-flannel-999044\" (UID: \"e9844022c74747ce7bef1985a3669408\") " pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.149186 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/bbb3f344d7fd960cd04f39c3970f08dd-etcd-data\") pod \"etcd-custom-flannel-999044\" (UID: \"bbb3f344d7fd960cd04f39c3970f08dd\") " pod="kube-system/etcd-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.149195 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/bbb3f344d7fd960cd04f39c3970f08dd-etcd-certs\") pod \"etcd-custom-flannel-999044\" (UID: \"bbb3f344d7fd960cd04f39c3970f08dd\") " pod="kube-system/etcd-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.149204 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/71b296ad899e92ac3db44ae7df3030a9-etc-ca-certificates\") pod \"kube-apiserver-custom-flannel-999044\" (UID: \"71b296ad899e92ac3db44ae7df3030a9\") " pod="kube-system/kube-apiserver-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.149214 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/71b296ad899e92ac3db44ae7df3030a9-usr-local-share-ca-certificates\") pod \"kube-apiserver-custom-flannel-999044\" (UID: \"71b296ad899e92ac3db44ae7df3030a9\") " pod="kube-system/kube-apiserver-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.149223 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/e9844022c74747ce7bef1985a3669408-flexvolume-dir\") pod \"kube-controller-manager-custom-flannel-999044\" (UID: \"e9844022c74747ce7bef1985a3669408\") " pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: 
I1102 23:24:00.149231 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/71b296ad899e92ac3db44ae7df3030a9-usr-share-ca-certificates\") pod \"kube-apiserver-custom-flannel-999044\" (UID: \"71b296ad899e92ac3db44ae7df3030a9\") " pod="kube-system/kube-apiserver-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.149257 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/e9844022c74747ce7bef1985a3669408-usr-share-ca-certificates\") pod \"kube-controller-manager-custom-flannel-999044\" (UID: \"e9844022c74747ce7bef1985a3669408\") " pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.842482 2220 apiserver.go:52] "Watching apiserver" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.848424 2220 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.893009 2220 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.893109 2220 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.893375 2220 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: E1102 23:24:00.897756 2220 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-custom-flannel-999044\" already exists" pod="kube-system/kube-apiserver-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: E1102 23:24:00.897863 2220 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-custom-flannel-999044\" already exists" pod="kube-system/kube-controller-manager-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: E1102 23:24:00.897862 2220 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-custom-flannel-999044\" already exists" pod="kube-system/etcd-custom-flannel-999044" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.905415 2220 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-custom-flannel-999044" podStartSLOduration=1.905403978 podStartE2EDuration="1.905403978s" podCreationTimestamp="2025-11-02 23:23:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:00.905232651 +0000 UTC m=+1.100796423" watchObservedRunningTime="2025-11-02 23:24:00.905403978 +0000 UTC m=+1.100967746" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.915380 2220 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-custom-flannel-999044" podStartSLOduration=1.915370373 podStartE2EDuration="1.915370373s" podCreationTimestamp="2025-11-02 23:23:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 
23:24:00.910850858 +0000 UTC m=+1.106414624" watchObservedRunningTime="2025-11-02 23:24:00.915370373 +0000 UTC m=+1.110934140" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.919238 2220 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-scheduler-custom-flannel-999044" podStartSLOduration=1.9192289580000002 podStartE2EDuration="1.919228958s" podCreationTimestamp="2025-11-02 23:23:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:00.919191563 +0000 UTC m=+1.114755337" watchObservedRunningTime="2025-11-02 23:24:00.919228958 +0000 UTC m=+1.114792716" + Nov 02 23:24:00 custom-flannel-999044 kubelet[2220]: I1102 23:24:00.919305 2220 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-apiserver-custom-flannel-999044" podStartSLOduration=1.9193027580000002 podStartE2EDuration="1.919302758s" podCreationTimestamp="2025-11-02 23:23:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:00.915459033 +0000 UTC m=+1.111022790" watchObservedRunningTime="2025-11-02 23:24:00.919302758 +0000 UTC m=+1.114866524" + Nov 02 23:24:04 custom-flannel-999044 kubelet[2220]: I1102 23:24:04.935842 2220 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24" + Nov 02 23:24:04 custom-flannel-999044 kubelet[2220]: I1102 23:24:04.936299 2220 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24" + Nov 02 23:24:05 custom-flannel-999044 kubelet[2220]: I1102 23:24:05.581233 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/5e5e7e8f-150e-4748-8407-ec1a43bbac31-xtables-lock\") pod \"kube-proxy-zqtl2\" (UID: \"5e5e7e8f-150e-4748-8407-ec1a43bbac31\") " pod="kube-system/kube-proxy-zqtl2" + Nov 02 23:24:05 custom-flannel-999044 kubelet[2220]: I1102 23:24:05.581262 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-plugin\" (UniqueName: \"kubernetes.io/host-path/b20e4b11-dd32-4380-8056-f7505c647b08-cni-plugin\") pod \"kube-flannel-ds-7psrd\" (UID: \"b20e4b11-dd32-4380-8056-f7505c647b08\") " pod="kube-flannel/kube-flannel-ds-7psrd" + Nov 02 23:24:05 custom-flannel-999044 kubelet[2220]: I1102 23:24:05.581274 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qz5sc\" (UniqueName: \"kubernetes.io/projected/b20e4b11-dd32-4380-8056-f7505c647b08-kube-api-access-qz5sc\") pod \"kube-flannel-ds-7psrd\" (UID: \"b20e4b11-dd32-4380-8056-f7505c647b08\") " pod="kube-flannel/kube-flannel-ds-7psrd" + Nov 02 23:24:05 custom-flannel-999044 kubelet[2220]: I1102 23:24:05.581292 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/5e5e7e8f-150e-4748-8407-ec1a43bbac31-lib-modules\") pod \"kube-proxy-zqtl2\" (UID: \"5e5e7e8f-150e-4748-8407-ec1a43bbac31\") " pod="kube-system/kube-proxy-zqtl2" + Nov 02 23:24:05 custom-flannel-999044 kubelet[2220]: I1102 23:24:05.581304 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lsjzh\" (UniqueName: 
\"kubernetes.io/projected/5e5e7e8f-150e-4748-8407-ec1a43bbac31-kube-api-access-lsjzh\") pod \"kube-proxy-zqtl2\" (UID: \"5e5e7e8f-150e-4748-8407-ec1a43bbac31\") " pod="kube-system/kube-proxy-zqtl2" + Nov 02 23:24:05 custom-flannel-999044 kubelet[2220]: I1102 23:24:05.581313 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni\" (UniqueName: \"kubernetes.io/host-path/b20e4b11-dd32-4380-8056-f7505c647b08-cni\") pod \"kube-flannel-ds-7psrd\" (UID: \"b20e4b11-dd32-4380-8056-f7505c647b08\") " pod="kube-flannel/kube-flannel-ds-7psrd" + Nov 02 23:24:05 custom-flannel-999044 kubelet[2220]: I1102 23:24:05.581322 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/b20e4b11-dd32-4380-8056-f7505c647b08-xtables-lock\") pod \"kube-flannel-ds-7psrd\" (UID: \"b20e4b11-dd32-4380-8056-f7505c647b08\") " pod="kube-flannel/kube-flannel-ds-7psrd" + Nov 02 23:24:05 custom-flannel-999044 kubelet[2220]: I1102 23:24:05.581333 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/5e5e7e8f-150e-4748-8407-ec1a43bbac31-kube-proxy\") pod \"kube-proxy-zqtl2\" (UID: \"5e5e7e8f-150e-4748-8407-ec1a43bbac31\") " pod="kube-system/kube-proxy-zqtl2" + Nov 02 23:24:05 custom-flannel-999044 kubelet[2220]: I1102 23:24:05.581342 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/b20e4b11-dd32-4380-8056-f7505c647b08-run\") pod \"kube-flannel-ds-7psrd\" (UID: \"b20e4b11-dd32-4380-8056-f7505c647b08\") " pod="kube-flannel/kube-flannel-ds-7psrd" + Nov 02 23:24:05 custom-flannel-999044 kubelet[2220]: I1102 23:24:05.581369 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flannel-cfg\" (UniqueName: \"kubernetes.io/configmap/b20e4b11-dd32-4380-8056-f7505c647b08-flannel-cfg\") pod \"kube-flannel-ds-7psrd\" (UID: \"b20e4b11-dd32-4380-8056-f7505c647b08\") " pod="kube-flannel/kube-flannel-ds-7psrd" + Nov 02 23:24:09 custom-flannel-999044 kubelet[2220]: I1102 23:24:09.714084 2220 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-zqtl2" podStartSLOduration=4.714069583 podStartE2EDuration="4.714069583s" podCreationTimestamp="2025-11-02 23:24:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:08.927767171 +0000 UTC m=+9.123330948" watchObservedRunningTime="2025-11-02 23:24:09.714069583 +0000 UTC m=+9.909633488" + Nov 02 23:24:26 custom-flannel-999044 kubelet[2220]: I1102 23:24:26.595845 2220 kubelet_node_status.go:439] "Fast updating node status as it just became ready" + Nov 02 23:24:26 custom-flannel-999044 kubelet[2220]: I1102 23:24:26.610284 2220 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-flannel/kube-flannel-ds-7psrd" podStartSLOduration=5.576483434 podStartE2EDuration="21.610271086s" podCreationTimestamp="2025-11-02 23:24:05 +0000 UTC" firstStartedPulling="2025-11-02 23:24:08.127398765 +0000 UTC m=+8.322962532" lastFinishedPulling="2025-11-02 23:24:24.161186426 +0000 UTC m=+24.356750184" observedRunningTime="2025-11-02 23:24:26.010258389 +0000 UTC m=+26.205822160" watchObservedRunningTime="2025-11-02 23:24:26.610271086 +0000 UTC m=+26.805834851" + Nov 02 
23:24:26 custom-flannel-999044 kubelet[2220]: I1102 23:24:26.638553 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96frb\" (UniqueName: \"kubernetes.io/projected/e1b7ebe2-46c6-4042-b843-2f198af0d297-kube-api-access-96frb\") pod \"coredns-66bc5c9577-hcgf7\" (UID: \"e1b7ebe2-46c6-4042-b843-2f198af0d297\") " pod="kube-system/coredns-66bc5c9577-hcgf7" + Nov 02 23:24:26 custom-flannel-999044 kubelet[2220]: I1102 23:24:26.638590 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/bccd5c30-a6d1-43b9-8fbe-c052c1ab0357-tmp\") pod \"storage-provisioner\" (UID: \"bccd5c30-a6d1-43b9-8fbe-c052c1ab0357\") " pod="kube-system/storage-provisioner" + Nov 02 23:24:26 custom-flannel-999044 kubelet[2220]: I1102 23:24:26.638608 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ltbgc\" (UniqueName: \"kubernetes.io/projected/bccd5c30-a6d1-43b9-8fbe-c052c1ab0357-kube-api-access-ltbgc\") pod \"storage-provisioner\" (UID: \"bccd5c30-a6d1-43b9-8fbe-c052c1ab0357\") " pod="kube-system/storage-provisioner" + Nov 02 23:24:26 custom-flannel-999044 kubelet[2220]: I1102 23:24:26.638623 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e1b7ebe2-46c6-4042-b843-2f198af0d297-config-volume\") pod \"coredns-66bc5c9577-hcgf7\" (UID: \"e1b7ebe2-46c6-4042-b843-2f198af0d297\") " pod="kube-system/coredns-66bc5c9577-hcgf7" + Nov 02 23:24:28 custom-flannel-999044 kubelet[2220]: I1102 23:24:28.023216 2220 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-hcgf7" podStartSLOduration=22.023202754 podStartE2EDuration="22.023202754s" podCreationTimestamp="2025-11-02 23:24:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:28.02294131 +0000 UTC m=+28.218505093" watchObservedRunningTime="2025-11-02 23:24:28.023202754 +0000 UTC m=+28.218766520" + Nov 02 23:24:28 custom-flannel-999044 kubelet[2220]: I1102 23:24:28.033008 2220 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=23.032994939 podStartE2EDuration="23.032994939s" podCreationTimestamp="2025-11-02 23:24:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:28.032857836 +0000 UTC m=+28.228421607" watchObservedRunningTime="2025-11-02 23:24:28.032994939 +0000 UTC m=+28.228558715" + Nov 02 23:24:30 custom-flannel-999044 kubelet[2220]: I1102 23:24:30.358185 2220 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfjmt\" (UniqueName: \"kubernetes.io/projected/4e902ac7-c590-4309-bd14-7af633f026b5-kube-api-access-kfjmt\") pod \"netcat-cd4db9dbf-ntqvl\" (UID: \"4e902ac7-c590-4309-bd14-7af633f026b5\") " pod="default/netcat-cd4db9dbf-ntqvl" + Nov 02 23:24:32 custom-flannel-999044 kubelet[2220]: I1102 23:24:32.051650 2220 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/netcat-cd4db9dbf-ntqvl" podStartSLOduration=1.2060038 podStartE2EDuration="2.051637139s" podCreationTimestamp="2025-11-02 23:24:30 +0000 UTC" firstStartedPulling="2025-11-02 
23:24:30.673666919 +0000 UTC m=+30.869230670" lastFinishedPulling="2025-11-02 23:24:31.519300242 +0000 UTC m=+31.714864009" observedRunningTime="2025-11-02 23:24:32.051450868 +0000 UTC m=+32.247014642" watchObservedRunningTime="2025-11-02 23:24:32.051637139 +0000 UTC m=+32.247200903" + Nov 02 23:24:43 custom-flannel-999044 kubelet[2220]: E1102 23:24:43.950634 2220 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp [::1]:55560->[::1]:43357: write tcp [::1]:55560->[::1]:43357: write: broken pipe + + + >>> host: /etc/kubernetes/kubelet.conf: + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://192.168.85.2:8443 + name: mk + contexts: + - context: + cluster: mk + user: system:node:custom-flannel-999044 + name: system:node:custom-flannel-999044@mk + current-context: system:node:custom-flannel-999044@mk + kind: Config + users: + - name: system:node:custom-flannel-999044 + user: + client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem + client-key: /var/lib/kubelet/pki/kubelet-client-current.pem + + + >>> host: /var/lib/kubelet/config.yaml: + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: 
"0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + >>> k8s: kubectl config: + apiVersion: v1 + clusters: + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:23:58 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.103.2:8443 + name: calico-999044 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:24:04 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.85.2:8443 + name: custom-flannel-999044 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:24:29 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.76.2:8443 + name: false-999044 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:23:57 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.94.2:8443 + name: kindnet-999044 + contexts: + - context: + cluster: calico-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:23:58 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: calico-999044 + name: calico-999044 + - context: + cluster: custom-flannel-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:24:04 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: custom-flannel-999044 + name: custom-flannel-999044 + - context: + cluster: false-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:24:29 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: false-999044 + name: false-999044 + - context: + cluster: kindnet-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:23:57 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: kindnet-999044 + name: kindnet-999044 + current-context: false-999044 + kind: Config + users: + - name: calico-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.key + - name: custom-flannel-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.crt + client-key: 
/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.key + - name: false-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.key + - name: kindnet-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.key + + + >>> k8s: cms: + apiVersion: v1 + items: + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. 
+ creationTimestamp: "2025-11-02T23:24:05Z" + name: kube-root-ca.crt + namespace: default + resourceVersion: "336" + uid: 0ca92ad4-0c0c-43fe-944d-7f76aca11307 + - apiVersion: v1 + data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "10.244.0.0/16", + "Backend": { + "Type": "vxlan" + } + } + kind: ConfigMap + metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","data":{"cni-conf.json":"{\n \"name\": \"cbr0\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"flannel\",\n \"delegate\": {\n \"hairpinMode\": true,\n \"isDefaultGateway\": true\n }\n },\n {\n \"type\": \"portmap\",\n \"capabilities\": {\n \"portMappings\": true\n }\n }\n ]\n}\n","net-conf.json":"{\n \"Network\": \"10.244.0.0/16\",\n \"Backend\": {\n \"Type\": \"vxlan\"\n }\n}\n"},"kind":"ConfigMap","metadata":{"annotations":{},"labels":{"app":"flannel","tier":"node"},"name":"kube-flannel-cfg","namespace":"kube-flannel"}} + creationTimestamp: "2025-11-02T23:24:00Z" + labels: + app: flannel + tier: node + name: kube-flannel-cfg + namespace: kube-flannel + resourceVersion: "267" + uid: 3ab93400-0348-4688-9a4c-29c7afe59a3b + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. 
+ creationTimestamp: "2025-11-02T23:24:05Z" + name: kube-root-ca.crt + namespace: kube-flannel + resourceVersion: "337" + uid: b233f4de-ea79-4c39-9417-c20bb5ee57d0 + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:24:05Z" + name: kube-root-ca.crt + namespace: kube-node-lease + resourceVersion: "338" + uid: 45e592ae-e514-4b51-a4f6-0a79a69ffc99 + - apiVersion: v1 + data: + jws-kubeconfig-zsg3gn: eyJhbGciOiJIUzI1NiIsImtpZCI6InpzZzNnbiJ9..2AmyaXFJuIRMDZ9rhv9bMwYqenXsTCRAShrJFb1JUFo + kubeconfig: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: 
https://control-plane.minikube.internal:8443 + name: "" + contexts: null + current-context: "" + kind: Config + users: null + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:59Z" + name: cluster-info + namespace: kube-public + resourceVersion: "348" + uid: e39a845c-a8a0-4e00-ba84-63f7c981e8a3 + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:24:05Z" + name: kube-root-ca.crt + namespace: kube-public + resourceVersion: "339" + uid: 9c2f9229-4a14-4b6e-bd16-f8688bf18ceb + - apiVersion: v1 + data: + Corefile: | + .:53 { + log + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + hosts { + 192.168.85.1 host.minikube.internal + fallthrough + } + forward . 
/etc/resolv.conf { + max_concurrent 1000 + } + cache 30 { + disable success cluster.local + disable denial cluster.local + } + loop + reload + loadbalance + } + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:59Z" + name: coredns + namespace: kube-system + resourceVersion: "306" + uid: 566f7224-eb0c-48cc-9b15-fc417ba5f56c + - apiVersion: v1 + data: + client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + requestheader-allowed-names: '["front-proxy-client"]' + requestheader-client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDETCCAfmgAwIBAgIIId1Gf29skXQwDQYJKoZIhvcNAQELBQAwGTEXMBUGA1UE + AxMOZnJvbnQtcHJveHktY2EwHhcNMjUxMTAyMjMxODUyWhcNMzUxMDMxMjMyMzUy + WjAZMRcwFQYDVQQDEw5mcm9udC1wcm94eS1jYTCCASIwDQYJKoZIhvcNAQEBBQAD + ggEPADCCAQoCggEBAM8THyfwWMseDN67gRuG2C/55xbCVYUB1PuXxnFhUSCYeQJ4 + hu3QaA20eTu35Oim3H7SIXAjjTP7Vem0q5CzIs4RaBgIbw6xYLTeyTwXuZmssoaH + dpbVUmduF57hDfq7ZEqimD4D0aSmxYmTD9HyX9qSbAsJMvUoLj71oekyzW19gNQA + ZzAr/SL8Rs93nBO95K0BDSESEBqtqdrCpKxTpm4dBxgcCCW9Cl6piaUanS0MC9tB + tI5M9anUVM6mkajvBBjGZ0CLmURvCvw0o/YliWPV5aen80cpvX1je4iCH5fBS5xd + LVdmiL+70ndJ0UjpIrjsxqe6MxRxd9+wyhgUTvcCAwEAAaNdMFswDgYDVR0PAQH/ + BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFCgA/pHwV9mVUTeDiz+ + nbQ/a45fMBkGA1UdEQQSMBCCDmZyb250LXByb3h5LWNhMA0GCSqGSIb3DQEBCwUA + A4IBAQAk+dpaGNtihKdKNEEx1YSn8o5tsJ15R6MByMfrazc9FJGjj+/SQaWoPb3a + emJR3Vr5lYfzuq6wx/VzZdOZ9DIlWv90ju65MfCYgzUCgbIk4CdHDteu9ko1SXrw + LyUpyCGpSaRK/IX1aK8cCBH6kIF46dRRagSjLjXrIZA+kEOwsucOOCVr24qBS89u + xCDCZUwgL53AqQiyMbBYUG99YSSV1Yp7YE+2R4QhOfzX6MM0VOuFYh4KUviLILQW + spD/pUWZGB4g2oswqg2B2jQpAcM0F1cmKyKTV5C+o/9DUZKlCpzyM1xrI1DjIZ8q + HSKNeTZGQk85QeOzdO7Q8Gbi7kEk + -----END CERTIFICATE----- + requestheader-extra-headers-prefix: '["X-Remote-Extra-"]' + requestheader-group-headers: '["X-Remote-Group"]' + requestheader-username-headers: '["X-Remote-User"]' + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:57Z" + name: extension-apiserver-authentication + namespace: kube-system + resourceVersion: "37" + uid: ddef9828-68f3-48a9-a994-3aee9217bbb8 + - apiVersion: v1 + data: + since: "2025-11-02" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:57Z" + name: kube-apiserver-legacy-service-account-token-tracking + namespace: kube-system + resourceVersion: "33" + uid: a3bf0b88-cbaf-4687-9f0a-313700221f49 + - apiVersion: v1 + data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: 
0.0.0.0 + bindAddressHardFail: false + clientConnection: + acceptContentTypes: "" + burst: 0 + contentType: "" + kubeconfig: /var/lib/kube-proxy/kubeconfig.conf + qps: 0 + clusterCIDR: 10.244.0.0/16 + configSyncPeriod: 0s + conntrack: + maxPerCore: 0 + min: null + tcpBeLiberal: false + tcpCloseWaitTimeout: 0s + tcpEstablishedTimeout: 0s + udpStreamTimeout: 0s + udpTimeout: 0s + detectLocal: + bridgeInterface: "" + interfaceNamePrefix: "" + detectLocalMode: "" + enableProfiling: false + healthzBindAddress: "" + hostnameOverride: "" + iptables: + localhostNodePorts: null + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + ipvs: + excludeCIDRs: null + minSyncPeriod: 0s + scheduler: "" + strictARP: false + syncPeriod: 0s + tcpFinTimeout: 0s + tcpTimeout: 0s + udpTimeout: 0s + kind: KubeProxyConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + metricsBindAddress: 0.0.0.0:10249 + mode: "" + nftables: + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + nodePortAddresses: null + oomScoreAdj: null + portRange: "" + showHiddenMetricsForVersion: "" + winkernel: + enableDSR: false + forwardHealthCheckVip: false + networkName: "" + rootHnsEndpointName: "" + sourceVip: "" + kubeconfig.conf: |- + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://control-plane.minikube.internal:8443 + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:24:00Z" + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system + resourceVersion: "253" + uid: e077da3d-5654-402d-8ef4-dbb085f82dc0 + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. 
+ creationTimestamp: "2025-11-02T23:24:05Z" + name: kube-root-ca.crt + namespace: kube-system + resourceVersion: "335" + uid: e8d1adeb-566b-43a5-9b62-24a46bd21ec9 + - apiVersion: v1 + data: + ClusterConfiguration: | + apiServer: + certSANs: + - 127.0.0.1 + - localhost + - 192.168.85.2 + extraArgs: + - name: enable-admission-plugins + value: NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + apiVersion: kubeadm.k8s.io/v1beta4 + caCertificateValidityPeriod: 87600h0m0s + certificateValidityPeriod: 8760h0m0s + certificatesDir: /var/lib/minikube/certs + clusterName: mk + controlPlaneEndpoint: control-plane.minikube.internal:8443 + controllerManager: + extraArgs: + - name: allocate-node-cidrs + value: "true" + - name: leader-elect + value: "false" + dns: {} + encryptionAlgorithm: RSA-2048 + etcd: + local: + dataDir: /var/lib/minikube/etcd + imageRepository: registry.k8s.io + kind: ClusterConfiguration + kubernetesVersion: v1.34.1 + networking: + dnsDomain: cluster.local + podSubnet: 10.244.0.0/16 + serviceSubnet: 10.96.0.0/12 + proxy: {} + scheduler: + extraArgs: + - name: leader-elect + value: "false" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:59Z" + name: kubeadm-config + namespace: kube-system + resourceVersion: "207" + uid: e255a811-8d68-46b9-b25a-da9bfaa729a4 + - apiVersion: v1 + data: + kubelet: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:59Z" + name: kubelet-config + namespace: kube-system + resourceVersion: "210" + uid: 6cbdaecb-7681-47aa-86b8-8372935fc1c4 + kind: List + metadata: + resourceVersion: "" + + + >>> host: docker daemon status: + ● docker.service - Docker Application Container Engine + Loaded: loaded (]8;;file://custom-flannel-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;;; enabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:23:51 UTC; 56s ago + TriggeredBy: ● docker.socket + Docs: ]8;;https://docs.docker.comhttps://docs.docker.com]8;; + Main PID: 1050 (dockerd) + Tasks: 14 + Memory: 243.2M + CPU: 3.632s + CGroup: /system.slice/docker.service + └─1050 
/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + + Nov 02 23:23:51 custom-flannel-999044 dockerd[1050]: time="2025-11-02T23:23:51.173937772Z" level=info msg="Docker daemon" commit=f8215cc containerd-snapshotter=false storage-driver=overlay2 version=28.5.1 + Nov 02 23:23:51 custom-flannel-999044 dockerd[1050]: time="2025-11-02T23:23:51.173976864Z" level=info msg="Initializing buildkit" + Nov 02 23:23:51 custom-flannel-999044 dockerd[1050]: time="2025-11-02T23:23:51.186661508Z" level=info msg="Completed buildkit initialization" + Nov 02 23:23:51 custom-flannel-999044 dockerd[1050]: time="2025-11-02T23:23:51.190487010Z" level=info msg="Daemon has completed initialization" + Nov 02 23:23:51 custom-flannel-999044 dockerd[1050]: time="2025-11-02T23:23:51.190555995Z" level=info msg="API listen on /var/run/docker.sock" + Nov 02 23:23:51 custom-flannel-999044 dockerd[1050]: time="2025-11-02T23:23:51.190555993Z" level=info msg="API listen on /run/docker.sock" + Nov 02 23:23:51 custom-flannel-999044 dockerd[1050]: time="2025-11-02T23:23:51.190579995Z" level=info msg="API listen on [::]:2376" + Nov 02 23:23:51 custom-flannel-999044 systemd[1]: Started docker.service - Docker Application Container Engine. + Nov 02 23:24:12 custom-flannel-999044 dockerd[1050]: time="2025-11-02T23:24:12.449718177Z" level=info msg="ignoring event" container=3044066f312b2f1f7b48eb421b616b837b97d779b96538b032a5d9e49d06a614 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + Nov 02 23:24:24 custom-flannel-999044 dockerd[1050]: time="2025-11-02T23:24:24.217958184Z" level=info msg="ignoring event" container=a3c39da6d3ded93212e61c228e169aacac05678f6f3b1d77b75b0c001ffdaa13 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + + + >>> host: docker daemon config: + # ]8;;file://custom-flannel-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;; + [Unit] + Description=Docker Application Container Engine + Documentation=https://docs.docker.com + After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target + Wants=network-online.target containerd.service + Requires=docker.socket + StartLimitBurst=3 + StartLimitIntervalSec=60 + + [Service] + Type=notify + Restart=always + + + + # This file is a systemd drop-in unit that inherits from the base dockerd configuration. + # The base configuration already specifies an 'ExecStart=...' command. The first directive + # here is to clear out that command inherited from the base configuration. Without this, + # the command from the base configuration and the command specified here are treated as + # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd + # will catch this invalid input and refuse to start the service with an error like: + # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. + + # NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other + # container runtimes. If left unlimited, it may result in OOM issues with MySQL. 
+ ExecStart= + ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + ExecReload=/bin/kill -s HUP $MAINPID + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Uncomment TasksMax if your systemd version supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + TimeoutStartSec=0 + + # set delegate yes so that systemd does not reset the cgroups of docker containers + Delegate=yes + + # kill only the docker process, not all processes in the cgroup + KillMode=process + OOMScoreAdjust=-500 + + [Install] + WantedBy=multi-user.target + + + >>> host: /etc/docker/daemon.json: + {"exec-opts":["native.cgroupdriver=systemd"],"log-driver":"json-file","log-opts":{"max-size":"100m"},"storage-driver":"overlay2"} + + >>> host: docker system info: + Client: Docker Engine - Community + Version: 28.5.1 + Context: default + Debug Mode: false + Plugins: + buildx: Docker Buildx (Docker Inc.) + Version: v0.29.1 + Path: /usr/libexec/docker/cli-plugins/docker-buildx + + Server: + Containers: 20 + Running: 18 + Paused: 0 + Stopped: 2 + Images: 11 + Server Version: 28.5.1 + Storage Driver: overlay2 + Backing Filesystem: extfs + Supports d_type: true + Using metacopy: false + Native Overlay Diff: true + userxattr: false + Logging Driver: json-file + Cgroup Driver: systemd + Cgroup Version: 2 + Plugins: + Volume: local + Network: bridge host ipvlan macvlan null overlay + Log: awslogs fluentd gcplogs gelf journald json-file local splunk syslog + CDI spec directories: + /etc/cdi + /var/run/cdi + Swarm: inactive + Runtimes: io.containerd.runc.v2 runc + Default Runtime: runc + Init Binary: docker-init + containerd version: b98a3aace656320842a23f4a392a33f46af97866 + runc version: v1.3.0-0-g4ca628d1 + init version: de40ad0 + Security Options: + seccomp + Profile: builtin + cgroupns + Kernel Version: 6.6.97+ + Operating System: Debian GNU/Linux 12 (bookworm) + OSType: linux + Architecture: x86_64 + CPUs: 8 + Total Memory: 60.83GiB + Name: custom-flannel-999044 + ID: 9ad03cb6-c7d8-45d0-baa7-d38507a7534c + Docker Root Dir: /var/lib/docker + Debug Mode: false + No Proxy: control-plane.minikube.internal + Labels: + provider=docker + Experimental: false + Insecure Registries: + 10.96.0.0/12 + ::1/128 + 127.0.0.0/8 + Live Restore Enabled: false + + + + >>> host: cri-docker daemon status: + ● cri-docker.service - CRI Interface for Docker Application Container Engine + Loaded: loaded (]8;;file://custom-flannel-999044/lib/systemd/system/cri-docker.service/lib/systemd/system/cri-docker.service]8;;; disabled; preset: enabled) + Drop-In: /etc/systemd/system/cri-docker.service.d + └─]8;;file://custom-flannel-999044/etc/systemd/system/cri-docker.service.d/10-cni.conf10-cni.conf]8;; + Active: active (running) since Sun 2025-11-02 23:23:51 UTC; 57s ago + TriggeredBy: ● cri-docker.socket + Docs: ]8;;https://docs.mirantis.comhttps://docs.mirantis.com]8;; + Main PID: 1358 (cri-dockerd) + Tasks: 13 + Memory: 18.1M + CPU: 647ms + CGroup: /system.slice/cri-docker.service + └─1358 /usr/bin/cri-dockerd --container-runtime-endpoint fd:// 
--pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + Nov 02 23:24:08 custom-flannel-999044 cri-dockerd[1358]: time="2025-11-02T23:24:08Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/03f2f27f2ddfdc2036db3ef2e397b006f81afae36ea8ae248554e8930d74b7b5/resolv.conf as [nameserver 192.168.85.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:24:12 custom-flannel-999044 cri-dockerd[1358]: time="2025-11-02T23:24:12Z" level=info msg="Stop pulling image docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0: Status: Downloaded newer image for rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0" + Nov 02 23:24:23 custom-flannel-999044 cri-dockerd[1358]: time="2025-11-02T23:24:23Z" level=info msg="Pulling image docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2: 2fb311c974da: Pull complete " + Nov 02 23:24:24 custom-flannel-999044 cri-dockerd[1358]: time="2025-11-02T23:24:24Z" level=info msg="Stop pulling image docker.io/rancher/mirrored-flannelcni-flannel:v0.20.2: Status: Downloaded newer image for rancher/mirrored-flannelcni-flannel:v0.20.2" + Nov 02 23:24:27 custom-flannel-999044 cri-dockerd[1358]: time="2025-11-02T23:24:27Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/7ddd9f8d25633189c35f0f7e8c3d323dfcb1ec3be2a5c45cf160ec4015ff4003/resolv.conf as [nameserver 192.168.85.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:24:27 custom-flannel-999044 cri-dockerd[1358]: time="2025-11-02T23:24:27Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/22b021222b54303720d4297fc10fbc0d61a38bb6b020fe7a27694c18d2cda8d4/resolv.conf as [nameserver 192.168.85.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:24:27 custom-flannel-999044 cri-dockerd[1358]: map[string]interface {}{"cniVersion":"0.3.1", "hairpinMode":true, "ipMasq":false, "ipam":map[string]interface {}{"ranges":[][]map[string]interface {}{[]map[string]interface {}{map[string]interface {}{"subnet":"10.244.0.0/24"}}}, "routes":[]types.Route{types.Route{Dst:net.IPNet{IP:net.IP{0xa, 0xf4, 0x0, 0x0}, Mask:net.IPMask{0xff, 0xff, 0x0, 0x0}}, GW:net.IP(nil)}}, "type":"host-local"}, "isDefaultGateway":true, "isGateway":true, "mtu":(*uint)(0xc000014958), "name":"cbr0", "type":"bridge"} + Nov 02 23:24:30 custom-flannel-999044 cri-dockerd[1358]: {"cniVersion":"0.3.1","hairpinMode":true,"ipMasq":false,"ipam":{"ranges":[[{"subnet":"10.244.0.0/24"}]],"routes":[{"dst":"10.244.0.0/16"}],"type":"host-local"},"isDefaultGateway":true,"isGateway":true,"mtu":1450,"name":"cbr0","type":"bridge"}time="2025-11-02T23:24:30Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/55a0ede2ac7de54b8f5e434719fa6b428c8c660cca1ab879ebde9b614e3521b1/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local test-pods.svc.cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:24:30 custom-flannel-999044 cri-dockerd[1358]: map[string]interface {}{"cniVersion":"0.3.1", "hairpinMode":true, "ipMasq":false, "ipam":map[string]interface {}{"ranges":[][]map[string]interface {}{[]map[string]interface {}{map[string]interface 
{}{"subnet":"10.244.0.0/24"}}}, "routes":[]types.Route{types.Route{Dst:net.IPNet{IP:net.IP{0xa, 0xf4, 0x0, 0x0}, Mask:net.IPMask{0xff, 0xff, 0x0, 0x0}}, GW:net.IP(nil)}}, "type":"host-local"}, "isDefaultGateway":true, "isGateway":true, "mtu":(*uint)(0xc0000b48e8), "name":"cbr0", "type":"bridge"} + Nov 02 23:24:31 custom-flannel-999044 cri-dockerd[1358]: {"cniVersion":"0.3.1","hairpinMode":true,"ipMasq":false,"ipam":{"ranges":[[{"subnet":"10.244.0.0/24"}]],"routes":[{"dst":"10.244.0.0/16"}],"type":"host-local"},"isDefaultGateway":true,"isGateway":true,"mtu":1450,"name":"cbr0","type":"bridge"}time="2025-11-02T23:24:31Z" level=info msg="Stop pulling image registry.k8s.io/e2e-test-images/agnhost:2.40: Status: Downloaded newer image for registry.k8s.io/e2e-test-images/agnhost:2.40" + + + >>> host: cri-docker daemon config: + # ]8;;file://custom-flannel-999044/lib/systemd/system/cri-docker.service/lib/systemd/system/cri-docker.service]8;; + [Unit] + Description=CRI Interface for Docker Application Container Engine + Documentation=https://docs.mirantis.com + After=network-online.target firewalld.service docker.service + Wants=network-online.target + Requires=cri-docker.socket + + [Service] + Type=notify + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// + ExecReload=/bin/kill -s HUP $MAINPID + TimeoutSec=0 + RestartSec=2 + Restart=always + + # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. + # Both the old, and new location are accepted by systemd 229 and up, so using the old location + # to make them work for either version of systemd. + StartLimitBurst=3 + + # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. + # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make + # this option work for either version of systemd. + StartLimitInterval=60s + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Comment TasksMax if your systemd version does not support it. + # Only systemd 226 and above support this option. + TasksMax=infinity + Delegate=yes + KillMode=process + + [Install] + WantedBy=multi-user.target + + # ]8;;file://custom-flannel-999044/etc/systemd/system/cri-docker.service.d/10-cni.conf/etc/systemd/system/cri-docker.service.d/10-cni.conf]8;; + [Service] + ExecStart= + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + + >>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf: + [Service] + ExecStart= + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + >>> host: /usr/lib/systemd/system/cri-docker.service: + [Unit] + Description=CRI Interface for Docker Application Container Engine + Documentation=https://docs.mirantis.com + After=network-online.target firewalld.service docker.service + Wants=network-online.target + Requires=cri-docker.socket + + [Service] + Type=notify + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// + ExecReload=/bin/kill -s HUP $MAINPID + TimeoutSec=0 + RestartSec=2 + Restart=always + + # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. 
+ # Both the old, and new location are accepted by systemd 229 and up, so using the old location + # to make them work for either version of systemd. + StartLimitBurst=3 + + # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. + # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make + # this option work for either version of systemd. + StartLimitInterval=60s + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Comment TasksMax if your systemd version does not support it. + # Only systemd 226 and above support this option. + TasksMax=infinity + Delegate=yes + KillMode=process + + [Install] + WantedBy=multi-user.target + + + >>> host: cri-dockerd version: + cri-dockerd dev (HEAD) + + + >>> host: containerd daemon status: + ● containerd.service - containerd container runtime + Loaded: loaded (]8;;file://custom-flannel-999044/lib/systemd/system/containerd.service/lib/systemd/system/containerd.service]8;;; disabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:23:50 UTC; 59s ago + Docs: ]8;;https://containerd.iohttps://containerd.io]8;; + Main PID: 1037 (containerd) + Tasks: 205 + Memory: 102.0M + CPU: 1.149s + CGroup: /system.slice/containerd.service + ├─1037 /usr/bin/containerd + ├─1817 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 5251a8cf81038dbdb98c5901fd7721a3bec9229a430eeace6038d7fbd6bfa877 -address /run/containerd/containerd.sock + ├─1819 /usr/bin/containerd-shim-runc-v2 -namespace moby -id ba38a261a975fa66e786fba07c96b77d044633f7bfdd8996cf817049c453cb23 -address /run/containerd/containerd.sock + ├─1833 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 08892eacbe9a48cda55b9b740f6e354f241cfabb426d70f2f9582b3771974e33 -address /run/containerd/containerd.sock + ├─1898 /usr/bin/containerd-shim-runc-v2 -namespace moby -id d6ba6f1e0ab0ba0dd0e878261ca9fee39382575f870df2da5b7396f2e4141940 -address /run/containerd/containerd.sock + ├─1994 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 9eb2c19505351d67a9b8cc2eecbc63054a8026af256cf8ba5492202bf56bb16b -address /run/containerd/containerd.sock + ├─1995 /usr/bin/containerd-shim-runc-v2 -namespace moby -id bce100d50209a3e6d902733bd95c02c0d048d817a2544465fa1d818822479d1e -address /run/containerd/containerd.sock + ├─2024 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 05259b19afcc14a9489c8f95c4f377ee08dfc0b25e2848e05f750649dcccf701 -address /run/containerd/containerd.sock + ├─2072 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 68ba2ab023025938c711e11b562ae278157ee48941e23d3832f99b3c6816b846 -address /run/containerd/containerd.sock + ├─2501 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 8f89e7d7960fbd68600a89b950433f4af158fc8d0a5965b36c0406c5defa38a3 -address /run/containerd/containerd.sock + ├─2532 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 03f2f27f2ddfdc2036db3ef2e397b006f81afae36ea8ae248554e8930d74b7b5 -address /run/containerd/containerd.sock + ├─2589 /usr/bin/containerd-shim-runc-v2 -namespace moby -id b0687d9a66f579eecfe51562a01c58852ae295a0974b4a52a42b5b79c8ede5ae -address /run/containerd/containerd.sock + ├─2945 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 4aa184bfd0beb3338c5482f03997f0c457de114724f3f21f4d9f1d5bcdbfb292 -address /run/containerd/containerd.sock + ├─3106 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 
7ddd9f8d25633189c35f0f7e8c3d323dfcb1ec3be2a5c45cf160ec4015ff4003 -address /run/containerd/containerd.sock + ├─3112 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 22b021222b54303720d4297fc10fbc0d61a38bb6b020fe7a27694c18d2cda8d4 -address /run/containerd/containerd.sock + ├─3220 /usr/bin/containerd-shim-runc-v2 -namespace moby -id deca544d98354221d2f6bb515a466375d6eb4198f7abe9f96881ffe86b25288b -address /run/containerd/containerd.sock + ├─3261 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 9282c71332d4863b516970b512bbde3b25ed1715d6f256670356acfbb11658a4 -address /run/containerd/containerd.sock + ├─3346 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 55a0ede2ac7de54b8f5e434719fa6b428c8c660cca1ab879ebde9b614e3521b1 -address /run/containerd/containerd.sock + └─3496 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 04c6751b67eb2396b5295a5d25cfcf781e497ff21fde6c58a9c509b287e8c07f -address /run/containerd/containerd.sock + + Nov 02 23:23:50 custom-flannel-999044 containerd[1037]: time="2025-11-02T23:23:50.952611132Z" level=info msg="Start cni network conf syncer for default" + Nov 02 23:23:50 custom-flannel-999044 containerd[1037]: time="2025-11-02T23:23:50.952617506Z" level=info msg="Start streaming server" + Nov 02 23:23:50 custom-flannel-999044 containerd[1037]: time="2025-11-02T23:23:50.952665956Z" level=info msg="containerd successfully booted in 0.016026s" + Nov 02 23:23:50 custom-flannel-999044 systemd[1]: Started containerd.service - containerd container runtime. + Nov 02 23:24:12 custom-flannel-999044 containerd[1037]: time="2025-11-02T23:24:12.449691952Z" level=info msg="shim disconnected" id=3044066f312b2f1f7b48eb421b616b837b97d779b96538b032a5d9e49d06a614 namespace=moby + Nov 02 23:24:12 custom-flannel-999044 containerd[1037]: time="2025-11-02T23:24:12.449728116Z" level=warning msg="cleaning up after shim disconnected" id=3044066f312b2f1f7b48eb421b616b837b97d779b96538b032a5d9e49d06a614 namespace=moby + Nov 02 23:24:12 custom-flannel-999044 containerd[1037]: time="2025-11-02T23:24:12.449735548Z" level=info msg="cleaning up dead shim" namespace=moby + Nov 02 23:24:24 custom-flannel-999044 containerd[1037]: time="2025-11-02T23:24:24.218245390Z" level=info msg="shim disconnected" id=a3c39da6d3ded93212e61c228e169aacac05678f6f3b1d77b75b0c001ffdaa13 namespace=moby + Nov 02 23:24:24 custom-flannel-999044 containerd[1037]: time="2025-11-02T23:24:24.218273884Z" level=warning msg="cleaning up after shim disconnected" id=a3c39da6d3ded93212e61c228e169aacac05678f6f3b1d77b75b0c001ffdaa13 namespace=moby + Nov 02 23:24:24 custom-flannel-999044 containerd[1037]: time="2025-11-02T23:24:24.218278584Z" level=info msg="cleaning up dead shim" namespace=moby + + + >>> host: containerd daemon config: + # ]8;;file://custom-flannel-999044/lib/systemd/system/containerd.service/lib/systemd/system/containerd.service]8;; + # Copyright The containerd Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
+ + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /lib/systemd/system/containerd.service: + # Copyright The containerd Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it. + # Only systemd 226 and above support this version. 
+ TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /etc/containerd/config.toml: + version = 2 + root = "/var/lib/containerd" + state = "/run/containerd" + oom_score = 0 + # imports + + [grpc] + address = "/run/containerd/containerd.sock" + uid = 0 + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + + [debug] + address = "" + uid = 0 + gid = 0 + level = "" + + [metrics] + address = "" + grpc_histogram = false + + [cgroup] + path = "" + + [plugins] + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + [plugins."io.containerd.grpc.v1.cri"] + enable_unprivileged_ports = true + stream_server_address = "" + stream_server_port = "10010" + enable_selinux = false + sandbox_image = "registry.k8s.io/pause:3.10.1" + stats_collect_period = 10 + enable_tls_streaming = false + max_container_log_line_size = 16384 + restrict_oom_score_adj = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = true + snapshotter = "overlayfs" + default_runtime_name = "runc" + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + runtime_type = "" + runtime_engine = "" + runtime_root = "" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + [plugins."io.containerd.gc.v1.scheduler"] + pause_threshold = 0.02 + deletion_threshold = 0 + mutation_threshold = 100 + schedule_delay = "0s" + startup_delay = "100ms" + + + >>> host: containerd config dump: + disabled_plugins = [] + imports = ["/etc/containerd/config.toml"] + oom_score = 0 + plugin_dir = "" + required_plugins = [] + root = "/var/lib/containerd" + state = "/run/containerd" + temp = "" + version = 2 + + [cgroup] + path = "" + + [debug] + address = "" + format = "" + gid = 0 + level = "" + uid = 0 + + [grpc] + address = "/run/containerd/containerd.sock" + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + tcp_address = "" + tcp_tls_ca = "" + tcp_tls_cert = "" + tcp_tls_key = "" + uid = 0 + + [metrics] + address = "" + grpc_histogram = false + + [plugins] + + [plugins."io.containerd.gc.v1.scheduler"] + deletion_threshold = 0 + mutation_threshold = 100 + pause_threshold = 0.02 + schedule_delay = "0s" + startup_delay = "100ms" + + [plugins."io.containerd.grpc.v1.cri"] + cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"] + device_ownership_from_security_context = false + disable_apparmor = false + disable_cgroup = false + disable_hugetlb_controller = true + disable_proc_mount = false + disable_tcp_service = true + drain_exec_sync_io_timeout = "0s" + enable_cdi = false + enable_selinux = false + enable_tls_streaming = false + enable_unprivileged_icmp = false + enable_unprivileged_ports = true + ignore_deprecation_warnings = [] + ignore_image_defined_volumes = false + image_pull_progress_timeout = "5m0s" + image_pull_with_sync_fs = false + max_concurrent_downloads = 3 + max_container_log_line_size = 16384 + netns_mounts_under_state_dir = false + restrict_oom_score_adj = false + sandbox_image = 
"registry.k8s.io/pause:3.10.1" + selinux_category_range = 1024 + stats_collect_period = 10 + stream_idle_timeout = "4h0m0s" + stream_server_address = "" + stream_server_port = "10010" + systemd_cgroup = false + tolerate_missing_hugetlb_controller = true + unset_seccomp_profile = "" + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + ip_pref = "" + max_conf_num = 1 + setup_serially = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + default_runtime_name = "runc" + disable_snapshot_annotations = true + discard_unpacked_layers = true + ignore_blockio_not_enabled_errors = false + ignore_rdt_not_enabled_errors = false + no_pivot = false + snapshotter = "overlayfs" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "io.containerd.runc.v2" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".image_decryption] + key_model = "node" + + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.grpc.v1.cri".registry.auths] + + [plugins."io.containerd.grpc.v1.cri".registry.configs] + + [plugins."io.containerd.grpc.v1.cri".registry.headers] + + [plugins."io.containerd.grpc.v1.cri".registry.mirrors] + + [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming] + tls_cert_file = "" + tls_key_file = "" + + [plugins."io.containerd.internal.v1.opt"] + path = "/opt/containerd" + + [plugins."io.containerd.internal.v1.restart"] + interval = "10s" + + [plugins."io.containerd.internal.v1.tracing"] + + [plugins."io.containerd.metadata.v1.bolt"] + content_sharing_policy = "shared" + + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + + [plugins."io.containerd.nri.v1.nri"] + disable = true + disable_connections = false + plugin_config_path = "/etc/nri/conf.d" + plugin_path = "/opt/nri/plugins" + plugin_registration_timeout = "5s" + plugin_request_timeout = "2s" + socket_path = "/var/run/nri/nri.sock" + + [plugins."io.containerd.runtime.v1.linux"] + no_shim = false + 
runtime = "runc" + runtime_root = "" + shim = "containerd-shim" + shim_debug = false + + [plugins."io.containerd.runtime.v2.task"] + platforms = ["linux/amd64"] + sched_core = false + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + sync_fs = false + + [plugins."io.containerd.service.v1.tasks-service"] + blockio_config_file = "" + rdt_config_file = "" + + [plugins."io.containerd.snapshotter.v1.aufs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.blockfile"] + fs_type = "" + mount_options = [] + root_path = "" + scratch_file = "" + + [plugins."io.containerd.snapshotter.v1.btrfs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.devmapper"] + async_remove = false + base_image_size = "" + discard_blocks = false + fs_options = "" + fs_type = "" + pool_name = "" + root_path = "" + + [plugins."io.containerd.snapshotter.v1.native"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.overlayfs"] + mount_options = [] + root_path = "" + sync_remove = false + upperdir_label = false + + [plugins."io.containerd.snapshotter.v1.zfs"] + root_path = "" + + [plugins."io.containerd.tracing.processor.v1.otlp"] + + [plugins."io.containerd.transfer.v1.local"] + config_path = "" + max_concurrent_downloads = 3 + max_concurrent_uploaded_layers = 3 + + [[plugins."io.containerd.transfer.v1.local".unpack_config]] + differ = "walking" + platform = "linux/amd64" + snapshotter = "overlayfs" + + [proxy_plugins] + + [stream_processors] + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"] + accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"] + args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = "application/vnd.oci.image.layer.v1.tar" + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"] + accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"] + args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = "application/vnd.oci.image.layer.v1.tar+gzip" + + [timeouts] + "io.containerd.timeout.bolt.open" = "0s" + "io.containerd.timeout.metrics.shimstats" = "2s" + "io.containerd.timeout.shim.cleanup" = "5s" + "io.containerd.timeout.shim.load" = "5s" + "io.containerd.timeout.shim.shutdown" = "3s" + "io.containerd.timeout.task.state" = "2s" + + [ttrpc] + address = "" + gid = 0 + uid = 0 + + + >>> host: crio daemon status: + ○ crio.service - Container Runtime Interface for OCI (CRI-O) + Loaded: loaded (]8;;file://custom-flannel-999044/lib/systemd/system/crio.service/lib/systemd/system/crio.service]8;;; disabled; preset: enabled) + Active: inactive (dead) + Docs: ]8;;https://github.com/cri-o/cri-ohttps://github.com/cri-o/cri-o]8;; + ssh: Process exited with status 3 + + + >>> host: crio daemon config: + # ]8;;file://custom-flannel-999044/lib/systemd/system/crio.service/lib/systemd/system/crio.service]8;; + [Unit] + Description=Container Runtime Interface for OCI (CRI-O) + Documentation=https://github.com/cri-o/cri-o + Wants=network-online.target + Before=kubelet.service + After=network-online.target + + [Service] + Type=notify + EnvironmentFile=-/etc/default/crio + Environment=GOTRACEBACK=crash + ExecStart=/usr/bin/crio \ + $CRIO_CONFIG_OPTIONS \ + $CRIO_RUNTIME_OPTIONS \ + $CRIO_STORAGE_OPTIONS \ + $CRIO_NETWORK_OPTIONS \ + $CRIO_METRICS_OPTIONS + 
ExecReload=/bin/kill -s HUP $MAINPID + TasksMax=infinity + LimitNOFILE=1048576 + LimitNPROC=1048576 + LimitCORE=infinity + OOMScoreAdjust=-999 + TimeoutStartSec=0 + Restart=on-failure + RestartSec=10 + + [Install] + WantedBy=multi-user.target + Alias=cri-o.service + + + >>> host: /etc/crio: + /etc/crio/crio.conf.d/10-crio.conf + [crio.image] + signature_policy = "/etc/crio/policy.json" + + [crio.runtime] + default_runtime = "crun" + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_root = "/run/crun" + monitor_path = "/usr/libexec/crio/conmon" + allowed_annotations = [ + "io.containers.trace-syscall", + ] + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_root = "/run/runc" + monitor_path = "/usr/libexec/crio/conmon" + /etc/crio/crio.conf.d/02-crio.conf + [crio.image] + # pause_image = "" + + [crio.network] + # cni_default_network = "" + + [crio.runtime] + # cgroup_manager = "" + /etc/crio/policy.json + { "default": [{ "type": "insecureAcceptAnything" }] } + + + >>> host: crio config: + INFO[2025-11-02T23:24:51.630909993Z] Updating config from single file: /etc/crio/crio.conf + INFO[2025-11-02T23:24:51.6309405Z] Updating config from drop-in file: /etc/crio/crio.conf + INFO[2025-11-02T23:24:51.630967625Z] Skipping not-existing config file "/etc/crio/crio.conf" + INFO[2025-11-02T23:24:51.630987093Z] Updating config from path: /etc/crio/crio.conf.d + INFO[2025-11-02T23:24:51.63103021Z] Updating config from drop-in file: /etc/crio/crio.conf.d/02-crio.conf + INFO[2025-11-02T23:24:51.631123167Z] Updating config from drop-in file: /etc/crio/crio.conf.d/10-crio.conf + INFO Using default capabilities: CAP_CHOWN, CAP_DAC_OVERRIDE, CAP_FSETID, CAP_FOWNER, CAP_SETGID, CAP_SETUID, CAP_SETPCAP, CAP_NET_BIND_SERVICE, CAP_KILL + # The CRI-O configuration file specifies all of the available configuration + # options and command-line flags for the crio(8) OCI Kubernetes Container Runtime + # daemon, but in a TOML format that can be more easily modified and versioned. + # + # Please refer to crio.conf(5) for details of all configuration options. + + # CRI-O supports partial configuration reload during runtime, which can be + # done by sending SIGHUP to the running process. Currently supported options + # are explicitly mentioned with: 'This option supports live configuration + # reload'. + + # CRI-O reads its storage defaults from the containers-storage.conf(5) file + # located at /etc/containers/storage.conf. Modify this storage configuration if + # you want to change the system's defaults. If you want to modify storage just + # for CRI-O, you can change the storage configuration options here. + [crio] + + # Path to the "root directory". CRI-O stores all of its data, including + # containers images, in this directory. + # root = "/var/lib/containers/storage" + + # Path to the "run directory". CRI-O stores all of its state in this directory. + # runroot = "/run/containers/storage" + + # Path to the "imagestore". If CRI-O stores all of its images in this directory differently than Root. + # imagestore = "" + + # Storage driver used to manage the storage of images and containers. Please + # refer to containers-storage.conf(5) to see all available storage drivers. + # storage_driver = "" + + # List to pass options to the storage driver. Please refer to + # containers-storage.conf(5) to see all available storage options. 
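For reference, the /etc/crio/policy.json shown above is plain containers-policy JSON. A small standard-library sketch that decodes exactly that document (the struct covers only the fields present here, not the full policy schema):

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// policy models just the "default" requirements list used by the file above.
type policy struct {
	Default []struct {
		Type string `json:"type"`
	} `json:"default"`
}

func main() {
	// Verbatim contents of /etc/crio/policy.json from the dump above.
	raw := `{ "default": [{ "type": "insecureAcceptAnything" }] }`

	var p policy
	if err := json.Unmarshal([]byte(raw), &p); err != nil {
		log.Fatal(err)
	}
	fmt.Println("default policy type:", p.Default[0].Type) // insecureAcceptAnything
}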
+ # storage_option = [ + # ] + + # The default log directory where all logs will go unless directly specified by + # the kubelet. The log directory specified must be an absolute directory. + # log_dir = "/var/log/crio/pods" + + # Location for CRI-O to lay down the temporary version file. + # It is used to check if crio wipe should wipe containers, which should + # always happen on a node reboot + # version_file = "/var/run/crio/version" + + # Location for CRI-O to lay down the persistent version file. + # It is used to check if crio wipe should wipe images, which should + # only happen when CRI-O has been upgraded + # version_file_persist = "" + + # InternalWipe is whether CRI-O should wipe containers and images after a reboot when the server starts. + # If set to false, one must use the external command 'crio wipe' to wipe the containers and images in these situations. + # internal_wipe = true + + # InternalRepair is whether CRI-O should check if the container and image storage was corrupted after a sudden restart. + # If it was, CRI-O also attempts to repair the storage. + # internal_repair = true + + # Location for CRI-O to lay down the clean shutdown file. + # It is used to check whether crio had time to sync before shutting down. + # If not found, crio wipe will clear the storage directory. + # clean_shutdown_file = "/var/lib/crio/clean.shutdown" + + # The crio.api table contains settings for the kubelet/gRPC interface. + [crio.api] + + # Path to AF_LOCAL socket on which CRI-O will listen. + # listen = "/var/run/crio/crio.sock" + + # IP address on which the stream server will listen. + # stream_address = "127.0.0.1" + + # The port on which the stream server will listen. If the port is set to "0", then + # CRI-O will allocate a random free port number. + # stream_port = "0" + + # Enable encrypted TLS transport of the stream server. + # stream_enable_tls = false + + # Length of time until open streams terminate due to lack of activity + # stream_idle_timeout = "" + + # Path to the x509 certificate file used to serve the encrypted stream. This + # file can change, and CRI-O will automatically pick up the changes. + # stream_tls_cert = "" + + # Path to the key file used to serve the encrypted stream. This file can + # change and CRI-O will automatically pick up the changes. + # stream_tls_key = "" + + # Path to the x509 CA(s) file used to verify and authenticate client + # communication with the encrypted stream. This file can change and CRI-O will + # automatically pick up the changes. + # stream_tls_ca = "" + + # Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 80 * 1024 * 1024. + # grpc_max_send_msg_size = 83886080 + + # Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 80 * 1024 * 1024. + # grpc_max_recv_msg_size = 83886080 + + # The crio.runtime table contains settings pertaining to the OCI runtime used + # and options for how to set up and manage the OCI runtime. + [crio.runtime] + + # A list of ulimits to be set in containers by default, specified as + # "=:", for example: + # "nofile=1024:2048" + # If nothing is set here, settings will be inherited from the CRI-O daemon + # default_ulimits = [ + # ] + + # If true, the runtime will not use pivot_root, but instead use MS_MOVE. + # no_pivot = false + + # decryption_keys_path is the path where the keys required for + # image decryption are stored. This option supports live configuration reload. 
+ # decryption_keys_path = "/etc/crio/keys/" + + # Path to the conmon binary, used for monitoring the OCI runtime. + # Will be searched for using $PATH if empty. + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon = "" + + # Cgroup setting for conmon + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorCgroup. + # conmon_cgroup = "" + + # Environment variable list for the conmon process, used for passing necessary + # environment variables to conmon or the runtime. + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon_env = [ + # ] + + # Additional environment variables to set for all the + # containers. These are overridden if set in the + # container image spec or in the container runtime configuration. + # default_env = [ + # ] + + # If true, SELinux will be used for pod separation on the host. + # This option is deprecated, and be interpreted from whether SELinux is enabled on the host in the future. + # selinux = false + + # Path to the seccomp.json profile which is used as the default seccomp profile + # for the runtime. If not specified or set to "", then the internal default seccomp profile will be used. + # This option supports live configuration reload. + # seccomp_profile = "" + + # Enable a seccomp profile for privileged containers from the local path. + # This option supports live configuration reload. + # privileged_seccomp_profile = "" + + # Used to change the name of the default AppArmor profile of CRI-O. The default + # profile name is "crio-default". This profile only takes effect if the user + # does not specify a profile via the Kubernetes Pod's metadata annotation. If + # the profile is set to "unconfined", then this equals to disabling AppArmor. + # This option supports live configuration reload. + # apparmor_profile = "crio-default" + + # Path to the blockio class configuration file for configuring + # the cgroup blockio controller. + # blockio_config_file = "" + + # Reload blockio-config-file and rescan blockio devices in the system before applying + # blockio parameters. + # blockio_reload = false + + # Used to change irqbalance service config file path which is used for configuring + # irqbalance daemon. + # irqbalance_config_file = "/etc/sysconfig/irqbalance" + + # irqbalance_config_restore_file allows to set a cpu mask CRI-O should + # restore as irqbalance config at startup. Set to empty string to disable this flow entirely. + # By default, CRI-O manages the irqbalance configuration to enable dynamic IRQ pinning. + # irqbalance_config_restore_file = "/etc/sysconfig/orig_irq_banned_cpus" + + # Path to the RDT configuration file for configuring the resctrl pseudo-filesystem. + # This option supports live configuration reload. + # rdt_config_file = "" + + # Cgroup management implementation used for the runtime. + # cgroup_manager = "systemd" + + # Specify whether the image pull must be performed in a separate cgroup. + # separate_pull_cgroup = "" + + # List of default capabilities for containers. If it is empty or commented out, + # only the capabilities defined in the containers json file by the user/kube + # will be added. + # default_capabilities = [ + # "CHOWN", + # "DAC_OVERRIDE", + # "FSETID", + # "FOWNER", + # "SETGID", + # "SETUID", + # "SETPCAP", + # "NET_BIND_SERVICE", + # "KILL", + # ] + + # Add capabilities to the inheritable set, as well as the default group of permitted, bounding and effective. 
+ # If capabilities are expected to work for non-root users, this option should be set. + # add_inheritable_capabilities = false + + # List of default sysctls. If it is empty or commented out, only the sysctls + # defined in the container json file by the user/kube will be added. + # default_sysctls = [ + # ] + + # List of devices on the host that a + # user can specify with the "io.kubernetes.cri-o.Devices" allowed annotation. + # allowed_devices = [ + # "/dev/fuse", + # "/dev/net/tun", + # ] + + # List of additional devices. specified as + # "::", for example: "--device=/dev/sdc:/dev/xvdc:rwm". + # If it is empty or commented out, only the devices + # defined in the container json file by the user/kube will be added. + # additional_devices = [ + # ] + + # List of directories to scan for CDI Spec files. + # cdi_spec_dirs = [ + # "/etc/cdi", + # "/var/run/cdi", + # ] + + # Change the default behavior of setting container devices uid/gid from CRI's + # SecurityContext (RunAsUser/RunAsGroup) instead of taking host's uid/gid. + # Defaults to false. + # device_ownership_from_security_context = false + + # Path to OCI hooks directories for automatically executed hooks. If one of the + # directories does not exist, then CRI-O will automatically skip them. + # hooks_dir = [ + # "/usr/share/containers/oci/hooks.d", + # ] + + # Path to the file specifying the defaults mounts for each container. The + # format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads + # its default mounts from the following two files: + # + # 1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the + # override file, where users can either add in their own default mounts, or + # override the default mounts shipped with the package. + # + # 2) /usr/share/containers/mounts.conf: This is the default file read for + # mounts. If you want CRI-O to read from a different, specific mounts file, + # you can change the default_mounts_file. Note, if this is done, CRI-O will + # only add mounts it finds in this file. + # + # default_mounts_file = "" + + # Maximum number of processes allowed in a container. + # This option is deprecated. The Kubelet flag '--pod-pids-limit' should be used instead. + # pids_limit = -1 + + # Maximum sized allowed for the container log file. Negative numbers indicate + # that no size limit is imposed. If it is positive, it must be >= 8192 to + # match/exceed conmon's read buffer. The file is truncated and re-opened so the + # limit is never exceeded. This option is deprecated. The Kubelet flag '--container-log-max-size' should be used instead. + # log_size_max = -1 + + # Whether container output should be logged to journald in addition to the kubernetes log file + # log_to_journald = false + + # Path to directory in which container exit files are written to by conmon. + # container_exits_dir = "/var/run/crio/exits" + + # Path to directory for container attach sockets. + # container_attach_socket_dir = "/var/run/crio" + + # The prefix to use for the source of the bind mounts. + # bind_mount_prefix = "" + + # If set to true, all containers will run in read-only mode. + # read_only = false + + # Changes the verbosity of the logs based on the level it is set to. Options + # are fatal, panic, error, warn, info, debug and trace. This option supports + # live configuration reload. + # log_level = "info" + + # Filter the log messages by the provided regular expression. + # This option supports live configuration reload. 
+ # log_filter = "" + + # The UID mappings for the user namespace of each container. A range is + # specified in the form containerUID:HostUID:Size. Multiple ranges must be + # separated by comma. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # uid_mappings = "" + + # The GID mappings for the user namespace of each container. A range is + # specified in the form containerGID:HostGID:Size. Multiple ranges must be + # separated by comma. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # gid_mappings = "" + + # If set, CRI-O will reject any attempt to map host UIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_uid = -1 + + # If set, CRI-O will reject any attempt to map host GIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_gid = -1 + + # The minimal amount of time in seconds to wait before issuing a timeout + # regarding the proper termination of the container. The lowest possible + # value is 30s, whereas lower values are not considered by CRI-O. + # ctr_stop_timeout = 30 + + # drop_infra_ctr determines whether CRI-O drops the infra container + # when a pod does not have a private PID namespace, and does not use + # a kernel separating runtime (like kata). + # It requires manage_ns_lifecycle to be true. + # drop_infra_ctr = true + + # infra_ctr_cpuset determines what CPUs will be used to run infra containers. + # You can use linux CPU list format to specify desired CPUs. + # To get better isolation for guaranteed pods, set this parameter to be equal to kubelet reserved-cpus. + # infra_ctr_cpuset = "" + + # shared_cpuset determines the CPU set which is allowed to be shared between guaranteed containers, + # regardless of, and in addition to, the exclusiveness of their CPUs. + # This field is optional and would not be used if not specified. + # You can specify CPUs in the Linux CPU list format. + # shared_cpuset = "" + + # The directory where the state of the managed namespaces gets tracked. + # Only used when manage_ns_lifecycle is true. + # namespaces_dir = "/var/run" + + # pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle + # pinns_path = "" + + # Globally enable/disable CRIU support which is necessary to + # checkpoint and restore container or pods (even if CRIU is found in $PATH). + # enable_criu_support = true + + # Enable/disable the generation of the container, + # sandbox lifecycle events to be sent to the Kubelet to optimize the PLEG + # enable_pod_events = false + + # default_runtime is the _name_ of the OCI runtime to be used as the default. + # The name is matched against the runtimes map below. + # default_runtime = "crun" + + # A list of paths that, when absent from the host, + # will cause a container creation to fail (as opposed to the current behavior being created as a directory). 
+ # This option is to protect from source locations whose existence as a directory could jeopardize the health of the node, and whose + # creation as a file is not desired either. + # An example is /etc/hostname, which will cause failures on reboot if it's created as a directory, but often doesn't exist because + # the hostname is being managed dynamically. + # absent_mount_sources_to_reject = [ + # ] + + # The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes. + # The runtime to use is picked based on the runtime handler provided by the CRI. + # If no runtime handler is provided, the "default_runtime" will be used. + # Each entry in the table should follow the format: + # + # [crio.runtime.runtimes.runtime-handler] + # runtime_path = "/path/to/the/executable" + # runtime_type = "oci" + # runtime_root = "/path/to/the/root" + # inherit_default_runtime = false + # monitor_path = "/path/to/container/monitor" + # monitor_cgroup = "/cgroup/path" + # monitor_exec_cgroup = "/cgroup/path" + # monitor_env = [] + # privileged_without_host_devices = false + # allowed_annotations = [] + # platform_runtime_paths = { "os/arch" = "/path/to/binary" } + # no_sync_log = false + # default_annotations = {} + # stream_websockets = false + # seccomp_profile = "" + # Where: + # - runtime-handler: Name used to identify the runtime. + # - runtime_path (optional, string): Absolute path to the runtime executable in + # the host filesystem. If omitted, the runtime-handler identifier should match + # the runtime executable name, and the runtime executable should be placed + # in $PATH. + # - runtime_type (optional, string): Type of runtime, one of: "oci", "vm". If + # omitted, an "oci" runtime is assumed. + # - runtime_root (optional, string): Root directory for storage of containers + # state. + # - runtime_config_path (optional, string): the path for the runtime configuration + # file. This can only be used with when using the VM runtime_type. + # - inherit_default_runtime (optional, bool): when true the runtime_path, + # runtime_type, runtime_root and runtime_config_path will be replaced by + # the values from the default runtime on load time. + # - privileged_without_host_devices (optional, bool): an option for restricting + # host devices from being passed to privileged containers. + # - allowed_annotations (optional, array of strings): an option for specifying + # a list of experimental annotations that this runtime handler is allowed to process. + # The currently recognized values are: + # "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod. + # "io.kubernetes.cri-o.cgroup2-mount-hierarchy-rw" for mounting cgroups writably when set to "true". + # "io.kubernetes.cri-o.Devices" for configuring devices for the pod. + # "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm. + # "io.kubernetes.cri-o.UnifiedCgroup.$CTR_NAME" for configuring the cgroup v2 unified block for a container. + # "io.containers.trace-syscall" for tracing syscalls via the OCI seccomp BPF hook. + # "io.kubernetes.cri-o.seccompNotifierAction" for enabling the seccomp notifier feature. + # "io.kubernetes.cri-o.umask" for setting the umask for container init process. 
+ # "io.kubernetes.cri.rdt-class" for setting the RDT class of a container + # "seccomp-profile.kubernetes.cri-o.io" for setting the seccomp profile for: + # - a specific container by using: "seccomp-profile.kubernetes.cri-o.io/" + # - a whole pod by using: "seccomp-profile.kubernetes.cri-o.io/POD" + # Note that the annotation works on containers as well as on images. + # For images, the plain annotation "seccomp-profile.kubernetes.cri-o.io" + # can be used without the required "/POD" suffix or a container name. + # "io.kubernetes.cri-o.DisableFIPS" for disabling FIPS mode in a Kubernetes pod within a FIPS-enabled cluster. + # - monitor_path (optional, string): The path of the monitor binary. Replaces + # deprecated option "conmon". + # - monitor_cgroup (optional, string): The cgroup the container monitor process will be put in. + # Replaces deprecated option "conmon_cgroup". + # - monitor_exec_cgroup (optional, string): If set to "container", indicates exec probes + # should be moved to the container's cgroup + # - monitor_env (optional, array of strings): Environment variables to pass to the monitor. + # Replaces deprecated option "conmon_env". + # When using the pod runtime and conmon-rs, then the monitor_env can be used to further configure + # conmon-rs by using: + # - LOG_DRIVER=[none,systemd,stdout] - Enable logging to the configured target, defaults to none. + # - HEAPTRACK_OUTPUT_PATH=/path/to/dir - Enable heaptrack profiling and save the files to the set directory. + # - HEAPTRACK_BINARY_PATH=/path/to/heaptrack - Enable heaptrack profiling and use set heaptrack binary. + # - platform_runtime_paths (optional, map): A mapping of platforms to the corresponding + # runtime executable paths for the runtime handler. + # - container_min_memory (optional, string): The minimum memory that must be set for a container. + # This value can be used to override the currently set global value for a specific runtime. If not set, + # a global default value of "12 MiB" will be used. + # - no_sync_log (optional, bool): If set to true, the runtime will not sync the log file on rotate or container exit. + # This option is only valid for the 'oci' runtime type. Setting this option to true can cause data loss, e.g. + # when a machine crash happens. + # - default_annotations (optional, map): Default annotations if not overridden by the pod spec. + # - stream_websockets (optional, bool): Enable the WebSocket protocol for container exec, attach and port forward. + # - seccomp_profile (optional, string): The absolute path of the seccomp.json profile which is used as the default + # seccomp profile for the runtime. + # If not specified or set to "", the runtime seccomp_profile will be used. + # If that is also not specified or set to "", the internal default seccomp profile will be applied. + # + # Using the seccomp notifier feature: + # + # This feature can help you to debug seccomp related issues, for example if + # blocked syscalls (permission denied errors) have negative impact on the workload. + # + # To be able to use this feature, configure a runtime which has the annotation + # "io.kubernetes.cri-o.seccompNotifierAction" in the allowed_annotations array. + # + # It also requires at least runc 1.1.0 or crun 0.19 which support the notifier + # feature. + # + # If everything is setup, CRI-O will modify chosen seccomp profiles for + # containers if the annotation "io.kubernetes.cri-o.seccompNotifierAction" is + # set on the Pod sandbox. 
CRI-O will then get notified if a container is using + # a blocked syscall and then terminate the workload after a timeout of 5 + # seconds if the value of "io.kubernetes.cri-o.seccompNotifierAction=stop". + # + # This also means that multiple syscalls can be captured during that period, + # while the timeout will get reset once a new syscall has been discovered. + # + # This also means that the Pods "restartPolicy" has to be set to "Never", + # otherwise the kubelet will restart the container immediately. + # + # Please be aware that CRI-O is not able to get notified if a syscall gets + # blocked based on the seccomp defaultAction, which is a general runtime + # limitation. + + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_type = "" + runtime_root = "/run/crun" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + allowed_annotations = [ + "io.containers.trace-syscall", + ] + privileged_without_host_devices = false + + + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_type = "" + runtime_root = "/run/runc" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + + privileged_without_host_devices = false + + + + # The workloads table defines ways to customize containers with different resources + # that work based on annotations, rather than the CRI. + # Note, the behavior of this table is EXPERIMENTAL and may change at any time. + # Each workload, has a name, activation_annotation, annotation_prefix and set of resources it supports mutating. + # The currently supported resources are "cpuperiod" "cpuquota", "cpushares", "cpulimit" and "cpuset". The values for "cpuperiod" and "cpuquota" are denoted in microseconds. + # The value for "cpulimit" is denoted in millicores, this value is used to calculate the "cpuquota" with the supplied "cpuperiod" or the default "cpuperiod". + # Note that the "cpulimit" field overrides the "cpuquota" value supplied in this configuration. + # Each resource can have a default value specified, or be empty. + # For a container to opt-into this workload, the pod should be configured with the annotation $activation_annotation (key only, value is ignored). + # To customize per-container, an annotation of the form $annotation_prefix.$resource/$ctrName = "value" can be specified + # signifying for that resource type to override the default value. + # If the annotation_prefix is not present, every container in the pod will be given the default values. + # Example: + # [crio.runtime.workloads.workload-type] + # activation_annotation = "io.crio/workload" + # annotation_prefix = "io.crio.workload-type" + # [crio.runtime.workloads.workload-type.resources] + # cpuset = "0-1" + # cpushares = "5" + # cpuquota = "1000" + # cpuperiod = "100000" + # cpulimit = "35" + # Where: + # The workload name is workload-type. + # To specify, the pod must have the "io.crio.workload" annotation (this is a precise string match). + # This workload supports setting cpuset and cpu resources. + # annotation_prefix is used to customize the different resources. 
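As a hedged illustration of the seccomp notifier notes above: a pod that opts in would carry the io.kubernetes.cri-o.seccompNotifierAction annotation and restartPolicy Never, and the chosen runtime handler would still need that annotation in its allowed_annotations list. The sketch below only builds and prints such a manifest with the Kubernetes Go types already used by this test suite; the pod name and image are placeholders.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pod := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "seccomp-notifier-demo", // placeholder name
			Annotations: map[string]string{
				// "stop" terminates the workload after the notifier timeout.
				"io.kubernetes.cri-o.seccompNotifierAction": "stop",
			},
		},
		Spec: corev1.PodSpec{
			// Required, otherwise the kubelet restarts the container immediately.
			RestartPolicy: corev1.RestartPolicyNever,
			Containers: []corev1.Container{{
				Name:  "app",
				Image: "registry.k8s.io/e2e-test-images/agnhost:2.40", // placeholder image
			}},
		},
	}

	out, err := json.MarshalIndent(pod, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}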
+ # To configure the cpu shares a container gets in the example above, the pod would have to have the following annotation: + # "io.crio.workload-type/$container_name = {"cpushares": "value"}" + + # hostnetwork_disable_selinux determines whether + # SELinux should be disabled within a pod when it is running in the host network namespace + # Default value is set to true + # hostnetwork_disable_selinux = true + + # disable_hostport_mapping determines whether to enable/disable + # the container hostport mapping in CRI-O. + # Default value is set to 'false' + # disable_hostport_mapping = false + + # timezone To set the timezone for a container in CRI-O. + # If an empty string is provided, CRI-O retains its default behavior. Use 'Local' to match the timezone of the host machine. + # timezone = "" + + # The crio.image table contains settings pertaining to the management of OCI images. + # + # CRI-O reads its configured registries defaults from the system wide + # containers-registries.conf(5) located in /etc/containers/registries.conf. + [crio.image] + + # Default transport for pulling images from a remote container storage. + # default_transport = "docker://" + + # The path to a file containing credentials necessary for pulling images from + # secure registries. The file is similar to that of /var/lib/kubelet/config.json + # global_auth_file = "" + + # The image used to instantiate infra containers. + # This option supports live configuration reload. + # pause_image = "registry.k8s.io/pause:3.10.1" + + # The path to a file containing credentials specific for pulling the pause_image from + # above. The file is similar to that of /var/lib/kubelet/config.json + # This option supports live configuration reload. + # pause_image_auth_file = "" + + # The command to run to have a container stay in the paused state. + # When explicitly set to "", it will fallback to the entrypoint and command + # specified in the pause image. When commented out, it will fallback to the + # default: "/pause". This option supports live configuration reload. + # pause_command = "/pause" + + # List of images to be excluded from the kubelet's garbage collection. + # It allows specifying image names using either exact, glob, or keyword + # patterns. Exact matches must match the entire name, glob matches can + # have a wildcard * at the end, and keyword matches can have wildcards + # on both ends. By default, this list includes the "pause" image if + # configured by the user, which is used as a placeholder in Kubernetes pods. + # pinned_images = [ + # ] + + # Path to the file which decides what sort of policy we use when deciding + # whether or not to trust an image that we've pulled. It is not recommended that + # this option be used, as the default behavior of using the system-wide default + # policy (i.e., /etc/containers/policy.json) is most often preferred. Please + # refer to containers-policy.json(5) for more details. + signature_policy = "/etc/crio/policy.json" + + # Root path for pod namespace-separated signature policies. + # The final policy to be used on image pull will be /.json. + # If no pod namespace is being provided on image pull (via the sandbox config), + # or the concatenated path is non existent, then the signature_policy or system + # wide policy will be used as fallback. Must be an absolute path. + # signature_policy_dir = "/etc/crio/policies" + + # List of registries to skip TLS verification for pulling images. 
Please + # consider configuring the registries via /etc/containers/registries.conf before + # changing them here. + # This option is deprecated. Use registries.conf file instead. + # insecure_registries = [ + # ] + + # Controls how image volumes are handled. The valid values are mkdir, bind and + # ignore; the latter will ignore volumes entirely. + # image_volumes = "mkdir" + + # Temporary directory to use for storing big files + # big_files_temporary_dir = "" + + # If true, CRI-O will automatically reload the mirror registry when + # there is an update to the 'registries.conf.d' directory. Default value is set to 'false'. + # auto_reload_registries = false + + # The timeout for an image pull to make progress until the pull operation + # gets canceled. This value will be also used for calculating the pull progress interval to pull_progress_timeout / 10. + # Can be set to 0 to disable the timeout as well as the progress output. + # pull_progress_timeout = "0s" + + # The mode of short name resolution. + # The valid values are "enforcing" and "disabled", and the default is "enforcing". + # If "enforcing", an image pull will fail if a short name is used, but the results are ambiguous. + # If "disabled", the first result will be chosen. + # short_name_mode = "enforcing" + + # OCIArtifactMountSupport is whether CRI-O should support OCI artifacts. + # If set to false, mounting OCI Artifacts will result in an error. + # oci_artifact_mount_support = true + # The crio.network table containers settings pertaining to the management of + # CNI plugins. + [crio.network] + + # The default CNI network name to be selected. If not set or "", then + # CRI-O will pick-up the first one found in network_dir. + # cni_default_network = "" + + # Path to the directory where CNI configuration files are located. + # network_dir = "/etc/cni/net.d/" + + # Paths to directories where CNI plugin binaries are located. + # plugin_dirs = [ + # "/opt/cni/bin/", + # ] + + # List of included pod metrics. + # included_pod_metrics = [ + # ] + + # A necessary configuration for Prometheus based metrics retrieval + [crio.metrics] + + # Globally enable or disable metrics support. + # enable_metrics = false + + # Specify enabled metrics collectors. + # Per default all metrics are enabled. + # It is possible, to prefix the metrics with "container_runtime_" and "crio_". + # For example, the metrics collector "operations" would be treated in the same + # way as "crio_operations" and "container_runtime_crio_operations". + # metrics_collectors = [ + # "image_pulls_layer_size", + # "containers_events_dropped_total", + # "containers_oom_total", + # "processes_defunct", + # "operations_total", + # "operations_latency_seconds", + # "operations_latency_seconds_total", + # "operations_errors_total", + # "image_pulls_bytes_total", + # "image_pulls_skipped_bytes_total", + # "image_pulls_failure_total", + # "image_pulls_success_total", + # "image_layer_reuse_total", + # "containers_oom_count_total", + # "containers_seccomp_notifier_count_total", + # "resources_stalled_at_stage", + # "containers_stopped_monitor_count", + # ] + # The IP address or hostname on which the metrics server will listen. + # metrics_host = "127.0.0.1" + + # The port on which the metrics server will listen. + # metrics_port = 9090 + + # Local socket path to bind the metrics server to + # metrics_socket = "" + + # The certificate for the secure metrics server. + # If the certificate is not available on disk, then CRI-O will generate a + # self-signed one. 
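If enable_metrics were switched on, the crio.metrics defaults above (metrics_host 127.0.0.1, metrics_port 9090) expose a Prometheus endpoint. A standard-library sketch of scraping it and filtering for the operations counters mentioned in metrics_collectors; against this profile, where metrics stay disabled, the request simply fails:

package main

import (
	"bufio"
	"fmt"
	"log"
	"net/http"
	"strings"
	"time"
)

func main() {
	client := &http.Client{Timeout: 3 * time.Second}
	resp, err := client.Get("http://127.0.0.1:9090/metrics")
	if err != nil {
		log.Fatalf("metrics endpoint not reachable: %v", err)
	}
	defer resp.Body.Close()

	// Print only the crio operations counters (prefixed form noted in the config comments).
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		if strings.HasPrefix(sc.Text(), "container_runtime_crio_operations") {
			fmt.Println(sc.Text())
		}
	}
	if err := sc.Err(); err != nil {
		log.Fatal(err)
	}
}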
CRI-O also watches for changes of this path and reloads the + # certificate on any modification event. + # metrics_cert = "" + + # The certificate key for the secure metrics server. + # Behaves in the same way as the metrics_cert. + # metrics_key = "" + + # A necessary configuration for OpenTelemetry trace data exporting + [crio.tracing] + + # Globally enable or disable exporting OpenTelemetry traces. + # enable_tracing = false + + # Address on which the gRPC trace collector listens on. + # tracing_endpoint = "127.0.0.1:4317" + + # Number of samples to collect per million spans. Set to 1000000 to always sample. + # tracing_sampling_rate_per_million = 0 + + # CRI-O NRI configuration. + [crio.nri] + + # Globally enable or disable NRI. + # enable_nri = true + + # NRI socket to listen on. + # nri_listen = "/var/run/nri/nri.sock" + + # NRI plugin directory to use. + # nri_plugin_dir = "/opt/nri/plugins" + + # NRI plugin configuration directory to use. + # nri_plugin_config_dir = "/etc/nri/conf.d" + + # Disable connections from externally launched NRI plugins. + # nri_disable_connections = false + + # Timeout for a plugin to register itself with NRI. + # nri_plugin_registration_timeout = "5s" + + # Timeout for a plugin to handle an NRI request. + # nri_plugin_request_timeout = "2s" + + # NRI default validator configuration. + # If enabled, the builtin default validator can be used to reject a container if some + # NRI plugin requested a restricted adjustment. Currently the following adjustments + # can be restricted/rejected: + # - OCI hook injection + # - adjustment of runtime default seccomp profile + # - adjustment of unconfied seccomp profile + # - adjustment of a custom seccomp profile + # - adjustment of linux namespaces + # Additionally, the default validator can be used to reject container creation if any + # of a required set of plugins has not processed a container creation request, unless + # the container has been annotated to tolerate a missing plugin. + # + # [crio.nri.default_validator] + # nri_enable_default_validator = false + # nri_validator_reject_oci_hook_adjustment = false + # nri_validator_reject_runtime_default_seccomp_adjustment = false + # nri_validator_reject_unconfined_seccomp_adjustment = false + # nri_validator_reject_custom_seccomp_adjustment = false + # nri_validator_reject_namespace_adjustment = false + # nri_validator_required_plugins = [ + # ] + # nri_validator_tolerate_missing_plugins_annotation = "" + + # Necessary information pertaining to container and pod stats reporting. + [crio.stats] + + # The number of seconds between collecting pod and container stats. + # If set to 0, the stats are collected on-demand instead. + # stats_collection_period = 0 + + # The number of seconds between collecting pod/container stats and pod + # sandbox metrics. If set to 0, the metrics/stats are collected on-demand instead. + # collection_period = 0 + + + ----------------------- debugLogs end: custom-flannel-999044 [took: 13.124349798s] -------------------------------- + helpers_test.go:175: Cleaning up "custom-flannel-999044" profile ... 
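The per-profile debug dumps in this log (the kindnet one follows) verify in-cluster DNS and kube-dns reachability with nslookup and nc from a netcat pod. A minimal standard-library sketch of the same two checks, assuming the kube-dns ClusterIP 10.96.0.10 reported below and that the program runs inside the cluster:

package main

import (
	"context"
	"fmt"
	"log"
	"net"
	"time"
)

func main() {
	// Resolve kubernetes.default against the kube-dns ClusterIP, like the
	// `nslookup kubernetes.default` check in the debug output.
	r := &net.Resolver{
		PreferGo: true,
		Dial: func(ctx context.Context, network, _ string) (net.Conn, error) {
			d := net.Dialer{Timeout: 2 * time.Second}
			return d.DialContext(ctx, network, "10.96.0.10:53")
		},
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	addrs, err := r.LookupHost(ctx, "kubernetes.default.svc.cluster.local")
	if err != nil {
		log.Fatalf("DNS lookup failed: %v", err)
	}
	fmt.Println("kubernetes.default resolves to:", addrs)

	// Same reachability probe as `nc 10.96.0.10 tcp/53` in the debug output.
	conn, err := net.DialTimeout("tcp", "10.96.0.10:53", 2*time.Second)
	if err != nil {
		log.Fatalf("tcp/53 not reachable: %v", err)
	}
	conn.Close()
	fmt.Println("tcp/53 reachable")
}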
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p custom-flannel-999044 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p custom-flannel-999044: (3.150794777s) +=== CONT TestNetworkPlugins/group/flannel +=== RUN TestNetworkPlugins/group/flannel/Start + net_test.go:112: (dbg) Run: out/minikube-linux-amd64 start -p flannel-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=flannel --driver=docker --container-runtime=docker + net_test.go:211: + ----------------------- debugLogs start: kindnet-999044 [pass: true] -------------------------------- + >>> netcat: nslookup kubernetes.default: + Server: 10.96.0.10 + Address: 10.96.0.10#53 + + Name: kubernetes.default.svc.cluster.local + Address: 10.96.0.1 + + + + >>> netcat: nc 10.96.0.10 udp/53: + Connection to 10.96.0.10 53 port [udp/*] succeeded! + + + >>> netcat: nc 10.96.0.10 tcp/53: + Connection to 10.96.0.10 53 port [tcp/*] succeeded! + + + >>> netcat: /etc/nsswitch.conf: + cat: can't open '/etc/nsswitch.conf': No such file or directory + command terminated with exit code 1 + + + >>> netcat: /etc/hosts: + # Kubernetes-managed hosts file. + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + fe00::0 ip6-localnet + fe00::0 ip6-mcastprefix + fe00::1 ip6-allnodes + fe00::2 ip6-allrouters + 10.244.0.3 netcat-cd4db9dbf-mssqr + + + >>> netcat: /etc/resolv.conf: + nameserver 10.96.0.10 + search default.svc.cluster.local svc.cluster.local cluster.local test-pods.svc.cluster.local c.k8s-infra-prow-build.internal google.internal + options ndots:5 + + + >>> host: /etc/nsswitch.conf: + # /etc/nsswitch.conf + # + # Example configuration of GNU Name Service Switch functionality. + # If you have the `glibc-doc-reference' and `info' packages installed, try: + # `info libc "Name Service Switch"' for information about this file. + + passwd: files + group: files + shadow: files + gshadow: files + + hosts: files dns + networks: files + + protocols: db files + services: db files + ethers: db files + rpc: db files + + netgroup: nis + + + >>> host: /etc/hosts: + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + fe00:: ip6-localnet + ff00:: ip6-mcastprefix + ff02::1 ip6-allnodes + ff02::2 ip6-allrouters + 192.168.94.2 kindnet-999044 + 192.168.94.1 host.minikube.internal + 192.168.94.2 control-plane.minikube.internal + + + >>> host: /etc/resolv.conf: + # Generated by Docker Engine. + # This file can be edited; Docker Engine will not make further changes once it + # has been modified. 
+ + nameserver 192.168.94.1 + search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal + options ndots:5 + + # Based on host file: '/etc/resolv.conf' (internal resolver) + # ExtServers: [host(10.35.240.10)] + # Overrides: [] + # Option ndots from: host + + + >>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, : + Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice + NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME + node/kindnet-999044 Ready control-plane 57s v1.34.1 192.168.94.2 Debian GNU/Linux 12 (bookworm) 6.6.97+ docker://28.5.1 + + NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR + default service/kubernetes ClusterIP 10.96.0.1 443/TCP 55s + default service/netcat ClusterIP 10.96.100.203 8080/TCP 14s app=netcat + kube-system service/kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP,9153/TCP 54s k8s-app=kube-dns + + NAMESPACE NAME ENDPOINTS AGE + default endpoints/kubernetes 192.168.94.2:8443 55s + default endpoints/netcat 10.244.0.3:8080 14s + kube-system endpoints/k8s.io-minikube-hostpath 49s + kube-system endpoints/kube-dns 10.244.0.2:53,10.244.0.2:53,10.244.0.2:9153 49s + + NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR + kube-system daemonset.apps/kindnet 1 1 1 1 1 53s kindnet-cni docker.io/kindest/kindnetd:v20250512-df8de77b app=kindnet + kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 54s kube-proxy registry.k8s.io/kube-proxy:v1.34.1 k8s-app=kube-proxy + + NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR + default deployment.apps/netcat 1/1 1 1 14s dnsutils registry.k8s.io/e2e-test-images/agnhost:2.40 app=netcat + kube-system deployment.apps/coredns 1/1 1 1 54s coredns registry.k8s.io/coredns/coredns:v1.12.1 k8s-app=kube-dns + + NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES + default pod/netcat-cd4db9dbf-mssqr 1/1 Running 0 14s 10.244.0.3 kindnet-999044 + kube-system pod/coredns-66bc5c9577-ggj62 1/1 Running 0 48s 10.244.0.2 kindnet-999044 + kube-system pod/etcd-kindnet-999044 1/1 Running 0 54s 192.168.94.2 kindnet-999044 + kube-system pod/kindnet-kt8qf 1/1 Running 0 48s 192.168.94.2 kindnet-999044 + kube-system pod/kube-apiserver-kindnet-999044 1/1 Running 0 54s 192.168.94.2 kindnet-999044 + kube-system pod/kube-controller-manager-kindnet-999044 1/1 Running 0 54s 192.168.94.2 kindnet-999044 + kube-system pod/kube-proxy-mt7ll 1/1 Running 0 48s 192.168.94.2 kindnet-999044 + kube-system pod/kube-scheduler-kindnet-999044 1/1 Running 0 54s 192.168.94.2 kindnet-999044 + kube-system pod/storage-provisioner 1/1 Running 0 49s 192.168.94.2 kindnet-999044 + + + >>> host: crictl pods: + POD ID CREATED STATE NAME NAMESPACE ATTEMPT RUNTIME + 446fbb1f75446 15 seconds ago Ready netcat-cd4db9dbf-mssqr default 0 (default) + 727a593bf8a96 24 seconds ago Ready coredns-66bc5c9577-ggj62 kube-system 0 (default) + 0df6c714c264f 24 seconds ago Ready storage-provisioner kube-system 0 (default) + ca022d863f380 49 seconds ago Ready kube-proxy-mt7ll kube-system 0 (default) + 368f30f21d30e 49 seconds ago Ready kindnet-kt8qf kube-system 0 (default) + 4a08642209286 59 seconds ago Ready kube-scheduler-kindnet-999044 kube-system 0 (default) + 45c03f76c464b 59 seconds ago Ready kube-controller-manager-kindnet-999044 kube-system 0 (default) + a1f5d8ad4fe9b 59 seconds ago Ready kube-apiserver-kindnet-999044 
kube-system 0 (default) + 459af41f63e28 59 seconds ago Ready etcd-kindnet-999044 kube-system 0 (default) + + + >>> host: crictl containers: + CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE + fdd544602bbe5 registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 14 seconds ago Running dnsutils 0 446fbb1f75446 netcat-cd4db9dbf-mssqr default + 9a8bd78558ee0 52546a367cc9e 24 seconds ago Running coredns 0 727a593bf8a96 coredns-66bc5c9577-ggj62 kube-system + 900557d3b6a31 6e38f40d628db 24 seconds ago Running storage-provisioner 0 0df6c714c264f storage-provisioner kube-system + c5017ef31df11 kindest/kindnetd@sha256:07a4b3fe0077a0ae606cc0a200fc25a28fa64dcc30b8d311b461089969449f9a 39 seconds ago Running kindnet-cni 0 368f30f21d30e kindnet-kt8qf kube-system + a37c0429f9cde fc25172553d79 49 seconds ago Running kube-proxy 0 ca022d863f380 kube-proxy-mt7ll kube-system + 1c4d707cd496d c80c8dbafe7dd 59 seconds ago Running kube-controller-manager 0 45c03f76c464b kube-controller-manager-kindnet-999044 kube-system + 60ec6b14b48e6 7dd6aaa1717ab 59 seconds ago Running kube-scheduler 0 4a08642209286 kube-scheduler-kindnet-999044 kube-system + 31cac75a88ef1 5f1f5298c888d 59 seconds ago Running etcd 0 459af41f63e28 etcd-kindnet-999044 kube-system + 04b2427acfd48 c3994bc696102 59 seconds ago Running kube-apiserver 0 a1f5d8ad4fe9b kube-apiserver-kindnet-999044 kube-system + + + >>> k8s: describe netcat deployment: + Name: netcat + Namespace: default + CreationTimestamp: Sun, 02 Nov 2025 23:24:32 +0000 + Labels: app=netcat + Annotations: deployment.kubernetes.io/revision: 1 + Selector: app=netcat + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 25% max unavailable, 25% max surge + Pod Template: + Labels: app=netcat + Containers: + dnsutils: + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + Environment: + Mounts: + Volumes: + Node-Selectors: + Tolerations: + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: netcat-cd4db9dbf (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 15s deployment-controller Scaled up replica set netcat-cd4db9dbf from 0 to 1 + + + >>> k8s: describe netcat pod(s): + Name: netcat-cd4db9dbf-mssqr + Namespace: default + Priority: 0 + Service Account: default + Node: kindnet-999044/192.168.94.2 + Start Time: Sun, 02 Nov 2025 23:24:32 +0000 + Labels: app=netcat + pod-template-hash=cd4db9dbf + Annotations: + Status: Running + IP: 10.244.0.3 + IPs: + IP: 10.244.0.3 + Controlled By: ReplicaSet/netcat-cd4db9dbf + Containers: + dnsutils: + Container ID: docker://fdd544602bbe5368695b6cab70d31f44008ebcc40dc1dba7bc8fc50171d02289 + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Image ID: docker-pullable://registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + State: Running + Started: Sun, 02 Nov 2025 23:24:34 +0000 + Ready: True + Restart Count: 0 + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-67ncn (ro) + Conditions: + Type Status + 
PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-api-access-67ncn: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: + Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 15s default-scheduler Successfully assigned default/netcat-cd4db9dbf-mssqr to kindnet-999044 + Normal Pulling 15s kubelet Pulling image "registry.k8s.io/e2e-test-images/agnhost:2.40" + Normal Pulled 14s kubelet Successfully pulled image "registry.k8s.io/e2e-test-images/agnhost:2.40" in 820ms (820ms including waiting). Image size: 127004766 bytes. + Normal Created 13s kubelet Created container: dnsutils + Normal Started 13s kubelet Started container dnsutils + + + >>> k8s: netcat logs: + + + >>> k8s: describe coredns deployment: + Name: coredns + Namespace: kube-system + CreationTimestamp: Sun, 02 Nov 2025 23:23:52 +0000 + Labels: k8s-app=kube-dns + Annotations: deployment.kubernetes.io/revision: 1 + Selector: k8s-app=kube-dns + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 1 max unavailable, 25% max surge + Pod Template: + Labels: k8s-app=kube-dns + Service Account: coredns + Containers: + coredns: + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Ports: 53/UDP (dns), 53/TCP (dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from config-volume (ro) + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + Priority Class Name: system-cluster-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: coredns-66bc5c9577 (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 49s deployment-controller Scaled up replica set coredns-66bc5c9577 from 0 to 2 + Normal ScalingReplicaSet 49s deployment-controller Scaled down replica set coredns-66bc5c9577 from 2 to 1 + + + >>> k8s: describe coredns pods: + Name: coredns-66bc5c9577-ggj62 + Namespace: kube-system + Priority: 2000000000 + Priority Class Name: system-cluster-critical + Service Account: coredns + Node: kindnet-999044/192.168.94.2 + Start Time: Sun, 02 Nov 2025 23:24:23 +0000 + Labels: k8s-app=kube-dns + pod-template-hash=66bc5c9577 + Annotations: + Status: Running + IP: 10.244.0.2 + IPs: + IP: 10.244.0.2 + Controlled By: ReplicaSet/coredns-66bc5c9577 + Containers: + coredns: + Container 
ID: docker://9a8bd78558ee0b5f0a5b6e802e549eb0190410bf5010eb1ad83e11d5b03b9df4 + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Image ID: docker-pullable://registry.k8s.io/coredns/coredns@sha256:e8c262566636e6bc340ece6473b0eed193cad045384401529721ddbe6463d31c + Ports: 53/UDP (dns), 53/TCP (dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + State: Running + Started: Sun, 02 Nov 2025 23:24:23 +0000 + Ready: True + Restart Count: 0 + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from config-volume (ro) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-wp96t (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + kube-api-access-wp96t: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: Burstable + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning FailedScheduling 49s default-scheduler 0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. no new claims to deallocate, preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling. + Normal Scheduled 24s default-scheduler Successfully assigned kube-system/coredns-66bc5c9577-ggj62 to kindnet-999044 + Normal Pulled 24s kubelet Container image "registry.k8s.io/coredns/coredns:v1.12.1" already present on machine + Normal Created 24s kubelet Created container: coredns + Normal Started 24s kubelet Started container coredns + + + >>> k8s: coredns logs: + maxprocs: Leaving GOMAXPROCS=8: CPU quota undefined + .:53 + [INFO] plugin/reload: Running configuration SHA512 = c7556d8fdf49c5e32a9077be8cfb9fc6947bb07e663a10d55b192eb63ad1f2bd9793e8e5f5a36fc9abb1957831eec5c997fd9821790e3990ae9531bf41ecea37 + CoreDNS-1.12.1 + linux/amd64, go1.24.1, 707c7c1 + [INFO] 127.0.0.1:43178 - 21058 "HINFO IN 2648703686081847570.4141261149925191424. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.032670468s + [INFO] 10.244.0.3:46831 - 40014 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000154689s + [INFO] 10.244.0.3:36741 - 48387 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000157914s + [INFO] 10.244.0.3:44430 - 24694 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000099836s + [INFO] 10.244.0.3:42306 - 29458 "A IN netcat.default.svc.cluster.local. udp 50 false 512" NOERROR qr,aa,rd 98 0.000196433s + [INFO] 10.244.0.3:42306 - 29747 "AAAA IN netcat.default.svc.cluster.local. 
udp 50 false 512" NOERROR qr,aa,rd 143 0.000431762s + [INFO] 10.244.0.3:38746 - 25517 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000171897s + [INFO] 10.244.0.3:48252 - 32906 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000134536s + [INFO] 10.244.0.3:55965 - 22630 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000102235s + + + >>> k8s: describe api server pod(s): + Name: kube-apiserver-kindnet-999044 + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Node: kindnet-999044/192.168.94.2 + Start Time: Sun, 02 Nov 2025 23:23:52 +0000 + Labels: component=kube-apiserver + tier=control-plane + Annotations: kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 192.168.94.2:8443 + kubernetes.io/config.hash: 81f7c300fd2c46bef712fce7b0b18718 + kubernetes.io/config.mirror: 81f7c300fd2c46bef712fce7b0b18718 + kubernetes.io/config.seen: 2025-11-02T23:23:52.291564766Z + kubernetes.io/config.source: file + Status: Running + SeccompProfile: RuntimeDefault + IP: 192.168.94.2 + IPs: + IP: 192.168.94.2 + Controlled By: Node/kindnet-999044 + Containers: + kube-apiserver: + Container ID: docker://04b2427acfd48e80efac3047574fa3bbc9cce83d2b06cb69dbb884a76a76a28a + Image: registry.k8s.io/kube-apiserver:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-apiserver@sha256:b9d7c117f8ac52bed4b13aeed973dc5198f9d93a926e6fe9e0b384f155baa902 + Port: 8443/TCP (probe-port) + Host Port: 8443/TCP (probe-port) + Command: + kube-apiserver + --advertise-address=192.168.94.2 + --allow-privileged=true + --authorization-mode=Node,RBAC + --client-ca-file=/var/lib/minikube/certs/ca.crt + --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + --enable-bootstrap-token-auth=true + --etcd-cafile=/var/lib/minikube/certs/etcd/ca.crt + --etcd-certfile=/var/lib/minikube/certs/apiserver-etcd-client.crt + --etcd-keyfile=/var/lib/minikube/certs/apiserver-etcd-client.key + --etcd-servers=https://127.0.0.1:2379 + --kubelet-client-certificate=/var/lib/minikube/certs/apiserver-kubelet-client.crt + --kubelet-client-key=/var/lib/minikube/certs/apiserver-kubelet-client.key + --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + --proxy-client-cert-file=/var/lib/minikube/certs/front-proxy-client.crt + --proxy-client-key-file=/var/lib/minikube/certs/front-proxy-client.key + --requestheader-allowed-names=front-proxy-client + --requestheader-client-ca-file=/var/lib/minikube/certs/front-proxy-ca.crt + --requestheader-extra-headers-prefix=X-Remote-Extra- + --requestheader-group-headers=X-Remote-Group + --requestheader-username-headers=X-Remote-User + --secure-port=8443 + --service-account-issuer=https://kubernetes.default.svc.cluster.local + --service-account-key-file=/var/lib/minikube/certs/sa.pub + --service-account-signing-key-file=/var/lib/minikube/certs/sa.key + --service-cluster-ip-range=10.96.0.0/12 + --tls-cert-file=/var/lib/minikube/certs/apiserver.crt + --tls-private-key-file=/var/lib/minikube/certs/apiserver.key + State: Running + Started: Sun, 02 Nov 2025 23:23:48 +0000 + Ready: True + Restart Count: 0 + Requests: + cpu: 250m + Liveness: http-get https://192.168.94.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=8 + Readiness: http-get https://192.168.94.2:probe-port/readyz 
delay=0s timeout=15s period=1s #success=1 #failure=3 + Startup: http-get https://192.168.94.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=24 + Environment: + Mounts: + /etc/ca-certificates from etc-ca-certificates (ro) + /etc/ssl/certs from ca-certs (ro) + /usr/local/share/ca-certificates from usr-local-share-ca-certificates (ro) + /usr/share/ca-certificates from usr-share-ca-certificates (ro) + /var/lib/minikube/certs from k8s-certs (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + ca-certs: + Type: HostPath (bare host directory volume) + Path: /etc/ssl/certs + HostPathType: DirectoryOrCreate + etc-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /etc/ca-certificates + HostPathType: DirectoryOrCreate + k8s-certs: + Type: HostPath (bare host directory volume) + Path: /var/lib/minikube/certs + HostPathType: DirectoryOrCreate + usr-local-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/local/share/ca-certificates + HostPathType: DirectoryOrCreate + usr-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/share/ca-certificates + HostPathType: DirectoryOrCreate + QoS Class: Burstable + Node-Selectors: + Tolerations: :NoExecute op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Pulled 59s kubelet Container image "registry.k8s.io/kube-apiserver:v1.34.1" already present on machine + Normal Created 59s kubelet Created container: kube-apiserver + Normal Started 59s kubelet Started container kube-apiserver + + + >>> k8s: api server logs: + I1102 23:23:48.481801 1 options.go:263] external host was not specified, using 192.168.94.2 + I1102 23:23:48.483622 1 server.go:150] Version: v1.34.1 + I1102 23:23:48.483637 1 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + W1102 23:23:48.868970 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=admissionregistration.k8s.io/v1alpha1 + W1102 23:23:48.868993 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=authentication.k8s.io/v1alpha1 + W1102 23:23:48.868998 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=scheduling.k8s.io/v1alpha1 + W1102 23:23:48.869001 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=imagepolicy.k8s.io/v1alpha1 + W1102 23:23:48.869004 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=rbac.authorization.k8s.io/v1alpha1 + W1102 23:23:48.869007 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=storage.k8s.io/v1alpha1 + W1102 23:23:48.869009 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=coordination.k8s.io/v1alpha2 + W1102 23:23:48.869011 1 api_enablement.go:112] alpha api enabled with emulated version 
1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=certificates.k8s.io/v1alpha1 + W1102 23:23:48.869013 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=resource.k8s.io/v1alpha3 + W1102 23:23:48.869016 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=internal.apiserver.k8s.io/v1alpha1 + W1102 23:23:48.869018 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=storagemigration.k8s.io/v1alpha1 + W1102 23:23:48.869020 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=node.k8s.io/v1alpha1 + W1102 23:23:48.878746 1 logging.go:55] [core] [Channel #2 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:48.879025 1 logging.go:55] [core] [Channel #1 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:23:48.879513 1 shared_informer.go:349] "Waiting for caches to sync" controller="node_authorizer" + I1102 23:23:48.888295 1 shared_informer.go:349] "Waiting for caches to sync" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:23:48.892601 1 plugins.go:157] Loaded 14 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,RuntimeClass,DefaultIngressClass,PodTopologyLabels,MutatingAdmissionPolicy,MutatingAdmissionWebhook. + I1102 23:23:48.892641 1 plugins.go:160] Loaded 13 validating admission controller(s) successfully in the following order: LimitRanger,ServiceAccount,PodSecurity,Priority,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,ClusterTrustBundleAttest,CertificateSubjectRestriction,ValidatingAdmissionPolicy,ValidatingAdmissionWebhook,ResourceQuota. + I1102 23:23:48.892864 1 instance.go:239] Using reconciler: lease + W1102 23:23:48.893459 1 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.310220 1 logging.go:55] [core] [Channel #13 SubChannel #14]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + I1102 23:23:49.327738 1 handler.go:285] Adding GroupVersion apiextensions.k8s.io v1 to ResourceManager + W1102 23:23:49.327757 1 genericapiserver.go:784] Skipping API apiextensions.k8s.io/v1beta1 because it has no resources. + I1102 23:23:49.333278 1 cidrallocator.go:197] starting ServiceCIDR Allocator Controller + W1102 23:23:49.333967 1 logging.go:55] [core] [Channel #27 SubChannel #28]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.341513 1 logging.go:55] [core] [Channel #31 SubChannel #32]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.346520 1 logging.go:55] [core] [Channel #35 SubChannel #36]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.352868 1 logging.go:55] [core] [Channel #39 SubChannel #40]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.362545 1 logging.go:55] [core] [Channel #43 SubChannel #44]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.371375 1 logging.go:55] [core] [Channel #47 SubChannel #48]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.377214 1 logging.go:55] [core] [Channel #51 SubChannel #52]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.384694 1 logging.go:55] [core] [Channel #55 SubChannel #56]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.390342 1 logging.go:55] [core] [Channel #59 SubChannel #60]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.397219 1 logging.go:55] [core] [Channel #63 SubChannel #64]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.403305 1 logging.go:55] [core] [Channel #67 SubChannel #68]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.412240 1 logging.go:55] [core] [Channel #71 SubChannel #72]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.416263 1 logging.go:55] [core] [Channel #75 SubChannel #76]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.422901 1 logging.go:55] [core] [Channel #79 SubChannel #80]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.428808 1 logging.go:55] [core] [Channel #83 SubChannel #84]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.433306 1 logging.go:55] [core] [Channel #87 SubChannel #88]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.437837 1 logging.go:55] [core] [Channel #91 SubChannel #92]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:23:49.457903 1 handler.go:285] Adding GroupVersion v1 to ResourceManager + I1102 23:23:49.458160 1 apis.go:112] API group "internal.apiserver.k8s.io" is not enabled, skipping. + W1102 23:23:49.458931 1 logging.go:55] [core] [Channel #95 SubChannel #96]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.462484 1 logging.go:55] [core] [Channel #99 SubChannel #100]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.466482 1 logging.go:55] [core] [Channel #103 SubChannel #104]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.472096 1 logging.go:55] [core] [Channel #107 SubChannel #108]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.478470 1 logging.go:55] [core] [Channel #111 SubChannel #112]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.482132 1 logging.go:55] [core] [Channel #115 SubChannel #116]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.486074 1 logging.go:55] [core] [Channel #119 SubChannel #120]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.494560 1 logging.go:55] [core] [Channel #123 SubChannel #124]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.499764 1 logging.go:55] [core] [Channel #127 SubChannel #128]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.504312 1 logging.go:55] [core] [Channel #131 SubChannel #132]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.508190 1 logging.go:55] [core] [Channel #135 SubChannel #136]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.513672 1 logging.go:55] [core] [Channel #139 SubChannel #140]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.517432 1 logging.go:55] [core] [Channel #143 SubChannel #144]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.521222 1 logging.go:55] [core] [Channel #147 SubChannel #148]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.526253 1 logging.go:55] [core] [Channel #151 SubChannel #152]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.532575 1 logging.go:55] [core] [Channel #155 SubChannel #156]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.537062 1 logging.go:55] [core] [Channel #159 SubChannel #160]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:23:49.542497 1 logging.go:55] [core] [Channel #163 SubChannel #164]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.546849 1 logging.go:55] [core] [Channel #167 SubChannel #168]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.551393 1 logging.go:55] [core] [Channel #171 SubChannel #172]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.556639 1 logging.go:55] [core] [Channel #175 SubChannel #176]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.562695 1 logging.go:55] [core] [Channel #179 SubChannel #180]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.568433 1 logging.go:55] [core] [Channel #183 SubChannel #184]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:23:49.573113 1 logging.go:55] [core] [Channel #187 SubChannel #188]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:23:49.577007 1 apis.go:112] API group "storagemigration.k8s.io" is not enabled, skipping. 
+ W1102 23:23:49.577955 1 logging.go:55] [core] [Channel #191 SubChannel #192]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
+ W1102 23:23:49.581780 1 logging.go:55] [core] [Channel #195 SubChannel #196]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
+ W1102 23:23:49.587101 1 logging.go:55] [core] [Channel #199 SubChannel #200]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
+ W1102 23:23:49.590861 1 logging.go:55] [core] [Channel #203 SubChannel #204]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
+ W1102 23:23:49.594698 1 logging.go:55] [core] [Channel #207 SubChannel #208]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
+ W1102 23:23:49.599376 1 logging.go:55] [core] [Channel #211 SubChannel #212]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
+ W1102 23:23:49.604631 1 logging.go:55] [core] [Channel #215 SubChannel #216]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
+ W1102 23:23:49.608859 1 logging.go:55] [core] [Channel #219 SubChannel #220]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
+ W1102 23:23:49.614354 1 logging.go:55] [core] [Channel #223 SubChannel #224]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled"
+ W1102 23:23:49.617692 1 logging.go:55] [core] [Channel #227 SubChannel #228]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled"
+ W1102 23:23:49.623756 1 logging.go:55] [core] [Channel #231 SubChannel #232]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
+ W1102 23:23:49.630079 1 logging.go:55] [core] [Channel #235 SubChannel #236]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
+ W1102 23:23:49.636204 1 logging.go:55] [core] [Channel #239 SubChannel #240]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled"
+ W1102 23:23:49.649127 1 logging.go:55] [core] [Channel #243 SubChannel #244]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
+ W1102 23:23:49.652841 1 logging.go:55] [core] [Channel #247 SubChannel #248]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
+ W1102 23:23:49.656330 1 logging.go:55] [core] [Channel #251 SubChannel #252]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
+ I1102 23:23:49.676080 1 handler.go:285] Adding GroupVersion authentication.k8s.io v1 to ResourceManager
+ W1102 23:23:49.676098 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1beta1 because it has no resources.
+ W1102 23:23:49.676103 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1alpha1 because it has no resources.
+ I1102 23:23:49.680460 1 handler.go:285] Adding GroupVersion authorization.k8s.io v1 to ResourceManager
+ W1102 23:23:49.680484 1 genericapiserver.go:784] Skipping API authorization.k8s.io/v1beta1 because it has no resources.
+ I1102 23:23:49.681607 1 handler.go:285] Adding GroupVersion autoscaling v2 to ResourceManager + I1102 23:23:49.682110 1 handler.go:285] Adding GroupVersion autoscaling v1 to ResourceManager + W1102 23:23:49.682128 1 genericapiserver.go:784] Skipping API autoscaling/v2beta1 because it has no resources. + W1102 23:23:49.682133 1 genericapiserver.go:784] Skipping API autoscaling/v2beta2 because it has no resources. + I1102 23:23:49.683074 1 handler.go:285] Adding GroupVersion batch v1 to ResourceManager + W1102 23:23:49.683096 1 genericapiserver.go:784] Skipping API batch/v1beta1 because it has no resources. + I1102 23:23:49.683754 1 handler.go:285] Adding GroupVersion certificates.k8s.io v1 to ResourceManager + W1102 23:23:49.683776 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.683780 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:49.684234 1 handler.go:285] Adding GroupVersion coordination.k8s.io v1 to ResourceManager + W1102 23:23:49.684252 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.684256 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1alpha2 because it has no resources. + I1102 23:23:49.684676 1 handler.go:285] Adding GroupVersion discovery.k8s.io v1 to ResourceManager + W1102 23:23:49.684697 1 genericapiserver.go:784] Skipping API discovery.k8s.io/v1beta1 because it has no resources. + I1102 23:23:49.688009 1 handler.go:285] Adding GroupVersion networking.k8s.io v1 to ResourceManager + W1102 23:23:49.688024 1 genericapiserver.go:784] Skipping API networking.k8s.io/v1beta1 because it has no resources. + I1102 23:23:49.688318 1 handler.go:285] Adding GroupVersion node.k8s.io v1 to ResourceManager + W1102 23:23:49.688322 1 genericapiserver.go:784] Skipping API node.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.688325 1 genericapiserver.go:784] Skipping API node.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:49.688821 1 handler.go:285] Adding GroupVersion policy v1 to ResourceManager + W1102 23:23:49.688827 1 genericapiserver.go:784] Skipping API policy/v1beta1 because it has no resources. + I1102 23:23:49.690157 1 handler.go:285] Adding GroupVersion rbac.authorization.k8s.io v1 to ResourceManager + W1102 23:23:49.690243 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.690271 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:49.690615 1 handler.go:285] Adding GroupVersion scheduling.k8s.io v1 to ResourceManager + W1102 23:23:49.690672 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.690690 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:49.692562 1 handler.go:285] Adding GroupVersion storage.k8s.io v1 to ResourceManager + W1102 23:23:49.692653 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.692693 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:49.693786 1 handler.go:285] Adding GroupVersion flowcontrol.apiserver.k8s.io v1 to ResourceManager + W1102 23:23:49.693796 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta3 because it has no resources. 
+ W1102 23:23:49.693800 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta2 because it has no resources. + W1102 23:23:49.693802 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta1 because it has no resources. + I1102 23:23:49.696297 1 handler.go:285] Adding GroupVersion apps v1 to ResourceManager + W1102 23:23:49.696315 1 genericapiserver.go:784] Skipping API apps/v1beta2 because it has no resources. + W1102 23:23:49.696318 1 genericapiserver.go:784] Skipping API apps/v1beta1 because it has no resources. + I1102 23:23:49.697702 1 handler.go:285] Adding GroupVersion admissionregistration.k8s.io v1 to ResourceManager + W1102 23:23:49.697712 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.697716 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1alpha1 because it has no resources. + I1102 23:23:49.698132 1 handler.go:285] Adding GroupVersion events.k8s.io v1 to ResourceManager + W1102 23:23:49.698176 1 genericapiserver.go:784] Skipping API events.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.698238 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta2 because it has no resources. + I1102 23:23:49.699908 1 handler.go:285] Adding GroupVersion resource.k8s.io v1 to ResourceManager + W1102 23:23:49.700017 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta1 because it has no resources. + W1102 23:23:49.700047 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1alpha3 because it has no resources. + W1102 23:23:49.702010 1 logging.go:55] [core] [Channel #255 SubChannel #256]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:23:49.705477 1 handler.go:285] Adding GroupVersion apiregistration.k8s.io v1 to ResourceManager + W1102 23:23:49.705541 1 genericapiserver.go:784] Skipping API apiregistration.k8s.io/v1beta1 because it has no resources. + I1102 23:23:49.941323 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:23:49.941324 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:23:49.941527 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/lib/minikube/certs/apiserver.crt::/var/lib/minikube/certs/apiserver.key" + I1102 23:23:49.941712 1 secure_serving.go:211] Serving securely on [::]:8443 + I1102 23:23:49.941777 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" + I1102 23:23:49.941811 1 customresource_discovery_controller.go:294] Starting DiscoveryController + I1102 23:23:49.941832 1 apf_controller.go:377] Starting API Priority and Fairness config controller + I1102 23:23:49.941874 1 aggregator.go:169] waiting for initial CRD sync... 
+ I1102 23:23:49.941894 1 controller.go:78] Starting OpenAPI AggregationController + I1102 23:23:49.941927 1 crdregistration_controller.go:114] Starting crd-autoregister controller + I1102 23:23:49.941937 1 shared_informer.go:349] "Waiting for caches to sync" controller="crd-autoregister" + I1102 23:23:49.942112 1 controller.go:142] Starting OpenAPI controller + I1102 23:23:49.942122 1 dynamic_serving_content.go:135] "Starting controller" name="aggregator-proxy-cert::/var/lib/minikube/certs/front-proxy-client.crt::/var/lib/minikube/certs/front-proxy-client.key" + I1102 23:23:49.942136 1 controller.go:90] Starting OpenAPI V3 controller + I1102 23:23:49.942147 1 naming_controller.go:299] Starting NamingConditionController + I1102 23:23:49.942156 1 establishing_controller.go:81] Starting EstablishingController + I1102 23:23:49.942166 1 nonstructuralschema_controller.go:195] Starting NonStructuralSchemaConditionController + I1102 23:23:49.942174 1 apiapproval_controller.go:189] Starting KubernetesAPIApprovalPolicyConformantConditionController + I1102 23:23:49.942186 1 crd_finalizer.go:269] Starting CRDFinalizer + I1102 23:23:49.942188 1 controller.go:119] Starting legacy_token_tracking_controller + I1102 23:23:49.942194 1 shared_informer.go:349] "Waiting for caches to sync" controller="configmaps" + I1102 23:23:49.942205 1 local_available_controller.go:156] Starting LocalAvailability controller + I1102 23:23:49.942212 1 cache.go:32] Waiting for caches to sync for LocalAvailability controller + I1102 23:23:49.942224 1 remote_available_controller.go:425] Starting RemoteAvailability controller + I1102 23:23:49.942227 1 cache.go:32] Waiting for caches to sync for RemoteAvailability controller + I1102 23:23:49.942240 1 apiservice_controller.go:100] Starting APIServiceRegistrationController + I1102 23:23:49.942243 1 cache.go:32] Waiting for caches to sync for APIServiceRegistrationController controller + I1102 23:23:49.942310 1 gc_controller.go:78] Starting apiserver lease garbage collector + I1102 23:23:49.942580 1 repairip.go:210] Starting ipallocator-repair-controller + I1102 23:23:49.942585 1 shared_informer.go:349] "Waiting for caches to sync" controller="ipallocator-repair-controller" + I1102 23:23:49.942227 1 default_servicecidr_controller.go:111] Starting kubernetes-service-cidr-controller + I1102 23:23:49.943126 1 shared_informer.go:349] "Waiting for caches to sync" controller="kubernetes-service-cidr-controller" + I1102 23:23:49.943601 1 controller.go:80] Starting OpenAPI V3 AggregationController + I1102 23:23:49.943735 1 system_namespaces_controller.go:66] Starting system namespaces controller + I1102 23:23:49.943851 1 cluster_authentication_trust_controller.go:459] Starting cluster_authentication_trust_controller controller + I1102 23:23:49.943866 1 shared_informer.go:349] "Waiting for caches to sync" controller="cluster_authentication_trust_controller" + I1102 23:23:49.946859 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:23:49.946932 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:23:49.979904 1 shared_informer.go:356] "Caches are synced" controller="node_authorizer" + I1102 23:23:49.989043 1 shared_informer.go:356] "Caches are synced" 
controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:23:49.989057 1 policy_source.go:240] refreshing policies + I1102 23:23:50.042338 1 cache.go:39] Caches are synced for RemoteAvailability controller + I1102 23:23:50.042361 1 cache.go:39] Caches are synced for LocalAvailability controller + I1102 23:23:50.042365 1 shared_informer.go:356] "Caches are synced" controller="configmaps" + I1102 23:23:50.042387 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller + I1102 23:23:50.042392 1 apf_controller.go:382] Running API Priority and Fairness config worker + I1102 23:23:50.042402 1 shared_informer.go:356] "Caches are synced" controller="crd-autoregister" + I1102 23:23:50.042410 1 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process + I1102 23:23:50.042419 1 handler_discovery.go:451] Starting ResourceDiscoveryManager + I1102 23:23:50.042433 1 aggregator.go:171] initial CRD sync complete... + I1102 23:23:50.042437 1 autoregister_controller.go:144] Starting autoregister controller + I1102 23:23:50.042440 1 cache.go:32] Waiting for caches to sync for autoregister controller + I1102 23:23:50.042443 1 cache.go:39] Caches are synced for autoregister controller + I1102 23:23:50.042840 1 shared_informer.go:356] "Caches are synced" controller="ipallocator-repair-controller" + I1102 23:23:50.043286 1 shared_informer.go:356] "Caches are synced" controller="kubernetes-service-cidr-controller" + I1102 23:23:50.043303 1 default_servicecidr_controller.go:166] Creating default ServiceCIDR with CIDRs: [10.96.0.0/12] + I1102 23:23:50.043909 1 shared_informer.go:356] "Caches are synced" controller="cluster_authentication_trust_controller" + I1102 23:23:50.044408 1 controller.go:667] quota admission added evaluator for: namespaces + I1102 23:23:50.045611 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True + I1102 23:23:50.045619 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:23:50.048865 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:23:50.049408 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller + I1102 23:23:50.053235 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io + I1102 23:23:50.946348 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000 + I1102 23:23:50.948533 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000 + I1102 23:23:50.948553 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist. 
+ I1102 23:23:51.220006 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io + I1102 23:23:51.241858 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io + I1102 23:23:51.346815 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"} + W1102 23:23:51.349974 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.94.2] + I1102 23:23:51.350526 1 controller.go:667] quota admission added evaluator for: endpoints + I1102 23:23:51.352652 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io + I1102 23:23:51.972746 1 controller.go:667] quota admission added evaluator for: serviceaccounts + I1102 23:23:52.543371 1 controller.go:667] quota admission added evaluator for: deployments.apps + I1102 23:23:52.547693 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"} + I1102 23:23:52.551904 1 controller.go:667] quota admission added evaluator for: daemonsets.apps + I1102 23:23:57.985940 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:23:57.988007 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:23:58.024501 1 controller.go:667] quota admission added evaluator for: replicasets.apps + I1102 23:23:58.076454 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps + I1102 23:24:32.466979 1 alloc.go:328] "allocated clusterIPs" service="default/netcat" clusterIPs={"IPv4":"10.96.100.203"} + E1102 23:24:40.569726 1 conn.go:339] Error on socket receive: read tcp 192.168.94.2:8443->192.168.94.1:40706: use of closed network connection + E1102 23:24:40.652048 1 conn.go:339] Error on socket receive: read tcp 192.168.94.2:8443->192.168.94.1:40724: use of closed network connection + E1102 23:24:40.734131 1 conn.go:339] Error on socket receive: read tcp 192.168.94.2:8443->192.168.94.1:40744: use of closed network connection + E1102 23:24:45.907128 1 conn.go:339] Error on socket receive: read tcp 192.168.94.2:8443->192.168.94.1:40792: use of closed network connection + E1102 23:24:45.979747 1 conn.go:339] Error on socket receive: read tcp 192.168.94.2:8443->192.168.94.1:40812: use of closed network connection + E1102 23:24:46.058528 1 conn.go:339] Error on socket receive: read tcp 192.168.94.2:8443->192.168.94.1:40836: use of closed network connection + E1102 23:24:46.130064 1 conn.go:339] Error on socket receive: read tcp 192.168.94.2:8443->192.168.94.1:40858: use of closed network connection + E1102 23:24:46.198880 1 conn.go:339] Error on socket receive: read tcp 192.168.94.2:8443->192.168.94.1:40876: use of closed network connection + + + >>> host: /etc/cni: + /etc/cni/net.d/cni.lock + /etc/cni/net.d/87-podman-bridge.conflist.mk_disabled + { + "cniVersion": "0.4.0", + "name": "podman", + "plugins": [ + { + "type": "bridge", + "bridge": "cni-podman0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [{ "dst": "0.0.0.0/0" }], + "ranges": [ + [ + { + "subnet": "10.88.0.0/16", + "gateway": "10.88.0.1" + } + ] + ] + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type": "firewall" + }, + { + "type": "tuning" + } + ] + } + /etc/cni/net.d/10-crio-bridge.conflist.disabled.mk_disabled + { + "cniVersion": "1.0.0", + "name": "crio", + "plugins": [ + { + "type": "bridge", + "bridge": 
"cni0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [ + { "dst": "0.0.0.0/0" }, + { "dst": "::/0" } + ], + "ranges": [ + [{ "subnet": "10.85.0.0/16" }], + [{ "subnet": "1100:200::/24" }] + ] + } + } + ] + } + /etc/cni/net.d/10-kindnet.conflist + + { + "cniVersion": "0.3.1", + "name": "kindnet", + "plugins": [ + { + "type": "ptp", + "ipMasq": false, + "ipam": { + "type": "host-local", + "dataDir": "/run/cni-ipam-state", + "routes": [ + + + { "dst": "0.0.0.0/0" } + ], + "ranges": [ + + + [ { "subnet": "10.244.0.0/24" } ] + ] + } + , + "mtu": 1500 + + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + + + >>> host: ip a s: + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: eth0@if326: mtu 1500 qdisc noqueue state UP group default + link/ether 0a:63:64:01:eb:70 brd ff:ff:ff:ff:ff:ff link-netnsid 0 + inet 192.168.94.2/24 brd 192.168.94.255 scope global eth0 + valid_lft forever preferred_lft forever + 3: docker0: mtu 1500 qdisc noqueue state DOWN group default + link/ether 86:34:f1:2d:3c:07 brd ff:ff:ff:ff:ff:ff + inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0 + valid_lft forever preferred_lft forever + 4: tunl0@NONE: mtu 1480 qdisc noop state DOWN group default qlen 1000 + link/ipip 0.0.0.0 brd 0.0.0.0 + 5: veth76147524@if3: mtu 1500 qdisc noqueue state UP group default + link/ether ee:0e:37:6d:7b:97 brd ff:ff:ff:ff:ff:ff link-netnsid 1 + inet 10.244.0.1/32 scope global veth76147524 + valid_lft forever preferred_lft forever + inet6 fe80::ec0e:37ff:fe6d:7b97/64 scope link + valid_lft forever preferred_lft forever + 6: veth1a144922@if3: mtu 1500 qdisc noqueue state UP group default + link/ether e6:e8:56:7d:79:b2 brd ff:ff:ff:ff:ff:ff link-netnsid 2 + inet 10.244.0.1/32 scope global veth1a144922 + valid_lft forever preferred_lft forever + inet6 fe80::e4e8:56ff:fe7d:79b2/64 scope link + valid_lft forever preferred_lft forever + + + >>> host: ip r s: + default via 192.168.94.1 dev eth0 + 10.244.0.2 dev veth76147524 scope host + 10.244.0.3 dev veth1a144922 scope host + 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown + 192.168.94.0/24 dev eth0 proto kernel scope link src 192.168.94.2 + + + >>> host: iptables-save: + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:24:48 2025 + *mangle + :PREROUTING ACCEPT [29631:101019646] + :INPUT ACCEPT [29596:101016848] + :FORWARD ACCEPT [35:2798] + :OUTPUT ACCEPT [20881:7718154] + :POSTROUTING ACCEPT [20916:7720952] + :KUBE-IPTABLES-HINT - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-PROXY-CANARY - [0:0] + COMMIT + # Completed on Sun Nov 2 23:24:48 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:24:48 2025 + *filter + :INPUT ACCEPT [4393:1058341] + :FORWARD ACCEPT [16:927] + :OUTPUT ACCEPT [4343:1367703] + :DOCKER - [0:0] + :DOCKER-BRIDGE - [0:0] + :DOCKER-CT - [0:0] + :DOCKER-FORWARD - [0:0] + :DOCKER-ISOLATION-STAGE-1 - [0:0] + :DOCKER-ISOLATION-STAGE-2 - [0:0] + :DOCKER-USER - [0:0] + :KUBE-EXTERNAL-SERVICES - [0:0] + :KUBE-FIREWALL - [0:0] + :KUBE-FORWARD - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-PROXY-FIREWALL - [0:0] + :KUBE-SERVICES - [0:0] + -A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer 
firewall" -j KUBE-PROXY-FIREWALL + -A INPUT -m comment --comment "kubernetes health check service ports" -j KUBE-NODEPORTS + -A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A INPUT -j KUBE-FIREWALL + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A FORWARD -j DOCKER-USER + -A FORWARD -j DOCKER-FORWARD + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -j KUBE-FIREWALL + -A DOCKER ! -i docker0 -o docker0 -j DROP + -A DOCKER-BRIDGE -o docker0 -j DOCKER + -A DOCKER-CT -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A DOCKER-FORWARD -j DOCKER-CT + -A DOCKER-FORWARD -j DOCKER-ISOLATION-STAGE-1 + -A DOCKER-FORWARD -j DOCKER-BRIDGE + -A DOCKER-FORWARD -i docker0 -j ACCEPT + -A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2 + -A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP + -A KUBE-FIREWALL ! -s 127.0.0.0/8 -d 127.0.0.0/8 -m comment --comment "block incoming localnet connections" -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP + -A KUBE-FORWARD -m conntrack --ctstate INVALID -m nfacct --nfacct-name ct_state_invalid_dropped_pkts -j DROP + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + COMMIT + # Completed on Sun Nov 2 23:24:48 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:24:48 2025 + *nat + :PREROUTING ACCEPT [36:2160] + :INPUT ACCEPT [36:2160] + :OUTPUT ACCEPT [58:3480] + :POSTROUTING ACCEPT [67:4155] + :DOCKER - [0:0] + :DOCKER_OUTPUT - [0:0] + :DOCKER_POSTROUTING - [0:0] + :KIND-MASQ-AGENT - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-MARK-MASQ - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-POSTROUTING - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-SEP-IT2ZTR26TO4XFPTO - [0:0] + :KUBE-SEP-N4G2XR5TDX7PQE7P - [0:0] + :KUBE-SEP-RS6KXC4SAKLFSCFF - [0:0] + :KUBE-SEP-UTWFOSUDHOCXYA2F - [0:0] + :KUBE-SEP-YIL6JZP7A3QYXJU2 - [0:0] + :KUBE-SERVICES - [0:0] + :KUBE-SVC-ERIFXISQEP7F7OF4 - [0:0] + :KUBE-SVC-JD5MR3NA4I4DYORP - [0:0] + :KUBE-SVC-NPX46M4PTMTKRN6Y - [0:0] + :KUBE-SVC-TCOU7JCQXEZGVUNU - [0:0] + :KUBE-SVC-WDP22YZC5S6MZWYX - [0:0] + -A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A PREROUTING -d 192.168.94.1/32 -j DOCKER_OUTPUT + -A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER + -A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -d 192.168.94.1/32 -j DOCKER_OUTPUT + -A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER + -A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING + -A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE + -A POSTROUTING -d 192.168.94.1/32 -j DOCKER_POSTROUTING + -A POSTROUTING -m addrtype ! 
--dst-type LOCAL -m comment --comment "kind-masq-agent: ensure nat POSTROUTING directs all non-LOCAL destination traffic to our custom KIND-MASQ-AGENT chain" -j KIND-MASQ-AGENT + -A DOCKER -i docker0 -j RETURN + -A DOCKER_OUTPUT -d 192.168.94.1/32 -p tcp -m tcp --dport 53 -j DNAT --to-destination 127.0.0.11:44087 + -A DOCKER_OUTPUT -d 192.168.94.1/32 -p udp -m udp --dport 53 -j DNAT --to-destination 127.0.0.11:41172 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p tcp -m tcp --sport 44087 -j SNAT --to-source 192.168.94.1:53 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p udp -m udp --sport 41172 -j SNAT --to-source 192.168.94.1:53 + -A KIND-MASQ-AGENT -d 10.244.0.0/16 -m comment --comment "kind-masq-agent: local traffic is not subject to MASQUERADE" -j RETURN + -A KIND-MASQ-AGENT -m comment --comment "kind-masq-agent: outbound traffic is subject to MASQUERADE (must be last in chain)" -j MASQUERADE + -A KUBE-MARK-MASQ -j MARK --set-xmark 0x4000/0x4000 + -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN + -A KUBE-POSTROUTING -j MARK --set-xmark 0x4000/0x0 + -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE --random-fully + -A KUBE-SEP-IT2ZTR26TO4XFPTO -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-MARK-MASQ + -A KUBE-SEP-IT2ZTR26TO4XFPTO -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m tcp -j DNAT --to-destination 10.244.0.2:53 + -A KUBE-SEP-N4G2XR5TDX7PQE7P -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:metrics" -j KUBE-MARK-MASQ + -A KUBE-SEP-N4G2XR5TDX7PQE7P -p tcp -m comment --comment "kube-system/kube-dns:metrics" -m tcp -j DNAT --to-destination 10.244.0.2:9153 + -A KUBE-SEP-RS6KXC4SAKLFSCFF -s 192.168.94.2/32 -m comment --comment "default/kubernetes:https" -j KUBE-MARK-MASQ + -A KUBE-SEP-RS6KXC4SAKLFSCFF -p tcp -m comment --comment "default/kubernetes:https" -m tcp -j DNAT --to-destination 192.168.94.2:8443 + -A KUBE-SEP-UTWFOSUDHOCXYA2F -s 10.244.0.3/32 -m comment --comment "default/netcat" -j KUBE-MARK-MASQ + -A KUBE-SEP-UTWFOSUDHOCXYA2F -p tcp -m comment --comment "default/netcat" -m tcp -j DNAT --to-destination 10.244.0.3:8080 + -A KUBE-SEP-YIL6JZP7A3QYXJU2 -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-MARK-MASQ + -A KUBE-SEP-YIL6JZP7A3QYXJU2 -p udp -m comment --comment "kube-system/kube-dns:dns" -m udp -j DNAT --to-destination 10.244.0.2:53 + -A KUBE-SERVICES -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-NPX46M4PTMTKRN6Y + -A KUBE-SERVICES -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-SVC-TCOU7JCQXEZGVUNU + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-SVC-ERIFXISQEP7F7OF4 + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-SVC-JD5MR3NA4I4DYORP + -A KUBE-SERVICES -d 10.96.100.203/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-SVC-WDP22YZC5S6MZWYX + -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS + -A KUBE-SVC-ERIFXISQEP7F7OF4 ! 
-s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-ERIFXISQEP7F7OF4 -m comment --comment "kube-system/kube-dns:dns-tcp -> 10.244.0.2:53" -j KUBE-SEP-IT2ZTR26TO4XFPTO + -A KUBE-SVC-JD5MR3NA4I4DYORP ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-MARK-MASQ + -A KUBE-SVC-JD5MR3NA4I4DYORP -m comment --comment "kube-system/kube-dns:metrics -> 10.244.0.2:9153" -j KUBE-SEP-N4G2XR5TDX7PQE7P + -A KUBE-SVC-NPX46M4PTMTKRN6Y ! -s 10.244.0.0/16 -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-MARK-MASQ + -A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https -> 192.168.94.2:8443" -j KUBE-SEP-RS6KXC4SAKLFSCFF + -A KUBE-SVC-TCOU7JCQXEZGVUNU ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-TCOU7JCQXEZGVUNU -m comment --comment "kube-system/kube-dns:dns -> 10.244.0.2:53" -j KUBE-SEP-YIL6JZP7A3QYXJU2 + -A KUBE-SVC-WDP22YZC5S6MZWYX ! -s 10.244.0.0/16 -d 10.96.100.203/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-MARK-MASQ + -A KUBE-SVC-WDP22YZC5S6MZWYX -m comment --comment "default/netcat -> 10.244.0.3:8080" -j KUBE-SEP-UTWFOSUDHOCXYA2F + COMMIT + # Completed on Sun Nov 2 23:24:48 2025 + + + >>> host: iptables table nat: + Chain PREROUTING (policy ACCEPT 37 packets, 2220 bytes) + pkts bytes target prot opt in out source destination + 55 3460 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 1 85 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.94.1 + 46 2760 DOCKER 0 -- * * 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type LOCAL + + Chain INPUT (policy ACCEPT 37 packets, 2220 bytes) + pkts bytes target prot opt in out source destination + + Chain OUTPUT (policy ACCEPT 58 packets, 3480 bytes) + pkts bytes target prot opt in out source destination + 665 56448 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 486 47010 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.94.1 + 98 5880 DOCKER 0 -- * * 0.0.0.0/0 !127.0.0.0/8 ADDRTYPE match dst-type LOCAL + + Chain POSTROUTING (policy ACCEPT 67 packets, 4155 bytes) + pkts bytes target prot opt in out source destination + 675 57183 KUBE-POSTROUTING 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */ + 0 0 MASQUERADE 0 -- * !docker0 172.17.0.0/16 0.0.0.0/0 + 0 0 DOCKER_POSTROUTING 0 -- * * 0.0.0.0/0 192.168.94.1 + 46 2895 KIND-MASQ-AGENT 0 -- * * 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type !LOCAL /* kind-masq-agent: ensure nat POSTROUTING directs all non-LOCAL destination traffic to our custom KIND-MASQ-AGENT chain */ + + Chain DOCKER (2 references) + pkts bytes target prot opt in out source destination + 0 0 RETURN 0 -- docker0 * 0.0.0.0/0 0.0.0.0/0 + + Chain DOCKER_OUTPUT (2 references) + pkts bytes target prot opt in out source destination + 0 0 DNAT 6 -- * * 0.0.0.0/0 192.168.94.1 tcp dpt:53 to:127.0.0.11:44087 + 487 47095 DNAT 17 -- * * 0.0.0.0/0 192.168.94.1 udp dpt:53 to:127.0.0.11:41172 + + Chain DOCKER_POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 0 0 SNAT 6 -- * * 127.0.0.11 0.0.0.0/0 tcp spt:44087 to:192.168.94.1:53 + 0 0 SNAT 17 -- * * 127.0.0.11 0.0.0.0/0 udp spt:41172 to:192.168.94.1:53 + + Chain KIND-MASQ-AGENT (1 references) + pkts bytes target prot opt in out 
source destination + 12 855 RETURN 0 -- * * 0.0.0.0/0 10.244.0.0/16 /* kind-masq-agent: local traffic is not subject to MASQUERADE */ + 34 2040 MASQUERADE 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kind-masq-agent: outbound traffic is subject to MASQUERADE (must be last in chain) */ + + Chain KUBE-KUBELET-CANARY (0 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-MARK-MASQ (10 references) + pkts bytes target prot opt in out source destination + 1 60 MARK 0 -- * * 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000 + + Chain KUBE-NODEPORTS (1 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 67 4155 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0 mark match ! 0x4000/0x4000 + 1 60 MARK 0 -- * * 0.0.0.0/0 0.0.0.0/0 MARK xor 0x4000 + 1 60 MASQUERADE 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ random-fully + + Chain KUBE-PROXY-CANARY (0 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-SEP-IT2ZTR26TO4XFPTO (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ tcp to:10.244.0.2:53 + + Chain KUBE-SEP-N4G2XR5TDX7PQE7P (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:metrics */ + 0 0 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:metrics */ tcp to:10.244.0.2:9153 + + Chain KUBE-SEP-RS6KXC4SAKLFSCFF (1 references) + pkts bytes target prot opt in out source destination + 5 300 KUBE-MARK-MASQ 0 -- * * 192.168.94.2 0.0.0.0/0 /* default/kubernetes:https */ + 8 480 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https */ tcp to:192.168.94.2:8443 + + Chain KUBE-SEP-UTWFOSUDHOCXYA2F (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 0 -- * * 10.244.0.3 0.0.0.0/0 /* default/netcat */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat */ tcp to:10.244.0.3:8080 + + Chain KUBE-SEP-YIL6JZP7A3QYXJU2 (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:dns */ + 8 615 DNAT 17 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns */ udp to:10.244.0.2:53 + + Chain KUBE-SERVICES (2 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-SVC-NPX46M4PTMTKRN6Y 6 -- * * 0.0.0.0/0 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 8 615 KUBE-SVC-TCOU7JCQXEZGVUNU 17 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 1 60 KUBE-SVC-ERIFXISQEP7F7OF4 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 0 0 KUBE-SVC-JD5MR3NA4I4DYORP 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 1 60 KUBE-SVC-WDP22YZC5S6MZWYX 6 -- * * 0.0.0.0/0 10.96.100.203 /* default/netcat cluster IP */ tcp dpt:8080 + 94 5640 KUBE-NODEPORTS 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL + + Chain KUBE-SVC-ERIFXISQEP7F7OF4 (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 1 60 
KUBE-SEP-IT2ZTR26TO4XFPTO 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp -> 10.244.0.2:53 */ + + Chain KUBE-SVC-JD5MR3NA4I4DYORP (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 0 0 KUBE-SEP-N4G2XR5TDX7PQE7P 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:metrics -> 10.244.0.2:9153 */ + + Chain KUBE-SVC-NPX46M4PTMTKRN6Y (1 references) + pkts bytes target prot opt in out source destination + 5 300 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 8 480 KUBE-SEP-RS6KXC4SAKLFSCFF 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https -> 192.168.94.2:8443 */ + + Chain KUBE-SVC-TCOU7JCQXEZGVUNU (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 17 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 8 615 KUBE-SEP-YIL6JZP7A3QYXJU2 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns -> 10.244.0.2:53 */ + + Chain KUBE-SVC-WDP22YZC5S6MZWYX (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.100.203 /* default/netcat cluster IP */ tcp dpt:8080 + 1 60 KUBE-SEP-UTWFOSUDHOCXYA2F 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat -> 10.244.0.3:8080 */ + + + >>> k8s: describe kindnet daemon set: + Name: kindnet + Namespace: kube-system + Selector: app=kindnet + Node-Selector: + Labels: app=kindnet + k8s-app=kindnet + tier=node + Annotations: deprecated.daemonset.template.generation: 1 + Desired Number of Nodes Scheduled: 1 + Current Number of Nodes Scheduled: 1 + Number of Nodes Scheduled with Up-to-date Pods: 1 + Number of Nodes Scheduled with Available Pods: 1 + Number of Nodes Misscheduled: 0 + Pods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed + Pod Template: + Labels: app=kindnet + k8s-app=kindnet + tier=node + Service Account: kindnet + Containers: + kindnet-cni: + Image: docker.io/kindest/kindnetd:v20250512-df8de77b + Port: + Host Port: + Limits: + cpu: 100m + memory: 50Mi + Requests: + cpu: 100m + memory: 50Mi + Environment: + HOST_IP: (v1:status.hostIP) + POD_IP: (v1:status.podIP) + POD_SUBNET: 10.244.0.0/16 + Mounts: + /etc/cni/net.d from cni-cfg (rw) + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + Volumes: + cni-cfg: + Type: HostPath (bare host directory volume) + Path: /etc/cni/net.d + HostPathType: DirectoryOrCreate + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + Node-Selectors: + Tolerations: :NoSchedule op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 50s daemonset-controller Created pod: kindnet-kt8qf + + + >>> k8s: describe kindnet pod(s): + Name: kindnet-kt8qf + Namespace: kube-system + Priority: 0 + Service Account: kindnet + Node: kindnet-999044/192.168.94.2 + Start Time: Sun, 02 Nov 2025 23:23:58 +0000 + Labels: app=kindnet + controller-revision-hash=78f866cbfd + k8s-app=kindnet + pod-template-generation=1 + tier=node + Annotations: + Status: Running + IP: 192.168.94.2 + IPs: + IP: 192.168.94.2 + Controlled By: DaemonSet/kindnet + Containers: + kindnet-cni: + Container ID: docker://c5017ef31df1160aba2511e0da59d60dca70617a50def1330313d22ef1cdedbc + Image: 
docker.io/kindest/kindnetd:v20250512-df8de77b + Image ID: docker-pullable://kindest/kindnetd@sha256:07a4b3fe0077a0ae606cc0a200fc25a28fa64dcc30b8d311b461089969449f9a + Port: + Host Port: + State: Running + Started: Sun, 02 Nov 2025 23:24:08 +0000 + Ready: True + Restart Count: 0 + Limits: + cpu: 100m + memory: 50Mi + Requests: + cpu: 100m + memory: 50Mi + Environment: + HOST_IP: (v1:status.hostIP) + POD_IP: (v1:status.podIP) + POD_SUBNET: 10.244.0.0/16 + Mounts: + /etc/cni/net.d from cni-cfg (rw) + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-z57cj (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + cni-cfg: + Type: HostPath (bare host directory volume) + Path: /etc/cni/net.d + HostPathType: DirectoryOrCreate + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + kube-api-access-z57cj: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: Guaranteed + Node-Selectors: + Tolerations: :NoSchedule op=Exists + node.kubernetes.io/disk-pressure:NoSchedule op=Exists + node.kubernetes.io/memory-pressure:NoSchedule op=Exists + node.kubernetes.io/network-unavailable:NoSchedule op=Exists + node.kubernetes.io/not-ready:NoExecute op=Exists + node.kubernetes.io/pid-pressure:NoSchedule op=Exists + node.kubernetes.io/unreachable:NoExecute op=Exists + node.kubernetes.io/unschedulable:NoSchedule op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 50s default-scheduler Successfully assigned kube-system/kindnet-kt8qf to kindnet-999044 + Normal Pulling 50s kubelet Pulling image "docker.io/kindest/kindnetd:v20250512-df8de77b" + Normal Pulled 40s kubelet Successfully pulled image "docker.io/kindest/kindnetd:v20250512-df8de77b" in 9.306s (9.306s including waiting). Image size: 107973738 bytes. 
+ Normal Created 40s kubelet Created container: kindnet-cni + Normal Started 40s kubelet Started container kindnet-cni + + + >>> k8s: kindnet container(s) logs (current): + [pod/kindnet-kt8qf/kindnet-cni] I1102 23:24:08.503238 1 metrics.go:72] Registering metrics + [pod/kindnet-kt8qf/kindnet-cni] I1102 23:24:08.503277 1 controller.go:711] "Syncing nftables rules" + [pod/kindnet-kt8qf/kindnet-cni] I1102 23:24:18.381975 1 main.go:297] Handling node with IPs: map[192.168.94.2:{}] + [pod/kindnet-kt8qf/kindnet-cni] I1102 23:24:18.382025 1 main.go:301] handling current node + [pod/kindnet-kt8qf/kindnet-cni] I1102 23:24:28.377168 1 main.go:297] Handling node with IPs: map[192.168.94.2:{}] + [pod/kindnet-kt8qf/kindnet-cni] I1102 23:24:28.377203 1 main.go:301] handling current node + [pod/kindnet-kt8qf/kindnet-cni] I1102 23:24:38.377151 1 main.go:297] Handling node with IPs: map[192.168.94.2:{}] + [pod/kindnet-kt8qf/kindnet-cni] I1102 23:24:38.377181 1 main.go:301] handling current node + [pod/kindnet-kt8qf/kindnet-cni] I1102 23:24:48.383911 1 main.go:297] Handling node with IPs: map[192.168.94.2:{}] + [pod/kindnet-kt8qf/kindnet-cni] I1102 23:24:48.383960 1 main.go:301] handling current node + + + >>> k8s: kindnet container(s) logs (previous): + error: previous terminated container "kindnet-cni" in pod "kindnet-kt8qf" not found + + + >>> k8s: describe kube-proxy daemon set: + Name: kube-proxy + Namespace: kube-system + Selector: k8s-app=kube-proxy + Node-Selector: kubernetes.io/os=linux + Labels: k8s-app=kube-proxy + Annotations: deprecated.daemonset.template.generation: 1 + Desired Number of Nodes Scheduled: 1 + Current Number of Nodes Scheduled: 1 + Number of Nodes Scheduled with Up-to-date Pods: 1 + Number of Nodes Scheduled with Available Pods: 1 + Number of Nodes Misscheduled: 0 + Pods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed + Pod Template: + Labels: k8s-app=kube-proxy + Service Account: kube-proxy + Containers: + kube-proxy: + Image: registry.k8s.io/kube-proxy:v1.34.1 + Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + --hostname-override=$(NODE_NAME) + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + Priority Class Name: system-node-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 50s daemonset-controller Created pod: kube-proxy-mt7ll + + + >>> k8s: describe kube-proxy pod(s): + Name: kube-proxy-mt7ll + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Service Account: kube-proxy + Node: kindnet-999044/192.168.94.2 + Start Time: Sun, 02 Nov 2025 23:23:58 +0000 + Labels: controller-revision-hash=66486579fc + k8s-app=kube-proxy + pod-template-generation=1 + Annotations: + Status: Running + IP: 192.168.94.2 + IPs: + IP: 192.168.94.2 + Controlled By: DaemonSet/kube-proxy + Containers: + kube-proxy: + Container ID: docker://a37c0429f9cdeabc68834c795ca464eb7f5ff27dec9e83506f379911437eaf46 + Image: 
registry.k8s.io/kube-proxy:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-proxy@sha256:913cc83ca0b5588a81d86ce8eedeb3ed1e9c1326e81852a1ea4f622b74ff749a + Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + --hostname-override=$(NODE_NAME) + State: Running + Started: Sun, 02 Nov 2025 23:23:58 +0000 + Ready: True + Restart Count: 0 + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-4bw6v (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + kube-api-access-4bw6v: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + node.kubernetes.io/disk-pressure:NoSchedule op=Exists + node.kubernetes.io/memory-pressure:NoSchedule op=Exists + node.kubernetes.io/network-unavailable:NoSchedule op=Exists + node.kubernetes.io/not-ready:NoExecute op=Exists + node.kubernetes.io/pid-pressure:NoSchedule op=Exists + node.kubernetes.io/unreachable:NoExecute op=Exists + node.kubernetes.io/unschedulable:NoSchedule op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 50s default-scheduler Successfully assigned kube-system/kube-proxy-mt7ll to kindnet-999044 + Normal Pulled 50s kubelet Container image "registry.k8s.io/kube-proxy:v1.34.1" already present on machine + Normal Created 50s kubelet Created container: kube-proxy + Normal Started 50s kubelet Started container kube-proxy + + + >>> k8s: kube-proxy logs: + I1102 23:23:58.668279 1 server_linux.go:53] "Using iptables proxy" + I1102 23:23:58.700886 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache" + I1102 23:23:58.801754 1 shared_informer.go:356] "Caches are synced" controller="node informer cache" + I1102 23:23:58.801772 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.94.2"] + E1102 23:23:58.801816 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. 
Consider using `--nodeport-addresses primary`" + I1102 23:23:58.819410 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4" + I1102 23:23:58.819437 1 server_linux.go:132] "Using iptables Proxier" + I1102 23:23:58.823740 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4" + I1102 23:23:58.823981 1 server.go:527] "Version info" version="v1.34.1" + I1102 23:23:58.823995 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + I1102 23:23:58.828034 1 config.go:403] "Starting serviceCIDR config controller" + I1102 23:23:58.828056 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config" + I1102 23:23:58.828095 1 config.go:200] "Starting service config controller" + I1102 23:23:58.828102 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config" + I1102 23:23:58.828111 1 config.go:106] "Starting endpoint slice config controller" + I1102 23:23:58.828113 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config" + I1102 23:23:58.828217 1 config.go:309] "Starting node config controller" + I1102 23:23:58.828227 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config" + I1102 23:23:58.828232 1 shared_informer.go:356] "Caches are synced" controller="node config" + I1102 23:23:58.928536 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config" + I1102 23:23:58.928539 1 shared_informer.go:356] "Caches are synced" controller="service config" + I1102 23:23:58.928539 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config" + + + >>> host: kubelet daemon status: + ● kubelet.service - kubelet: The Kubernetes Node Agent + Loaded: loaded (/lib/systemd/system/kubelet.service; disabled; preset: enabled) + Drop-In: /etc/systemd/system/kubelet.service.d + └─10-kubeadm.conf + Active: active (running) since Sun 2025-11-02 23:23:52 UTC; 56s ago + Docs: http://kubernetes.io/docs/ + Main PID: 2244 (kubelet) + Tasks: 15 (limit: 629145) + Memory: 34.4M + CPU: 1.254s + CGroup: /system.slice/kubelet.service + └─2244 /var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=kindnet-999044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.94.2 + + Nov 02 23:24:23 kindnet-999044 kubelet[2244]: I1102 23:24:23.200468 2244 kubelet_node_status.go:439] "Fast updating node status as it just became ready" + Nov 02 23:24:23 kindnet-999044 kubelet[2244]: I1102 23:24:23.210272 2244 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kindnet-kt8qf" podStartSLOduration=15.903372039 podStartE2EDuration="25.210260857s" podCreationTimestamp="2025-11-02 23:23:58 +0000 UTC" firstStartedPulling="2025-11-02 23:23:58.723753303 +0000 UTC m=+6.477118968" lastFinishedPulling="2025-11-02 23:24:08.030642102 +0000 UTC m=+15.784007786" observedRunningTime="2025-11-02 23:24:08.415044746 +0000 UTC m=+16.168410430" watchObservedRunningTime="2025-11-02 23:24:23.210260857 +0000 UTC m=+30.963626544" + Nov 02 23:24:23 kindnet-999044
kubelet[2244]: I1102 23:24:23.384926 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bz2j5\" (UniqueName: \"kubernetes.io/projected/941da621-3eb5-47e3-91e8-b4fb716f4886-kube-api-access-bz2j5\") pod \"storage-provisioner\" (UID: \"941da621-3eb5-47e3-91e8-b4fb716f4886\") " pod="kube-system/storage-provisioner" + Nov 02 23:24:23 kindnet-999044 kubelet[2244]: I1102 23:24:23.384951 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/efdeaf6d-98c7-4ab4-bad4-08b7ca189206-config-volume\") pod \"coredns-66bc5c9577-ggj62\" (UID: \"efdeaf6d-98c7-4ab4-bad4-08b7ca189206\") " pod="kube-system/coredns-66bc5c9577-ggj62" + Nov 02 23:24:23 kindnet-999044 kubelet[2244]: I1102 23:24:23.384964 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/941da621-3eb5-47e3-91e8-b4fb716f4886-tmp\") pod \"storage-provisioner\" (UID: \"941da621-3eb5-47e3-91e8-b4fb716f4886\") " pod="kube-system/storage-provisioner" + Nov 02 23:24:23 kindnet-999044 kubelet[2244]: I1102 23:24:23.384978 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wp96t\" (UniqueName: \"kubernetes.io/projected/efdeaf6d-98c7-4ab4-bad4-08b7ca189206-kube-api-access-wp96t\") pod \"coredns-66bc5c9577-ggj62\" (UID: \"efdeaf6d-98c7-4ab4-bad4-08b7ca189206\") " pod="kube-system/coredns-66bc5c9577-ggj62" + Nov 02 23:24:24 kindnet-999044 kubelet[2244]: I1102 23:24:24.435098 2244 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=27.435082032 podStartE2EDuration="27.435082032s" podCreationTimestamp="2025-11-02 23:23:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:24.43486809 +0000 UTC m=+32.188233774" watchObservedRunningTime="2025-11-02 23:24:24.435082032 +0000 UTC m=+32.188447811" + Nov 02 23:24:24 kindnet-999044 kubelet[2244]: I1102 23:24:24.445357 2244 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-ggj62" podStartSLOduration=26.445341358 podStartE2EDuration="26.445341358s" podCreationTimestamp="2025-11-02 23:23:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:24.440038584 +0000 UTC m=+32.193404271" watchObservedRunningTime="2025-11-02 23:24:24.445341358 +0000 UTC m=+32.198707049" + Nov 02 23:24:32 kindnet-999044 kubelet[2244]: I1102 23:24:32.537416 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67ncn\" (UniqueName: \"kubernetes.io/projected/5dd1b35f-9c84-46a3-b6d8-563d845b737c-kube-api-access-67ncn\") pod \"netcat-cd4db9dbf-mssqr\" (UID: \"5dd1b35f-9c84-46a3-b6d8-563d845b737c\") " pod="default/netcat-cd4db9dbf-mssqr" + Nov 02 23:24:34 kindnet-999044 kubelet[2244]: I1102 23:24:34.482363 2244 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/netcat-cd4db9dbf-mssqr" podStartSLOduration=1.662241777 podStartE2EDuration="2.482351356s" podCreationTimestamp="2025-11-02 23:24:32 +0000 UTC" firstStartedPulling="2025-11-02 23:24:32.842943044 +0000 UTC m=+40.596308726" lastFinishedPulling="2025-11-02 
23:24:33.66305264 +0000 UTC m=+41.416418305" observedRunningTime="2025-11-02 23:24:34.482088326 +0000 UTC m=+42.235454016" watchObservedRunningTime="2025-11-02 23:24:34.482351356 +0000 UTC m=+42.235717041" + + + >>> host: kubelet daemon config: + # /lib/systemd/system/kubelet.service + [Unit] + Description=kubelet: The Kubernetes Node Agent + Documentation=http://kubernetes.io/docs/ + StartLimitIntervalSec=0 + + [Service] + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet + Restart=always + # Tuned for local dev: faster than upstream default (10s), but slower than systemd default (100ms) + RestartSec=600ms + + [Install] + WantedBy=multi-user.target + + # /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + [Unit] + Wants=docker.socket + + [Service] + ExecStart= + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=kindnet-999044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.94.2 + + [Install] + + + >>> k8s: kubelet logs: + Nov 02 23:23:43 kindnet-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 404. + Nov 02 23:23:43 kindnet-999044 kubelet[1538]: E1102 23:23:43.411196 1538 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:23:43 kindnet-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:23:43 kindnet-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:23:44 kindnet-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 1. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:23:44 kindnet-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 457 and the job result is done. + Nov 02 23:23:44 kindnet-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent.
+ ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 457. + Nov 02 23:23:44 kindnet-999044 kubelet[1564]: E1102 23:23:44.270724 1564 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:23:44 kindnet-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:23:44 kindnet-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:23:44 kindnet-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 2. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:23:44 kindnet-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 510 and the job result is done. + Nov 02 23:23:44 kindnet-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 510. + Nov 02 23:23:45 kindnet-999044 kubelet[1724]: E1102 23:23:45.023523 1724 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:23:45 kindnet-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:23:45 kindnet-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. 
+ Nov 02 23:23:45 kindnet-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 3. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:23:45 kindnet-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 563 and the job result is done. + Nov 02 23:23:45 kindnet-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 563. + Nov 02 23:23:45 kindnet-999044 kubelet[1735]: E1102 23:23:45.769318 1735 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:23:45 kindnet-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:23:45 kindnet-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:23:46 kindnet-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 4. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:23:46 kindnet-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 616 and the job result is done. + Nov 02 23:23:46 kindnet-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 616. 
+ Nov 02 23:23:46 kindnet-999044 kubelet[1746]: E1102 23:23:46.520826 1746 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:23:46 kindnet-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:23:46 kindnet-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:23:46 kindnet-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 669 and the job result is done. + Nov 02 23:23:46 kindnet-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 670. + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.088127 1778 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.088170 1778 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.088187 1778 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.088193 1778 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.088346 1778 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.453619 1778 certificate_manager.go:596] "Failed while requesting a signed certificate from the control plane" err="cannot create certificate signing request: Post \"https://192.168.94.2:8443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 192.168.94.2:8443: connect: connection refused" logger="kubernetes.io/kube-apiserver-client-kubelet.UnhandledError" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.453821 1778 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.455874 1778 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.458963 1778 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. 
Defaulting to /" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.458980 1778 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.459104 1778 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.459115 1778 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"kindnet-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.459207 1778 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.459213 1778 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.459268 1778 container_manager_linux.go:315] "Creating Dynamic Resource Allocation (DRA) manager" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.745677 1778 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.745832 1778 kubelet.go:475] "Attempting to sync node with API server" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.745856 1778 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.745875 1778 kubelet.go:387] "Adding apiserver pod source" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.745895 1778 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.746371 1778 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: Get \"https://192.168.94.2:8443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 192.168.94.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.746406 1778 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: Get \"https://192.168.94.2:8443/api/v1/nodes?fieldSelector=metadata.name%3Dkindnet-999044&limit=500&resourceVersion=0\": dial tcp 192.168.94.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.746460 1778 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 
02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.746925 1778 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.746948 1778 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: W1102 23:23:47.746983 1778 probe.go:272] Flexvolume plugin directory at /usr/libexec/kubernetes/kubelet-plugins/volume/exec/ does not exist. Recreating. + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.754752 1778 server.go:1262] "Started kubelet" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.754806 1778 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.754825 1778 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.754891 1778 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.755008 1778 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://192.168.94.2:8443/api/v1/namespaces/default/events\": dial tcp 192.168.94.2:8443: connect: connection refused" event="&Event{ObjectMeta:{kindnet-999044.1874541aacdcace9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:kindnet-999044,UID:kindnet-999044,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:kindnet-999044,},FirstTimestamp:2025-11-02 23:23:47.754732777 +0000 UTC m=+1.026706721,LastTimestamp:2025-11-02 23:23:47.754732777 +0000 UTC m=+1.026706721,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:kindnet-999044,}" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.755630 1778 server.go:310] "Adding debug handlers to kubelet server" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.756419 1778 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.756608 1778 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.756674 1778 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.756734 1778 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"kindnet-999044\" not found" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.756761 1778 volume_manager.go:313] "Starting Kubelet Volume Manager" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.756884 1778 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.756954 1778 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.757520 1778 factory.go:223] Registration of the systemd 
container factory successfully + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.757576 1778 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.757845 1778 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.94.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/kindnet-999044?timeout=10s\": dial tcp 192.168.94.2:8443: connect: connection refused" interval="200ms" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.758036 1778 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: Get \"https://192.168.94.2:8443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 192.168.94.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.759377 1778 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.766063 1778 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.766142 1778 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.766195 1778 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.767209 1778 policy_none.go:49] "None policy: Start" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.767225 1778 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.767241 1778 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.768030 1778 policy_none.go:47] "Start" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.770593 1778 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.772087 1778 kubelet_network_linux.go:54] "Initialized iptables rules." 
protocol="IPv6" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.772101 1778 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.772116 1778 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.772158 1778 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.772578 1778 reflector.go:205] "Failed to watch" err="failed to list *v1.RuntimeClass: Get \"https://192.168.94.2:8443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 192.168.94.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.800737 1778 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.800826 1778 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.800834 1778 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.801044 1778 plugin_manager.go:118] "Starting Kubelet Plugin Manager" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.801700 1778 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="no imagefs label for configured runtime" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.802414 1778 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"kindnet-999044\" not found" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.888486 1778 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kindnet-999044\" not found" node="kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.891312 1778 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kindnet-999044\" not found" node="kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.901648 1778 kubelet_node_status.go:75] "Attempting to register node" node="kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.901889 1778 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.94.2:8443/api/v1/nodes\": dial tcp 192.168.94.2:8443: connect: connection refused" node="kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.905748 1778 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kindnet-999044\" not found" node="kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.907840 1778 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kindnet-999044\" not found" node="kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.958454 1778 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/4e8f06555f24f0985192f0f7630a7aff-etcd-certs\") pod \"etcd-kindnet-999044\" (UID: \"4e8f06555f24f0985192f0f7630a7aff\") " pod="kube-system/etcd-kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.958471 1778 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/4e8f06555f24f0985192f0f7630a7aff-etcd-data\") pod \"etcd-kindnet-999044\" (UID: \"4e8f06555f24f0985192f0f7630a7aff\") " pod="kube-system/etcd-kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.958493 1778 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/81f7c300fd2c46bef712fce7b0b18718-usr-local-share-ca-certificates\") pod \"kube-apiserver-kindnet-999044\" (UID: \"81f7c300fd2c46bef712fce7b0b18718\") " pod="kube-system/kube-apiserver-kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.958505 1778 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/b21781c5eddd4dc0a03446dea45c6ad9-flexvolume-dir\") pod \"kube-controller-manager-kindnet-999044\" (UID: \"b21781c5eddd4dc0a03446dea45c6ad9\") " pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.958521 1778 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/b21781c5eddd4dc0a03446dea45c6ad9-usr-local-share-ca-certificates\") pod \"kube-controller-manager-kindnet-999044\" (UID: \"b21781c5eddd4dc0a03446dea45c6ad9\") " pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.958538 1778 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/81f7c300fd2c46bef712fce7b0b18718-ca-certs\") pod \"kube-apiserver-kindnet-999044\" (UID: \"81f7c300fd2c46bef712fce7b0b18718\") " pod="kube-system/kube-apiserver-kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.958574 1778 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/81f7c300fd2c46bef712fce7b0b18718-k8s-certs\") pod \"kube-apiserver-kindnet-999044\" (UID: \"81f7c300fd2c46bef712fce7b0b18718\") " pod="kube-system/kube-apiserver-kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.958589 1778 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/b21781c5eddd4dc0a03446dea45c6ad9-ca-certs\") pod \"kube-controller-manager-kindnet-999044\" (UID: \"b21781c5eddd4dc0a03446dea45c6ad9\") " pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.958600 1778 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/b21781c5eddd4dc0a03446dea45c6ad9-etc-ca-certificates\") pod \"kube-controller-manager-kindnet-999044\" (UID: \"b21781c5eddd4dc0a03446dea45c6ad9\") " 
pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.958610 1778 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/b21781c5eddd4dc0a03446dea45c6ad9-k8s-certs\") pod \"kube-controller-manager-kindnet-999044\" (UID: \"b21781c5eddd4dc0a03446dea45c6ad9\") " pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: E1102 23:23:47.958624 1778 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.94.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/kindnet-999044?timeout=10s\": dial tcp 192.168.94.2:8443: connect: connection refused" interval="400ms" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.958633 1778 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/b21781c5eddd4dc0a03446dea45c6ad9-kubeconfig\") pod \"kube-controller-manager-kindnet-999044\" (UID: \"b21781c5eddd4dc0a03446dea45c6ad9\") " pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.958686 1778 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/bd055d13dba9dbf96223767faf3d93d5-kubeconfig\") pod \"kube-scheduler-kindnet-999044\" (UID: \"bd055d13dba9dbf96223767faf3d93d5\") " pod="kube-system/kube-scheduler-kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.958708 1778 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/81f7c300fd2c46bef712fce7b0b18718-etc-ca-certificates\") pod \"kube-apiserver-kindnet-999044\" (UID: \"81f7c300fd2c46bef712fce7b0b18718\") " pod="kube-system/kube-apiserver-kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.958722 1778 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/b21781c5eddd4dc0a03446dea45c6ad9-usr-share-ca-certificates\") pod \"kube-controller-manager-kindnet-999044\" (UID: \"b21781c5eddd4dc0a03446dea45c6ad9\") " pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:47 kindnet-999044 kubelet[1778]: I1102 23:23:47.958732 1778 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/81f7c300fd2c46bef712fce7b0b18718-usr-share-ca-certificates\") pod \"kube-apiserver-kindnet-999044\" (UID: \"81f7c300fd2c46bef712fce7b0b18718\") " pod="kube-system/kube-apiserver-kindnet-999044" + Nov 02 23:23:48 kindnet-999044 kubelet[1778]: I1102 23:23:48.102615 1778 kubelet_node_status.go:75] "Attempting to register node" node="kindnet-999044" + Nov 02 23:23:48 kindnet-999044 kubelet[1778]: E1102 23:23:48.102815 1778 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.94.2:8443/api/v1/nodes\": dial tcp 192.168.94.2:8443: connect: connection refused" node="kindnet-999044" + Nov 02 23:23:48 kindnet-999044 kubelet[1778]: E1102 23:23:48.359417 1778 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://192.168.94.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/kindnet-999044?timeout=10s\": dial tcp 192.168.94.2:8443: connect: connection refused" interval="800ms" + Nov 02 23:23:48 kindnet-999044 kubelet[1778]: I1102 23:23:48.504464 1778 kubelet_node_status.go:75] "Attempting to register node" node="kindnet-999044" + Nov 02 23:23:48 kindnet-999044 kubelet[1778]: E1102 23:23:48.801526 1778 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kindnet-999044\" not found" node="kindnet-999044" + Nov 02 23:23:48 kindnet-999044 kubelet[1778]: E1102 23:23:48.811059 1778 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kindnet-999044\" not found" node="kindnet-999044" + Nov 02 23:23:48 kindnet-999044 kubelet[1778]: E1102 23:23:48.818809 1778 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kindnet-999044\" not found" node="kindnet-999044" + Nov 02 23:23:48 kindnet-999044 kubelet[1778]: E1102 23:23:48.823895 1778 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kindnet-999044\" not found" node="kindnet-999044" + Nov 02 23:23:49 kindnet-999044 kubelet[1778]: E1102 23:23:49.826873 1778 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kindnet-999044\" not found" node="kindnet-999044" + Nov 02 23:23:49 kindnet-999044 kubelet[1778]: E1102 23:23:49.826890 1778 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kindnet-999044\" not found" node="kindnet-999044" + Nov 02 23:23:49 kindnet-999044 kubelet[1778]: E1102 23:23:49.827061 1778 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kindnet-999044\" not found" node="kindnet-999044" + Nov 02 23:23:49 kindnet-999044 kubelet[1778]: E1102 23:23:49.968596 1778 nodelease.go:49] "Failed to get node when trying to set owner ref to the node lease" err="nodes \"kindnet-999044\" not found" node="kindnet-999044" + Nov 02 23:23:50 kindnet-999044 kubelet[1778]: E1102 23:23:50.009396 1778 event.go:359] "Server rejected event (will not retry!)" err="namespaces \"default\" not found" event="&Event{ObjectMeta:{kindnet-999044.1874541aacdcace9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:kindnet-999044,UID:kindnet-999044,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:kindnet-999044,},FirstTimestamp:2025-11-02 23:23:47.754732777 +0000 UTC m=+1.026706721,LastTimestamp:2025-11-02 23:23:47.754732777 +0000 UTC m=+1.026706721,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:kindnet-999044,}" + Nov 02 23:23:50 kindnet-999044 kubelet[1778]: I1102 23:23:50.071580 1778 kubelet_node_status.go:78] "Successfully registered node" node="kindnet-999044" + Nov 02 23:23:50 kindnet-999044 kubelet[1778]: I1102 23:23:50.157840 1778 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-kindnet-999044" + Nov 02 23:23:50 kindnet-999044 kubelet[1778]: E1102 23:23:50.160246 1778 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-kindnet-999044\" is forbidden: no PriorityClass with 
name system-node-critical was found" pod="kube-system/etcd-kindnet-999044" + Nov 02 23:23:50 kindnet-999044 kubelet[1778]: I1102 23:23:50.160260 1778 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-kindnet-999044" + Nov 02 23:23:50 kindnet-999044 kubelet[1778]: E1102 23:23:50.161196 1778 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-kindnet-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-apiserver-kindnet-999044" + Nov 02 23:23:50 kindnet-999044 kubelet[1778]: I1102 23:23:50.161212 1778 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:50 kindnet-999044 kubelet[1778]: E1102 23:23:50.162033 1778 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-kindnet-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:50 kindnet-999044 kubelet[1778]: I1102 23:23:50.162043 1778 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-kindnet-999044" + Nov 02 23:23:50 kindnet-999044 kubelet[1778]: E1102 23:23:50.162793 1778 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-kindnet-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-scheduler-kindnet-999044" + Nov 02 23:23:50 kindnet-999044 kubelet[1778]: I1102 23:23:50.751232 1778 apiserver.go:52] "Watching apiserver" + Nov 02 23:23:50 kindnet-999044 kubelet[1778]: I1102 23:23:50.757106 1778 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:23:50 kindnet-999044 kubelet[1778]: I1102 23:23:50.828811 1778 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-kindnet-999044" + Nov 02 23:23:50 kindnet-999044 kubelet[1778]: I1102 23:23:50.828955 1778 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-kindnet-999044" + Nov 02 23:23:50 kindnet-999044 kubelet[1778]: E1102 23:23:50.830426 1778 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-kindnet-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/etcd-kindnet-999044" + Nov 02 23:23:50 kindnet-999044 kubelet[1778]: E1102 23:23:50.830540 1778 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-kindnet-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-apiserver-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 systemd[1]: Stopping kubelet.service - kubelet: The Kubernetes Node Agent... + ░░ Subject: A stop job for unit kubelet.service has begun execution + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has begun execution. + ░░  + ░░ The job identifier is 802. + Nov 02 23:23:52 kindnet-999044 systemd[1]: kubelet.service: Deactivated successfully. + ░░ Subject: Unit succeeded + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has successfully entered the 'dead' state. + Nov 02 23:23:52 kindnet-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. 
+ ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 802 and the job result is done. + Nov 02 23:23:52 kindnet-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 802. + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.270385 2244 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.270438 2244 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.270458 2244 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.270466 2244 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.270649 2244 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.271498 2244 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.272879 2244 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.275547 2244 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.278594 2244 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. 
Defaulting to /" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.278611 2244 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.278746 2244 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.278758 2244 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"kindnet-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.278844 2244 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.278850 2244 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.278868 2244 container_manager_linux.go:315] "Creating Dynamic Resource Allocation (DRA) manager" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.279314 2244 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.279426 2244 kubelet.go:475] "Attempting to sync node with API server" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.279436 2244 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.279454 2244 kubelet.go:387] "Adding apiserver pod source" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.279473 2244 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.280021 2244 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.280424 2244 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.280449 2244 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.280986 2244 server.go:1262] "Started kubelet" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.281034 2244 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.281220 2244 ratelimit.go:56] "Setting rate limiting for 
endpoint" service="podresources" qps=100 burstTokens=10 + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.281517 2244 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.284018 2244 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.284038 2244 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.284426 2244 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.284455 2244 volume_manager.go:313] "Starting Kubelet Volume Manager" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: E1102 23:23:52.284522 2244 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"kindnet-999044\" not found" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.284807 2244 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.284966 2244 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.287146 2244 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.288596 2244 server.go:310] "Adding debug handlers to kubelet server" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.291986 2244 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.292001 2244 factory.go:223] Registration of the systemd container factory successfully + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.297595 2244 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.298416 2244 kubelet_network_linux.go:54] "Initialized iptables rules." 
protocol="IPv6" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.298431 2244 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.298442 2244 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: E1102 23:23:52.298477 2244 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.308553 2244 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.308569 2244 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.308585 2244 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.308687 2244 state_mem.go:88] "Updated default CPUSet" cpuSet="" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.308696 2244 state_mem.go:96] "Updated CPUSet assignments" assignments={} + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.308709 2244 policy_none.go:49] "None policy: Start" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.308716 2244 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.308724 2244 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.308789 2244 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.308794 2244 policy_none.go:47] "Start" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: E1102 23:23:52.311301 2244 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.311428 2244 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.311440 2244 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.311585 2244 plugin_manager.go:118] "Starting Kubelet Plugin Manager" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: E1102 23:23:52.312040 2244 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." 
err="no imagefs label for configured runtime" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.399648 2244 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.399730 2244 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.399648 2244 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.399954 2244 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.412781 2244 kubelet_node_status.go:75] "Attempting to register node" node="kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.416012 2244 kubelet_node_status.go:124] "Node was previously registered" node="kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.416077 2244 kubelet_node_status.go:78] "Successfully registered node" node="kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.486095 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/81f7c300fd2c46bef712fce7b0b18718-etc-ca-certificates\") pod \"kube-apiserver-kindnet-999044\" (UID: \"81f7c300fd2c46bef712fce7b0b18718\") " pod="kube-system/kube-apiserver-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.486119 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/81f7c300fd2c46bef712fce7b0b18718-usr-local-share-ca-certificates\") pod \"kube-apiserver-kindnet-999044\" (UID: \"81f7c300fd2c46bef712fce7b0b18718\") " pod="kube-system/kube-apiserver-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.486136 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/81f7c300fd2c46bef712fce7b0b18718-usr-share-ca-certificates\") pod \"kube-apiserver-kindnet-999044\" (UID: \"81f7c300fd2c46bef712fce7b0b18718\") " pod="kube-system/kube-apiserver-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.486154 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/b21781c5eddd4dc0a03446dea45c6ad9-flexvolume-dir\") pod \"kube-controller-manager-kindnet-999044\" (UID: \"b21781c5eddd4dc0a03446dea45c6ad9\") " pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.486168 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/b21781c5eddd4dc0a03446dea45c6ad9-kubeconfig\") pod \"kube-controller-manager-kindnet-999044\" (UID: \"b21781c5eddd4dc0a03446dea45c6ad9\") " pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.486180 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/b21781c5eddd4dc0a03446dea45c6ad9-usr-share-ca-certificates\") pod \"kube-controller-manager-kindnet-999044\" (UID: \"b21781c5eddd4dc0a03446dea45c6ad9\") " pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.486193 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/81f7c300fd2c46bef712fce7b0b18718-ca-certs\") pod \"kube-apiserver-kindnet-999044\" (UID: \"81f7c300fd2c46bef712fce7b0b18718\") " pod="kube-system/kube-apiserver-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.486205 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/b21781c5eddd4dc0a03446dea45c6ad9-ca-certs\") pod \"kube-controller-manager-kindnet-999044\" (UID: \"b21781c5eddd4dc0a03446dea45c6ad9\") " pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.486216 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/b21781c5eddd4dc0a03446dea45c6ad9-etc-ca-certificates\") pod \"kube-controller-manager-kindnet-999044\" (UID: \"b21781c5eddd4dc0a03446dea45c6ad9\") " pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.486253 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/b21781c5eddd4dc0a03446dea45c6ad9-k8s-certs\") pod \"kube-controller-manager-kindnet-999044\" (UID: \"b21781c5eddd4dc0a03446dea45c6ad9\") " pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.486287 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/bd055d13dba9dbf96223767faf3d93d5-kubeconfig\") pod \"kube-scheduler-kindnet-999044\" (UID: \"bd055d13dba9dbf96223767faf3d93d5\") " pod="kube-system/kube-scheduler-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.486304 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/81f7c300fd2c46bef712fce7b0b18718-k8s-certs\") pod \"kube-apiserver-kindnet-999044\" (UID: \"81f7c300fd2c46bef712fce7b0b18718\") " pod="kube-system/kube-apiserver-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.486321 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/b21781c5eddd4dc0a03446dea45c6ad9-usr-local-share-ca-certificates\") pod \"kube-controller-manager-kindnet-999044\" (UID: \"b21781c5eddd4dc0a03446dea45c6ad9\") " pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.486333 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/4e8f06555f24f0985192f0f7630a7aff-etcd-certs\") pod \"etcd-kindnet-999044\" (UID: 
\"4e8f06555f24f0985192f0f7630a7aff\") " pod="kube-system/etcd-kindnet-999044" + Nov 02 23:23:52 kindnet-999044 kubelet[2244]: I1102 23:23:52.486344 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/4e8f06555f24f0985192f0f7630a7aff-etcd-data\") pod \"etcd-kindnet-999044\" (UID: \"4e8f06555f24f0985192f0f7630a7aff\") " pod="kube-system/etcd-kindnet-999044" + Nov 02 23:23:53 kindnet-999044 kubelet[2244]: I1102 23:23:53.280171 2244 apiserver.go:52] "Watching apiserver" + Nov 02 23:23:53 kindnet-999044 kubelet[2244]: I1102 23:23:53.285342 2244 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:23:53 kindnet-999044 kubelet[2244]: I1102 23:23:53.323774 2244 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-kindnet-999044" + Nov 02 23:23:53 kindnet-999044 kubelet[2244]: I1102 23:23:53.323775 2244 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-kindnet-999044" + Nov 02 23:23:53 kindnet-999044 kubelet[2244]: I1102 23:23:53.323999 2244 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-kindnet-999044" + Nov 02 23:23:53 kindnet-999044 kubelet[2244]: I1102 23:23:53.324167 2244 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:53 kindnet-999044 kubelet[2244]: E1102 23:23:53.330338 2244 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-kindnet-999044\" already exists" pod="kube-system/kube-scheduler-kindnet-999044" + Nov 02 23:23:53 kindnet-999044 kubelet[2244]: E1102 23:23:53.330571 2244 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-kindnet-999044\" already exists" pod="kube-system/kube-apiserver-kindnet-999044" + Nov 02 23:23:53 kindnet-999044 kubelet[2244]: E1102 23:23:53.330579 2244 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-kindnet-999044\" already exists" pod="kube-system/etcd-kindnet-999044" + Nov 02 23:23:53 kindnet-999044 kubelet[2244]: E1102 23:23:53.331109 2244 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-kindnet-999044\" already exists" pod="kube-system/kube-controller-manager-kindnet-999044" + Nov 02 23:23:53 kindnet-999044 kubelet[2244]: I1102 23:23:53.339355 2244 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-kindnet-999044" podStartSLOduration=1.339348177 podStartE2EDuration="1.339348177s" podCreationTimestamp="2025-11-02 23:23:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:23:53.339208362 +0000 UTC m=+1.092574043" watchObservedRunningTime="2025-11-02 23:23:53.339348177 +0000 UTC m=+1.092713855" + Nov 02 23:23:53 kindnet-999044 kubelet[2244]: I1102 23:23:53.343362 2244 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-scheduler-kindnet-999044" podStartSLOduration=1.343355044 podStartE2EDuration="1.343355044s" podCreationTimestamp="2025-11-02 23:23:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:23:53.343178569 +0000 UTC m=+1.096544254" watchObservedRunningTime="2025-11-02 23:23:53.343355044 +0000 UTC m=+1.096720729" + Nov 02 23:23:53 
kindnet-999044 kubelet[2244]: I1102 23:23:53.352239 2244 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-kindnet-999044" podStartSLOduration=1.352228215 podStartE2EDuration="1.352228215s" podCreationTimestamp="2025-11-02 23:23:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:23:53.347657078 +0000 UTC m=+1.101022768" watchObservedRunningTime="2025-11-02 23:23:53.352228215 +0000 UTC m=+1.105593893" + Nov 02 23:23:53 kindnet-999044 kubelet[2244]: I1102 23:23:53.352301 2244 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-apiserver-kindnet-999044" podStartSLOduration=1.352298001 podStartE2EDuration="1.352298001s" podCreationTimestamp="2025-11-02 23:23:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:23:53.352223016 +0000 UTC m=+1.105588703" watchObservedRunningTime="2025-11-02 23:23:53.352298001 +0000 UTC m=+1.105663693" + Nov 02 23:23:57 kindnet-999044 kubelet[2244]: I1102 23:23:57.078249 2244 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24" + Nov 02 23:23:57 kindnet-999044 kubelet[2244]: I1102 23:23:57.078653 2244 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24" + Nov 02 23:23:58 kindnet-999044 kubelet[2244]: I1102 23:23:58.222397 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/d271c5b2-a322-4f19-b31b-a369b25f0d9b-xtables-lock\") pod \"kube-proxy-mt7ll\" (UID: \"d271c5b2-a322-4f19-b31b-a369b25f0d9b\") " pod="kube-system/kube-proxy-mt7ll" + Nov 02 23:23:58 kindnet-999044 kubelet[2244]: I1102 23:23:58.222431 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/d44690e1-ff4e-4dcd-a772-c4f53bf4b188-xtables-lock\") pod \"kindnet-kt8qf\" (UID: \"d44690e1-ff4e-4dcd-a772-c4f53bf4b188\") " pod="kube-system/kindnet-kt8qf" + Nov 02 23:23:58 kindnet-999044 kubelet[2244]: I1102 23:23:58.222445 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d44690e1-ff4e-4dcd-a772-c4f53bf4b188-lib-modules\") pod \"kindnet-kt8qf\" (UID: \"d44690e1-ff4e-4dcd-a772-c4f53bf4b188\") " pod="kube-system/kindnet-kt8qf" + Nov 02 23:23:58 kindnet-999044 kubelet[2244]: I1102 23:23:58.222463 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z57cj\" (UniqueName: \"kubernetes.io/projected/d44690e1-ff4e-4dcd-a772-c4f53bf4b188-kube-api-access-z57cj\") pod \"kindnet-kt8qf\" (UID: \"d44690e1-ff4e-4dcd-a772-c4f53bf4b188\") " pod="kube-system/kindnet-kt8qf" + Nov 02 23:23:58 kindnet-999044 kubelet[2244]: I1102 23:23:58.222485 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4bw6v\" (UniqueName: \"kubernetes.io/projected/d271c5b2-a322-4f19-b31b-a369b25f0d9b-kube-api-access-4bw6v\") pod \"kube-proxy-mt7ll\" (UID: \"d271c5b2-a322-4f19-b31b-a369b25f0d9b\") " pod="kube-system/kube-proxy-mt7ll" + Nov 02 23:23:58 kindnet-999044 kubelet[2244]: I1102 23:23:58.222501 2244 reconciler_common.go:251] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/d271c5b2-a322-4f19-b31b-a369b25f0d9b-kube-proxy\") pod \"kube-proxy-mt7ll\" (UID: \"d271c5b2-a322-4f19-b31b-a369b25f0d9b\") " pod="kube-system/kube-proxy-mt7ll" + Nov 02 23:23:58 kindnet-999044 kubelet[2244]: I1102 23:23:58.222517 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-cfg\" (UniqueName: \"kubernetes.io/host-path/d44690e1-ff4e-4dcd-a772-c4f53bf4b188-cni-cfg\") pod \"kindnet-kt8qf\" (UID: \"d44690e1-ff4e-4dcd-a772-c4f53bf4b188\") " pod="kube-system/kindnet-kt8qf" + Nov 02 23:23:58 kindnet-999044 kubelet[2244]: I1102 23:23:58.222530 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/d271c5b2-a322-4f19-b31b-a369b25f0d9b-lib-modules\") pod \"kube-proxy-mt7ll\" (UID: \"d271c5b2-a322-4f19-b31b-a369b25f0d9b\") " pod="kube-system/kube-proxy-mt7ll" + Nov 02 23:23:59 kindnet-999044 kubelet[2244]: I1102 23:23:59.353018 2244 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-mt7ll" podStartSLOduration=1.35300191 podStartE2EDuration="1.35300191s" podCreationTimestamp="2025-11-02 23:23:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:23:59.352943356 +0000 UTC m=+7.106309043" watchObservedRunningTime="2025-11-02 23:23:59.35300191 +0000 UTC m=+7.106367591" + Nov 02 23:24:23 kindnet-999044 kubelet[2244]: I1102 23:24:23.200468 2244 kubelet_node_status.go:439] "Fast updating node status as it just became ready" + Nov 02 23:24:23 kindnet-999044 kubelet[2244]: I1102 23:24:23.210272 2244 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kindnet-kt8qf" podStartSLOduration=15.903372039 podStartE2EDuration="25.210260857s" podCreationTimestamp="2025-11-02 23:23:58 +0000 UTC" firstStartedPulling="2025-11-02 23:23:58.723753303 +0000 UTC m=+6.477118968" lastFinishedPulling="2025-11-02 23:24:08.030642102 +0000 UTC m=+15.784007786" observedRunningTime="2025-11-02 23:24:08.415044746 +0000 UTC m=+16.168410430" watchObservedRunningTime="2025-11-02 23:24:23.210260857 +0000 UTC m=+30.963626544" + Nov 02 23:24:23 kindnet-999044 kubelet[2244]: I1102 23:24:23.384926 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bz2j5\" (UniqueName: \"kubernetes.io/projected/941da621-3eb5-47e3-91e8-b4fb716f4886-kube-api-access-bz2j5\") pod \"storage-provisioner\" (UID: \"941da621-3eb5-47e3-91e8-b4fb716f4886\") " pod="kube-system/storage-provisioner" + Nov 02 23:24:23 kindnet-999044 kubelet[2244]: I1102 23:24:23.384951 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/efdeaf6d-98c7-4ab4-bad4-08b7ca189206-config-volume\") pod \"coredns-66bc5c9577-ggj62\" (UID: \"efdeaf6d-98c7-4ab4-bad4-08b7ca189206\") " pod="kube-system/coredns-66bc5c9577-ggj62" + Nov 02 23:24:23 kindnet-999044 kubelet[2244]: I1102 23:24:23.384964 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/941da621-3eb5-47e3-91e8-b4fb716f4886-tmp\") pod \"storage-provisioner\" (UID: \"941da621-3eb5-47e3-91e8-b4fb716f4886\") " 
pod="kube-system/storage-provisioner" + Nov 02 23:24:23 kindnet-999044 kubelet[2244]: I1102 23:24:23.384978 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wp96t\" (UniqueName: \"kubernetes.io/projected/efdeaf6d-98c7-4ab4-bad4-08b7ca189206-kube-api-access-wp96t\") pod \"coredns-66bc5c9577-ggj62\" (UID: \"efdeaf6d-98c7-4ab4-bad4-08b7ca189206\") " pod="kube-system/coredns-66bc5c9577-ggj62" + Nov 02 23:24:24 kindnet-999044 kubelet[2244]: I1102 23:24:24.435098 2244 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=27.435082032 podStartE2EDuration="27.435082032s" podCreationTimestamp="2025-11-02 23:23:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:24.43486809 +0000 UTC m=+32.188233774" watchObservedRunningTime="2025-11-02 23:24:24.435082032 +0000 UTC m=+32.188447811" + Nov 02 23:24:24 kindnet-999044 kubelet[2244]: I1102 23:24:24.445357 2244 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-ggj62" podStartSLOduration=26.445341358 podStartE2EDuration="26.445341358s" podCreationTimestamp="2025-11-02 23:23:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:24.440038584 +0000 UTC m=+32.193404271" watchObservedRunningTime="2025-11-02 23:24:24.445341358 +0000 UTC m=+32.198707049" + Nov 02 23:24:32 kindnet-999044 kubelet[2244]: I1102 23:24:32.537416 2244 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67ncn\" (UniqueName: \"kubernetes.io/projected/5dd1b35f-9c84-46a3-b6d8-563d845b737c-kube-api-access-67ncn\") pod \"netcat-cd4db9dbf-mssqr\" (UID: \"5dd1b35f-9c84-46a3-b6d8-563d845b737c\") " pod="default/netcat-cd4db9dbf-mssqr" + Nov 02 23:24:34 kindnet-999044 kubelet[2244]: I1102 23:24:34.482363 2244 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/netcat-cd4db9dbf-mssqr" podStartSLOduration=1.662241777 podStartE2EDuration="2.482351356s" podCreationTimestamp="2025-11-02 23:24:32 +0000 UTC" firstStartedPulling="2025-11-02 23:24:32.842943044 +0000 UTC m=+40.596308726" lastFinishedPulling="2025-11-02 23:24:33.66305264 +0000 UTC m=+41.416418305" observedRunningTime="2025-11-02 23:24:34.482088326 +0000 UTC m=+42.235454016" watchObservedRunningTime="2025-11-02 23:24:34.482351356 +0000 UTC m=+42.235717041" + + + >>> host: /etc/kubernetes/kubelet.conf: + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://192.168.94.2:8443 + name: mk + contexts: + - context: + cluster: mk + user: system:node:kindnet-999044 + name: system:node:kindnet-999044@mk + current-context: system:node:kindnet-999044@mk + kind: Config + users: + - name: system:node:kindnet-999044 + user: + client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem + client-key: /var/lib/kubelet/pki/kubelet-client-current.pem + + + >>> host: /var/lib/kubelet/config.yaml: + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + >>> k8s: kubectl config: + apiVersion: v1 + clusters: + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:24:04 UTC + provider: 
minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.85.2:8443 + name: custom-flannel-999044 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:24:29 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.76.2:8443 + name: false-999044 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:23:57 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.94.2:8443 + name: kindnet-999044 + contexts: + - context: + cluster: custom-flannel-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:24:04 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: custom-flannel-999044 + name: custom-flannel-999044 + - context: + cluster: false-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:24:29 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: false-999044 + name: false-999044 + - context: + cluster: kindnet-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:23:57 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: kindnet-999044 + name: kindnet-999044 + current-context: false-999044 + kind: Config + users: + - name: custom-flannel-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.key + - name: false-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.key + - name: kindnet-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.key + + + >>> k8s: cms: + apiVersion: v1 + items: + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + 
/UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:23:57Z" + name: kube-root-ca.crt + namespace: default + resourceVersion: "360" + uid: 60115578-6e97-4da9-9681-40f10d05ff06 + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. 
+ creationTimestamp: "2025-11-02T23:23:57Z" + name: kube-root-ca.crt + namespace: kube-node-lease + resourceVersion: "361" + uid: 3d1514a8-3983-4763-80b7-5d2f5de02ae2 + - apiVersion: v1 + data: + jws-kubeconfig-3lwn8u: eyJhbGciOiJIUzI1NiIsImtpZCI6IjNsd244dSJ9..12CR5Yw0SYUQjEAMlmnQgsCI_DQ18kWNykDc8FwGZos + kubeconfig: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://control-plane.minikube.internal:8443 + name: "" + contexts: null + current-context: "" + kind: Config + users: null + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:51Z" + name: cluster-info + namespace: kube-public + resourceVersion: "358" + uid: deaee2b7-bb2a-430d-b620-94fe26209e0f + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal 
endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:23:57Z" + name: kube-root-ca.crt + namespace: kube-public + resourceVersion: "362" + uid: ad6305a5-0fdd-455d-9d3c-57866580f983 + - apiVersion: v1 + data: + Corefile: | + .:53 { + log + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + hosts { + 192.168.94.1 host.minikube.internal + fallthrough + } + forward . /etc/resolv.conf { + max_concurrent 1000 + } + cache 30 { + disable success cluster.local + disable denial cluster.local + } + loop + reload + loadbalance + } + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:52Z" + name: coredns + namespace: kube-system + resourceVersion: "338" + uid: bca6b98d-f44f-45b0-ab2a-78fbec7bd70e + - apiVersion: v1 + data: + client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + requestheader-allowed-names: '["front-proxy-client"]' + requestheader-client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDETCCAfmgAwIBAgIIU/H2GK7RrswwDQYJKoZIhvcNAQELBQAwGTEXMBUGA1UE + AxMOZnJvbnQtcHJveHktY2EwHhcNMjUxMTAyMjMxODQ0WhcNMzUxMDMxMjMyMzQ0 + WjAZMRcwFQYDVQQDEw5mcm9udC1wcm94eS1jYTCCASIwDQYJKoZIhvcNAQEBBQAD + ggEPADCCAQoCggEBAK34m9OwutebSsDKdkONL9z4b1Le2cJ6cxrVE63WvCXo2emp + I2U6qcMA9gVLl6c7v/MWUN1aKdL81osGR9r5DSW3p2b75MPb5MP9OsurTBFbVo0K + tnbL/40wSMOyvzObZxvZgUMU4VVXkFH5LpwWSCnKTUjxINbBEuUzs1kwdVdOUqxC + sMoyQEB4shEi2NA9h3vrhIupFVwJtv5ZT4DWRlNUK4Mpep40lKd5DdZDbtMWq2SB + hUhT/JkyZBJDU/PPqVR9Jh/1A11/Miujw+27+g4fAmkY/Izz2FJFEhePbTVii2PE + H9hYU2tc/Ep/wxXZrZxbjtNh6fFdWtgIUIEISXcCAwEAAaNdMFswDgYDVR0PAQH/ + BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFK/xwnnGiE36tx18aCQ6 + ny6jfH5cMBkGA1UdEQQSMBCCDmZyb250LXByb3h5LWNhMA0GCSqGSIb3DQEBCwUA + A4IBAQA6RYxj6eBlwWRRkgTgZnqHeYyCX5Nt71TrFixcvk7STXGFhHymqXFWf5Eo + MXNQIBgz3I5TGYD0K9pfmlkVG/fcbomAnhkDaWvD0+azMCss7ypVSxQXTw+bSuNN + 71e4IQmPzDz1edMBil7M1bPepqT1ETZsFY6/ol98tjNZGlnRjqFxsAly3QO9doNT + DJPWOjwqyY/dBRTuxJW7SZ7XfMKvtmjqd0UKppR+LafCuC+wsYTFhERfB7fV9Zmm + B+ljh3sbFl9exqvRWOMcDHo6jbpTVfHWU+27i586uLD55JhWq2npJjAyGiIChkee + GXJxKzYUrvuLHH2OjsgHtcHjs3+e + -----END CERTIFICATE----- + requestheader-extra-headers-prefix: '["X-Remote-Extra-"]' + requestheader-group-headers: '["X-Remote-Group"]' + 
requestheader-username-headers: '["X-Remote-User"]' + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:50Z" + name: extension-apiserver-authentication + namespace: kube-system + resourceVersion: "31" + uid: 98d3cbc4-e083-402a-a629-599bd9476b2e + - apiVersion: v1 + data: + since: "2025-11-02" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:50Z" + name: kube-apiserver-legacy-service-account-token-tracking + namespace: kube-system + resourceVersion: "77" + uid: 2c1df947-1587-4b9a-b1b3-b47fb7972d99 + - apiVersion: v1 + data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: 0.0.0.0 + bindAddressHardFail: false + clientConnection: + acceptContentTypes: "" + burst: 0 + contentType: "" + kubeconfig: /var/lib/kube-proxy/kubeconfig.conf + qps: 0 + clusterCIDR: 10.244.0.0/16 + configSyncPeriod: 0s + conntrack: + maxPerCore: 0 + min: null + tcpBeLiberal: false + tcpCloseWaitTimeout: 0s + tcpEstablishedTimeout: 0s + udpStreamTimeout: 0s + udpTimeout: 0s + detectLocal: + bridgeInterface: "" + interfaceNamePrefix: "" + detectLocalMode: "" + enableProfiling: false + healthzBindAddress: "" + hostnameOverride: "" + iptables: + localhostNodePorts: null + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + ipvs: + excludeCIDRs: null + minSyncPeriod: 0s + scheduler: "" + strictARP: false + syncPeriod: 0s + tcpFinTimeout: 0s + tcpTimeout: 0s + udpTimeout: 0s + kind: KubeProxyConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + metricsBindAddress: 0.0.0.0:10249 + mode: "" + nftables: + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + nodePortAddresses: null + oomScoreAdj: null + portRange: "" + showHiddenMetricsForVersion: "" + winkernel: + enableDSR: false + forwardHealthCheckVip: false + networkName: "" + rootHnsEndpointName: "" + sourceVip: "" + kubeconfig.conf: |- + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://control-plane.minikube.internal:8443 + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:52Z" + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system + resourceVersion: "289" + uid: 6c993251-f445-4a2f-8a1a-16bf081301fb + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + 
C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:23:57Z" + name: kube-root-ca.crt + namespace: kube-system + resourceVersion: "363" + uid: 6cac85fe-7e91-48d1-a4ce-b4319fe01b44 + - apiVersion: v1 + data: + ClusterConfiguration: | + apiServer: + certSANs: + - 127.0.0.1 + - localhost + - 192.168.94.2 + extraArgs: + - name: enable-admission-plugins + value: NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + apiVersion: kubeadm.k8s.io/v1beta4 + caCertificateValidityPeriod: 87600h0m0s + certificateValidityPeriod: 8760h0m0s + certificatesDir: /var/lib/minikube/certs + clusterName: mk + controlPlaneEndpoint: control-plane.minikube.internal:8443 + controllerManager: + extraArgs: + - name: allocate-node-cidrs + value: "true" + - name: leader-elect + value: "false" + dns: {} + encryptionAlgorithm: RSA-2048 + etcd: + local: + dataDir: /var/lib/minikube/etcd + imageRepository: registry.k8s.io + kind: ClusterConfiguration + kubernetesVersion: v1.34.1 + networking: + dnsDomain: cluster.local + podSubnet: 10.244.0.0/16 + serviceSubnet: 10.96.0.0/12 + proxy: {} + scheduler: + extraArgs: + - name: leader-elect + value: "false" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:51Z" + name: kubeadm-config + namespace: kube-system + resourceVersion: "243" + uid: 9bd91761-6877-4471-a61e-45214b3bef71 + - apiVersion: v1 + data: + kubelet: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:23:51Z" + name: kubelet-config + namespace: kube-system + resourceVersion: 
"246" + uid: 836b2979-0648-4467-b009-f0d49d1a721f + kind: List + metadata: + resourceVersion: "" + + + >>> host: docker daemon status: + ● docker.service - Docker Application Container Engine + Loaded: loaded (]8;;file://kindnet-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;;; enabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:23:42 UTC; 1min 7s ago + TriggeredBy: ● docker.socket + Docs: ]8;;https://docs.docker.comhttps://docs.docker.com]8;; + Main PID: 1047 (dockerd) + Tasks: 15 + Memory: 284.8M + CPU: 3.897s + CGroup: /system.slice/docker.service + └─1047 /usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + + Nov 02 23:23:42 kindnet-999044 dockerd[1047]: time="2025-11-02T23:23:42.449287195Z" level=info msg="Loading containers: start." + Nov 02 23:23:42 kindnet-999044 dockerd[1047]: time="2025-11-02T23:23:42.597134881Z" level=info msg="Loading containers: done." + Nov 02 23:23:42 kindnet-999044 dockerd[1047]: time="2025-11-02T23:23:42.718563721Z" level=info msg="Docker daemon" commit=f8215cc containerd-snapshotter=false storage-driver=overlay2 version=28.5.1 + Nov 02 23:23:42 kindnet-999044 dockerd[1047]: time="2025-11-02T23:23:42.718637841Z" level=info msg="Initializing buildkit" + Nov 02 23:23:42 kindnet-999044 dockerd[1047]: time="2025-11-02T23:23:42.739872201Z" level=info msg="Completed buildkit initialization" + Nov 02 23:23:42 kindnet-999044 dockerd[1047]: time="2025-11-02T23:23:42.745467983Z" level=info msg="Daemon has completed initialization" + Nov 02 23:23:42 kindnet-999044 dockerd[1047]: time="2025-11-02T23:23:42.745526525Z" level=info msg="API listen on /var/run/docker.sock" + Nov 02 23:23:42 kindnet-999044 dockerd[1047]: time="2025-11-02T23:23:42.745526903Z" level=info msg="API listen on /run/docker.sock" + Nov 02 23:23:42 kindnet-999044 dockerd[1047]: time="2025-11-02T23:23:42.745550846Z" level=info msg="API listen on [::]:2376" + Nov 02 23:23:42 kindnet-999044 systemd[1]: Started docker.service - Docker Application Container Engine. + + + >>> host: docker daemon config: + # ]8;;file://kindnet-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;; + [Unit] + Description=Docker Application Container Engine + Documentation=https://docs.docker.com + After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target + Wants=network-online.target containerd.service + Requires=docker.socket + StartLimitBurst=3 + StartLimitIntervalSec=60 + + [Service] + Type=notify + Restart=always + + + + # This file is a systemd drop-in unit that inherits from the base dockerd configuration. + # The base configuration already specifies an 'ExecStart=...' command. The first directive + # here is to clear out that command inherited from the base configuration. Without this, + # the command from the base configuration and the command specified here are treated as + # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd + # will catch this invalid input and refuse to start the service with an error like: + # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. 
+
+ # NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
+ # container runtimes. If left unlimited, it may result in OOM issues with MySQL.
+ ExecStart=
+ ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12
+ ExecReload=/bin/kill -s HUP $MAINPID
+
+ # Having non-zero Limit*s causes performance problems due to accounting overhead
+ # in the kernel. We recommend using cgroups to do container-local accounting.
+ LimitNOFILE=infinity
+ LimitNPROC=infinity
+ LimitCORE=infinity
+
+ # Uncomment TasksMax if your systemd version supports it.
+ # Only systemd 226 and above support this version.
+ TasksMax=infinity
+ TimeoutStartSec=0
+
+ # set delegate yes so that systemd does not reset the cgroups of docker containers
+ Delegate=yes
+
+ # kill only the docker process, not all processes in the cgroup
+ KillMode=process
+ OOMScoreAdjust=-500
+
+ [Install]
+ WantedBy=multi-user.target
+
+
+ >>> host: /etc/docker/daemon.json:
+ {"exec-opts":["native.cgroupdriver=systemd"],"log-driver":"json-file","log-opts":{"max-size":"100m"},"storage-driver":"overlay2"}
+
+ >>> host: docker system info:
+ Client: Docker Engine - Community
+ Version: 28.5.1
+ Context: default
+ Debug Mode: false
+ Plugins:
+ buildx: Docker Buildx (Docker Inc.)
+ Version: v0.29.1
+ Path: /usr/libexec/docker/cli-plugins/docker-buildx
+
+ Server:
+ Containers: 18
+ Running: 18
+ Paused: 0
+ Stopped: 0
+ Images: 10
+ Server Version: 28.5.1
+ Storage Driver: overlay2
+ Backing Filesystem: extfs
+ Supports d_type: true
+ Using metacopy: false
+ Native Overlay Diff: true
+ userxattr: false
+ Logging Driver: json-file
+ Cgroup Driver: systemd
+ Cgroup Version: 2
+ Plugins:
+ Volume: local
+ Network: bridge host ipvlan macvlan null overlay
+ Log: awslogs fluentd gcplogs gelf journald json-file local splunk syslog
+ CDI spec directories:
+ /etc/cdi
+ /var/run/cdi
+ Swarm: inactive
+ Runtimes: runc io.containerd.runc.v2
+ Default Runtime: runc
+ Init Binary: docker-init
+ containerd version: b98a3aace656320842a23f4a392a33f46af97866
+ runc version: v1.3.0-0-g4ca628d1
+ init version: de40ad0
+ Security Options:
+ seccomp
+ Profile: builtin
+ cgroupns
+ Kernel Version: 6.6.97+
+ Operating System: Debian GNU/Linux 12 (bookworm)
+ OSType: linux
+ Architecture: x86_64
+ CPUs: 8
+ Total Memory: 60.83GiB
+ Name: kindnet-999044
+ ID: 777c4404-1334-47a4-9b15-a2f70845668c
+ Docker Root Dir: /var/lib/docker
+ Debug Mode: false
+ No Proxy: control-plane.minikube.internal
+ Labels:
+ provider=docker
+ Experimental: false
+ Insecure Registries:
+ 10.96.0.0/12
+ ::1/128
+ 127.0.0.0/8
+ Live Restore Enabled: false
+
+
+
+ >>> host: cri-docker daemon status:
+ ● cri-docker.service - CRI Interface for Docker Application Container Engine
+ Loaded: loaded (/lib/systemd/system/cri-docker.service; disabled; preset: enabled)
+ Drop-In: /etc/systemd/system/cri-docker.service.d
+ └─10-cni.conf
+ Active: active (running) since Sun 2025-11-02 23:23:43 UTC; 1min 7s ago
+ TriggeredBy: ● cri-docker.socket
+ Docs: https://docs.mirantis.com
+ Main PID: 1354 (cri-dockerd)
+ Tasks:
13 + Memory: 17.9M + CPU: 689ms + CGroup: /system.slice/cri-docker.service + └─1354 /usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + Nov 02 23:23:48 kindnet-999044 cri-dockerd[1354]: time="2025-11-02T23:23:48Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/4a086422092865f28a7231c1e802f3a93c4e090d7441ffd3f3e9c1fde8a48d58/resolv.conf as [nameserver 192.168.94.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:23:48 kindnet-999044 cri-dockerd[1354]: time="2025-11-02T23:23:48Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/45c03f76c464bec93c2719a5d0bce5d4472724c41b955330c8b86f3d05495b8e/resolv.conf as [nameserver 192.168.94.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:23:57 kindnet-999044 cri-dockerd[1354]: time="2025-11-02T23:23:57Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}" + Nov 02 23:23:58 kindnet-999044 cri-dockerd[1354]: time="2025-11-02T23:23:58Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/ca022d863f380b60a3de622af5f7440b5f21a505eb4cf9b333ef6d80bbba51c6/resolv.conf as [nameserver 192.168.94.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:23:58 kindnet-999044 cri-dockerd[1354]: time="2025-11-02T23:23:58Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/368f30f21d30efd09463d98eb5d1140891096173246a3ad4f9458bad1200634c/resolv.conf as [nameserver 192.168.94.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:24:08 kindnet-999044 cri-dockerd[1354]: time="2025-11-02T23:24:08Z" level=info msg="Stop pulling image docker.io/kindest/kindnetd:v20250512-df8de77b: Status: Downloaded newer image for kindest/kindnetd:v20250512-df8de77b" + Nov 02 23:24:23 kindnet-999044 cri-dockerd[1354]: time="2025-11-02T23:24:23Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0df6c714c264ffab320f755c3cf3c74eb50f525c712f54c586adf8e06eb508c4/resolv.conf as [nameserver 192.168.94.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:24:23 kindnet-999044 cri-dockerd[1354]: time="2025-11-02T23:24:23Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/727a593bf8a9646fcdd6e8af5859921e35f511a9f144d1af8584fd21a9dc74e2/resolv.conf as [nameserver 192.168.94.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:24:32 kindnet-999044 cri-dockerd[1354]: time="2025-11-02T23:24:32Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/446fbb1f754460f6ef6864a7448fe839be4a05a0267ac0e87433788dbcc2e5e0/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local test-pods.svc.cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:24:33 kindnet-999044 cri-dockerd[1354]: 
time="2025-11-02T23:24:33Z" level=info msg="Stop pulling image registry.k8s.io/e2e-test-images/agnhost:2.40: Status: Downloaded newer image for registry.k8s.io/e2e-test-images/agnhost:2.40"
+
+
+ >>> host: cri-docker daemon config:
+ # /lib/systemd/system/cri-docker.service
+ [Unit]
+ Description=CRI Interface for Docker Application Container Engine
+ Documentation=https://docs.mirantis.com
+ After=network-online.target firewalld.service docker.service
+ Wants=network-online.target
+ Requires=cri-docker.socket
+
+ [Service]
+ Type=notify
+ ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd://
+ ExecReload=/bin/kill -s HUP $MAINPID
+ TimeoutSec=0
+ RestartSec=2
+ Restart=always
+
+ # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
+ # Both the old, and new location are accepted by systemd 229 and up, so using the old location
+ # to make them work for either version of systemd.
+ StartLimitBurst=3
+
+ # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
+ # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
+ # this option work for either version of systemd.
+ StartLimitInterval=60s
+
+ # Having non-zero Limit*s causes performance problems due to accounting overhead
+ # in the kernel. We recommend using cgroups to do container-local accounting.
+ LimitNOFILE=infinity
+ LimitNPROC=infinity
+ LimitCORE=infinity
+
+ # Comment TasksMax if your systemd version does not support it.
+ # Only systemd 226 and above support this option.
+ TasksMax=infinity
+ Delegate=yes
+ KillMode=process
+
+ [Install]
+ WantedBy=multi-user.target
+
+ # /etc/systemd/system/cri-docker.service.d/10-cni.conf
+ [Service]
+ ExecStart=
+ ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth
+
+
+ >>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf:
+ [Service]
+ ExecStart=
+ ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth
+
+ >>> host: /usr/lib/systemd/system/cri-docker.service:
+ [Unit]
+ Description=CRI Interface for Docker Application Container Engine
+ Documentation=https://docs.mirantis.com
+ After=network-online.target firewalld.service docker.service
+ Wants=network-online.target
+ Requires=cri-docker.socket
+
+ [Service]
+ Type=notify
+ ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd://
+ ExecReload=/bin/kill -s HUP $MAINPID
+ TimeoutSec=0
+ RestartSec=2
+ Restart=always
+
+ # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
+ # Both the old, and new location are accepted by systemd 229 and up, so using the old location
+ # to make them work for either version of systemd.
+ StartLimitBurst=3
+
+ # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
+ # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
+ # this option work for either version of systemd.
+ StartLimitInterval=60s
+
+ # Having non-zero Limit*s causes performance problems due to accounting overhead
+ # in the kernel. We recommend using cgroups to do container-local accounting.
+ LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Comment TasksMax if your systemd version does not support it. + # Only systemd 226 and above support this option. + TasksMax=infinity + Delegate=yes + KillMode=process + + [Install] + WantedBy=multi-user.target + + + >>> host: cri-dockerd version: + cri-dockerd dev (HEAD) + + + >>> host: containerd daemon status: + ● containerd.service - containerd container runtime + Loaded: loaded (]8;;file://kindnet-999044/lib/systemd/system/containerd.service/lib/systemd/system/containerd.service]8;;; disabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:23:42 UTC; 1min 9s ago + Docs: ]8;;https://containerd.iohttps://containerd.io]8;; + Main PID: 1033 (containerd) + Tasks: 209 + Memory: 102.9M + CPU: 1.059s + CGroup: /system.slice/containerd.service + ├─1033 /usr/bin/containerd + ├─1836 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 459af41f63e2895f81df97bd47dcb87fc1a9d0560dfb6a1f8fd78a0595efeaaf -address /run/containerd/containerd.sock + ├─1837 /usr/bin/containerd-shim-runc-v2 -namespace moby -id a1f5d8ad4fe9be4e3ade6bd3aa58633c267ff92eefaabfbfc5fda7b2c0e751c5 -address /run/containerd/containerd.sock + ├─1866 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 4a086422092865f28a7231c1e802f3a93c4e090d7441ffd3f3e9c1fde8a48d58 -address /run/containerd/containerd.sock + ├─1927 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 45c03f76c464bec93c2719a5d0bce5d4472724c41b955330c8b86f3d05495b8e -address /run/containerd/containerd.sock + ├─2010 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 04b2427acfd48e80efac3047574fa3bbc9cce83d2b06cb69dbb884a76a76a28a -address /run/containerd/containerd.sock + ├─2037 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 31cac75a88ef1581a6affd402b368426d94de49e988980b4d636d1b374a7407e -address /run/containerd/containerd.sock + ├─2060 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 60ec6b14b48e64dda1cddc069b96159a8caaf9f2da58289ff641ed2399f96920 -address /run/containerd/containerd.sock + ├─2124 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 1c4d707cd496dadcb70ff799da09ab35c244b96b5fa941648041a4fffb7e49e9 -address /run/containerd/containerd.sock + ├─2526 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 368f30f21d30efd09463d98eb5d1140891096173246a3ad4f9458bad1200634c -address /run/containerd/containerd.sock + ├─2564 /usr/bin/containerd-shim-runc-v2 -namespace moby -id ca022d863f380b60a3de622af5f7440b5f21a505eb4cf9b333ef6d80bbba51c6 -address /run/containerd/containerd.sock + ├─2608 /usr/bin/containerd-shim-runc-v2 -namespace moby -id a37c0429f9cdeabc68834c795ca464eb7f5ff27dec9e83506f379911437eaf46 -address /run/containerd/containerd.sock + ├─2816 /usr/bin/containerd-shim-runc-v2 -namespace moby -id c5017ef31df1160aba2511e0da59d60dca70617a50def1330313d22ef1cdedbc -address /run/containerd/containerd.sock + ├─2936 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 727a593bf8a9646fcdd6e8af5859921e35f511a9f144d1af8584fd21a9dc74e2 -address /run/containerd/containerd.sock + ├─2938 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 0df6c714c264ffab320f755c3cf3c74eb50f525c712f54c586adf8e06eb508c4 -address /run/containerd/containerd.sock + ├─3044 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 900557d3b6a31d02659c28884cfb0141d9e5d0f04d4c0e80a096f95ea68ea876 -address /run/containerd/containerd.sock + ├─3075 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 9a8bd78558ee0b5f0a5b6e802e549eb0190410bf5010eb1ad83e11d5b03b9df4 -address 
/run/containerd/containerd.sock + ├─3188 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 446fbb1f754460f6ef6864a7448fe839be4a05a0267ac0e87433788dbcc2e5e0 -address /run/containerd/containerd.sock + └─3309 /usr/bin/containerd-shim-runc-v2 -namespace moby -id fdd544602bbe5368695b6cab70d31f44008ebcc40dc1dba7bc8fc50171d02289 -address /run/containerd/containerd.sock + + Nov 02 23:23:42 kindnet-999044 containerd[1033]: time="2025-11-02T23:23:42.134616640Z" level=info msg=serving... address=/run/containerd/containerd.sock + Nov 02 23:23:42 kindnet-999044 containerd[1033]: time="2025-11-02T23:23:42.134632234Z" level=info msg="Start event monitor" + Nov 02 23:23:42 kindnet-999044 containerd[1033]: time="2025-11-02T23:23:42.134640233Z" level=info msg="Start snapshots syncer" + Nov 02 23:23:42 kindnet-999044 containerd[1033]: time="2025-11-02T23:23:42.134650094Z" level=info msg="Start cni network conf syncer for default" + Nov 02 23:23:42 kindnet-999044 containerd[1033]: time="2025-11-02T23:23:42.134656487Z" level=info msg="Start streaming server" + Nov 02 23:23:42 kindnet-999044 systemd[1]: Started containerd.service - containerd container runtime. + Nov 02 23:23:42 kindnet-999044 containerd[1033]: time="2025-11-02T23:23:42.134833536Z" level=info msg="containerd successfully booted in 0.018772s" + Nov 02 23:24:18 kindnet-999044 containerd[1033]: time="2025-11-02T23:24:18.382394079Z" level=error msg="failed to reload cni configuration after receiving fs change event(WRITE \"/etc/cni/net.d/10-kindnet.conflist.temp\")" error="cni config load failed: no network config found in /etc/cni/net.d: cni plugin not initialized: failed to load cni config" + Nov 02 23:24:18 kindnet-999044 containerd[1033]: time="2025-11-02T23:24:18.382468660Z" level=error msg="failed to reload cni configuration after receiving fs change event(WRITE \"/etc/cni/net.d/10-kindnet.conflist.temp\")" error="cni config load failed: no network config found in /etc/cni/net.d: cni plugin not initialized: failed to load cni config" + Nov 02 23:24:18 kindnet-999044 containerd[1033]: time="2025-11-02T23:24:18.382541874Z" level=error msg="failed to reload cni configuration after receiving fs change event(WRITE \"/etc/cni/net.d/10-kindnet.conflist.temp\")" error="cni config load failed: no network config found in /etc/cni/net.d: cni plugin not initialized: failed to load cni config" + + + >>> host: containerd daemon config: + # ]8;;file://kindnet-999044/lib/systemd/system/containerd.service/lib/systemd/system/containerd.service]8;; + # Copyright The containerd Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
+ + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /lib/systemd/system/containerd.service: + # Copyright The containerd Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it. + # Only systemd 226 and above support this version. 
+ TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /etc/containerd/config.toml: + version = 2 + root = "/var/lib/containerd" + state = "/run/containerd" + oom_score = 0 + # imports + + [grpc] + address = "/run/containerd/containerd.sock" + uid = 0 + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + + [debug] + address = "" + uid = 0 + gid = 0 + level = "" + + [metrics] + address = "" + grpc_histogram = false + + [cgroup] + path = "" + + [plugins] + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + [plugins."io.containerd.grpc.v1.cri"] + enable_unprivileged_ports = true + stream_server_address = "" + stream_server_port = "10010" + enable_selinux = false + sandbox_image = "registry.k8s.io/pause:3.10.1" + stats_collect_period = 10 + enable_tls_streaming = false + max_container_log_line_size = 16384 + restrict_oom_score_adj = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = true + snapshotter = "overlayfs" + default_runtime_name = "runc" + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + runtime_type = "" + runtime_engine = "" + runtime_root = "" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + [plugins."io.containerd.gc.v1.scheduler"] + pause_threshold = 0.02 + deletion_threshold = 0 + mutation_threshold = 100 + schedule_delay = "0s" + startup_delay = "100ms" + + + >>> host: containerd config dump: + disabled_plugins = [] + imports = ["/etc/containerd/config.toml"] + oom_score = 0 + plugin_dir = "" + required_plugins = [] + root = "/var/lib/containerd" + state = "/run/containerd" + temp = "" + version = 2 + + [cgroup] + path = "" + + [debug] + address = "" + format = "" + gid = 0 + level = "" + uid = 0 + + [grpc] + address = "/run/containerd/containerd.sock" + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + tcp_address = "" + tcp_tls_ca = "" + tcp_tls_cert = "" + tcp_tls_key = "" + uid = 0 + + [metrics] + address = "" + grpc_histogram = false + + [plugins] + + [plugins."io.containerd.gc.v1.scheduler"] + deletion_threshold = 0 + mutation_threshold = 100 + pause_threshold = 0.02 + schedule_delay = "0s" + startup_delay = "100ms" + + [plugins."io.containerd.grpc.v1.cri"] + cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"] + device_ownership_from_security_context = false + disable_apparmor = false + disable_cgroup = false + disable_hugetlb_controller = true + disable_proc_mount = false + disable_tcp_service = true + drain_exec_sync_io_timeout = "0s" + enable_cdi = false + enable_selinux = false + enable_tls_streaming = false + enable_unprivileged_icmp = false + enable_unprivileged_ports = true + ignore_deprecation_warnings = [] + ignore_image_defined_volumes = false + image_pull_progress_timeout = "5m0s" + image_pull_with_sync_fs = false + max_concurrent_downloads = 3 + max_container_log_line_size = 16384 + netns_mounts_under_state_dir = false + restrict_oom_score_adj = false + sandbox_image = 
"registry.k8s.io/pause:3.10.1" + selinux_category_range = 1024 + stats_collect_period = 10 + stream_idle_timeout = "4h0m0s" + stream_server_address = "" + stream_server_port = "10010" + systemd_cgroup = false + tolerate_missing_hugetlb_controller = true + unset_seccomp_profile = "" + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + ip_pref = "" + max_conf_num = 1 + setup_serially = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + default_runtime_name = "runc" + disable_snapshot_annotations = true + discard_unpacked_layers = true + ignore_blockio_not_enabled_errors = false + ignore_rdt_not_enabled_errors = false + no_pivot = false + snapshotter = "overlayfs" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "io.containerd.runc.v2" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".image_decryption] + key_model = "node" + + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.grpc.v1.cri".registry.auths] + + [plugins."io.containerd.grpc.v1.cri".registry.configs] + + [plugins."io.containerd.grpc.v1.cri".registry.headers] + + [plugins."io.containerd.grpc.v1.cri".registry.mirrors] + + [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming] + tls_cert_file = "" + tls_key_file = "" + + [plugins."io.containerd.internal.v1.opt"] + path = "/opt/containerd" + + [plugins."io.containerd.internal.v1.restart"] + interval = "10s" + + [plugins."io.containerd.internal.v1.tracing"] + + [plugins."io.containerd.metadata.v1.bolt"] + content_sharing_policy = "shared" + + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + + [plugins."io.containerd.nri.v1.nri"] + disable = true + disable_connections = false + plugin_config_path = "/etc/nri/conf.d" + plugin_path = "/opt/nri/plugins" + plugin_registration_timeout = "5s" + plugin_request_timeout = "2s" + socket_path = "/var/run/nri/nri.sock" + + [plugins."io.containerd.runtime.v1.linux"] + no_shim = false + 
runtime = "runc" + runtime_root = "" + shim = "containerd-shim" + shim_debug = false + + [plugins."io.containerd.runtime.v2.task"] + platforms = ["linux/amd64"] + sched_core = false + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + sync_fs = false + + [plugins."io.containerd.service.v1.tasks-service"] + blockio_config_file = "" + rdt_config_file = "" + + [plugins."io.containerd.snapshotter.v1.aufs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.blockfile"] + fs_type = "" + mount_options = [] + root_path = "" + scratch_file = "" + + [plugins."io.containerd.snapshotter.v1.btrfs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.devmapper"] + async_remove = false + base_image_size = "" + discard_blocks = false + fs_options = "" + fs_type = "" + pool_name = "" + root_path = "" + + [plugins."io.containerd.snapshotter.v1.native"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.overlayfs"] + mount_options = [] + root_path = "" + sync_remove = false + upperdir_label = false + + [plugins."io.containerd.snapshotter.v1.zfs"] + root_path = "" + + [plugins."io.containerd.tracing.processor.v1.otlp"] + + [plugins."io.containerd.transfer.v1.local"] + config_path = "" + max_concurrent_downloads = 3 + max_concurrent_uploaded_layers = 3 + + [[plugins."io.containerd.transfer.v1.local".unpack_config]] + differ = "walking" + platform = "linux/amd64" + snapshotter = "overlayfs" + + [proxy_plugins] + + [stream_processors] + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"] + accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"] + args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = "application/vnd.oci.image.layer.v1.tar" + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"] + accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"] + args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = "application/vnd.oci.image.layer.v1.tar+gzip" + + [timeouts] + "io.containerd.timeout.bolt.open" = "0s" + "io.containerd.timeout.metrics.shimstats" = "2s" + "io.containerd.timeout.shim.cleanup" = "5s" + "io.containerd.timeout.shim.load" = "5s" + "io.containerd.timeout.shim.shutdown" = "3s" + "io.containerd.timeout.task.state" = "2s" + + [ttrpc] + address = "" + gid = 0 + uid = 0 + + + >>> host: crio daemon status: + ○ crio.service - Container Runtime Interface for OCI (CRI-O) + Loaded: loaded (]8;;file://kindnet-999044/lib/systemd/system/crio.service/lib/systemd/system/crio.service]8;;; disabled; preset: enabled) + Active: inactive (dead) + Docs: ]8;;https://github.com/cri-o/cri-ohttps://github.com/cri-o/cri-o]8;; + ssh: Process exited with status 3 + + + >>> host: crio daemon config: + # ]8;;file://kindnet-999044/lib/systemd/system/crio.service/lib/systemd/system/crio.service]8;; + [Unit] + Description=Container Runtime Interface for OCI (CRI-O) + Documentation=https://github.com/cri-o/cri-o + Wants=network-online.target + Before=kubelet.service + After=network-online.target + + [Service] + Type=notify + EnvironmentFile=-/etc/default/crio + Environment=GOTRACEBACK=crash + ExecStart=/usr/bin/crio \ + $CRIO_CONFIG_OPTIONS \ + $CRIO_RUNTIME_OPTIONS \ + $CRIO_STORAGE_OPTIONS \ + $CRIO_NETWORK_OPTIONS \ + $CRIO_METRICS_OPTIONS + 
ExecReload=/bin/kill -s HUP $MAINPID + TasksMax=infinity + LimitNOFILE=1048576 + LimitNPROC=1048576 + LimitCORE=infinity + OOMScoreAdjust=-999 + TimeoutStartSec=0 + Restart=on-failure + RestartSec=10 + + [Install] + WantedBy=multi-user.target + Alias=cri-o.service + + + >>> host: /etc/crio: + /etc/crio/crio.conf.d/10-crio.conf + [crio.image] + signature_policy = "/etc/crio/policy.json" + + [crio.runtime] + default_runtime = "crun" + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_root = "/run/crun" + monitor_path = "/usr/libexec/crio/conmon" + allowed_annotations = [ + "io.containers.trace-syscall", + ] + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_root = "/run/runc" + monitor_path = "/usr/libexec/crio/conmon" + /etc/crio/crio.conf.d/02-crio.conf + [crio.image] + # pause_image = "" + + [crio.network] + # cni_default_network = "" + + [crio.runtime] + # cgroup_manager = "" + /etc/crio/policy.json + { "default": [{ "type": "insecureAcceptAnything" }] } + + + >>> host: crio config: + INFO[2025-11-02T23:24:55.293261774Z] Updating config from single file: /etc/crio/crio.conf + INFO[2025-11-02T23:24:55.293279952Z] Updating config from drop-in file: /etc/crio/crio.conf + INFO[2025-11-02T23:24:55.293314303Z] Skipping not-existing config file "/etc/crio/crio.conf" + INFO[2025-11-02T23:24:55.293329115Z] Updating config from path: /etc/crio/crio.conf.d + INFO[2025-11-02T23:24:55.293372178Z] Updating config from drop-in file: /etc/crio/crio.conf.d/02-crio.conf + INFO[2025-11-02T23:24:55.293468182Z] Updating config from drop-in file: /etc/crio/crio.conf.d/10-crio.conf + INFO Using default capabilities: CAP_CHOWN, CAP_DAC_OVERRIDE, CAP_FSETID, CAP_FOWNER, CAP_SETGID, CAP_SETUID, CAP_SETPCAP, CAP_NET_BIND_SERVICE, CAP_KILL + # The CRI-O configuration file specifies all of the available configuration + # options and command-line flags for the crio(8) OCI Kubernetes Container Runtime + # daemon, but in a TOML format that can be more easily modified and versioned. + # + # Please refer to crio.conf(5) for details of all configuration options. + + # CRI-O supports partial configuration reload during runtime, which can be + # done by sending SIGHUP to the running process. Currently supported options + # are explicitly mentioned with: 'This option supports live configuration + # reload'. + + # CRI-O reads its storage defaults from the containers-storage.conf(5) file + # located at /etc/containers/storage.conf. Modify this storage configuration if + # you want to change the system's defaults. If you want to modify storage just + # for CRI-O, you can change the storage configuration options here. + [crio] + + # Path to the "root directory". CRI-O stores all of its data, including + # containers images, in this directory. + # root = "/var/lib/containers/storage" + + # Path to the "run directory". CRI-O stores all of its state in this directory. + # runroot = "/run/containers/storage" + + # Path to the "imagestore". If CRI-O stores all of its images in this directory differently than Root. + # imagestore = "" + + # Storage driver used to manage the storage of images and containers. Please + # refer to containers-storage.conf(5) to see all available storage drivers. + # storage_driver = "" + + # List to pass options to the storage driver. Please refer to + # containers-storage.conf(5) to see all available storage options. 
+ # storage_option = [ + # ] + + # The default log directory where all logs will go unless directly specified by + # the kubelet. The log directory specified must be an absolute directory. + # log_dir = "/var/log/crio/pods" + + # Location for CRI-O to lay down the temporary version file. + # It is used to check if crio wipe should wipe containers, which should + # always happen on a node reboot + # version_file = "/var/run/crio/version" + + # Location for CRI-O to lay down the persistent version file. + # It is used to check if crio wipe should wipe images, which should + # only happen when CRI-O has been upgraded + # version_file_persist = "" + + # InternalWipe is whether CRI-O should wipe containers and images after a reboot when the server starts. + # If set to false, one must use the external command 'crio wipe' to wipe the containers and images in these situations. + # internal_wipe = true + + # InternalRepair is whether CRI-O should check if the container and image storage was corrupted after a sudden restart. + # If it was, CRI-O also attempts to repair the storage. + # internal_repair = true + + # Location for CRI-O to lay down the clean shutdown file. + # It is used to check whether crio had time to sync before shutting down. + # If not found, crio wipe will clear the storage directory. + # clean_shutdown_file = "/var/lib/crio/clean.shutdown" + + # The crio.api table contains settings for the kubelet/gRPC interface. + [crio.api] + + # Path to AF_LOCAL socket on which CRI-O will listen. + # listen = "/var/run/crio/crio.sock" + + # IP address on which the stream server will listen. + # stream_address = "127.0.0.1" + + # The port on which the stream server will listen. If the port is set to "0", then + # CRI-O will allocate a random free port number. + # stream_port = "0" + + # Enable encrypted TLS transport of the stream server. + # stream_enable_tls = false + + # Length of time until open streams terminate due to lack of activity + # stream_idle_timeout = "" + + # Path to the x509 certificate file used to serve the encrypted stream. This + # file can change, and CRI-O will automatically pick up the changes. + # stream_tls_cert = "" + + # Path to the key file used to serve the encrypted stream. This file can + # change and CRI-O will automatically pick up the changes. + # stream_tls_key = "" + + # Path to the x509 CA(s) file used to verify and authenticate client + # communication with the encrypted stream. This file can change and CRI-O will + # automatically pick up the changes. + # stream_tls_ca = "" + + # Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 80 * 1024 * 1024. + # grpc_max_send_msg_size = 83886080 + + # Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 80 * 1024 * 1024. + # grpc_max_recv_msg_size = 83886080 + + # The crio.runtime table contains settings pertaining to the OCI runtime used + # and options for how to set up and manage the OCI runtime. + [crio.runtime] + + # A list of ulimits to be set in containers by default, specified as + # "=:", for example: + # "nofile=1024:2048" + # If nothing is set here, settings will be inherited from the CRI-O daemon + # default_ulimits = [ + # ] + + # If true, the runtime will not use pivot_root, but instead use MS_MOVE. + # no_pivot = false + + # decryption_keys_path is the path where the keys required for + # image decryption are stored. This option supports live configuration reload. 
+ # decryption_keys_path = "/etc/crio/keys/" + + # Path to the conmon binary, used for monitoring the OCI runtime. + # Will be searched for using $PATH if empty. + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon = "" + + # Cgroup setting for conmon + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorCgroup. + # conmon_cgroup = "" + + # Environment variable list for the conmon process, used for passing necessary + # environment variables to conmon or the runtime. + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon_env = [ + # ] + + # Additional environment variables to set for all the + # containers. These are overridden if set in the + # container image spec or in the container runtime configuration. + # default_env = [ + # ] + + # If true, SELinux will be used for pod separation on the host. + # This option is deprecated, and be interpreted from whether SELinux is enabled on the host in the future. + # selinux = false + + # Path to the seccomp.json profile which is used as the default seccomp profile + # for the runtime. If not specified or set to "", then the internal default seccomp profile will be used. + # This option supports live configuration reload. + # seccomp_profile = "" + + # Enable a seccomp profile for privileged containers from the local path. + # This option supports live configuration reload. + # privileged_seccomp_profile = "" + + # Used to change the name of the default AppArmor profile of CRI-O. The default + # profile name is "crio-default". This profile only takes effect if the user + # does not specify a profile via the Kubernetes Pod's metadata annotation. If + # the profile is set to "unconfined", then this equals to disabling AppArmor. + # This option supports live configuration reload. + # apparmor_profile = "crio-default" + + # Path to the blockio class configuration file for configuring + # the cgroup blockio controller. + # blockio_config_file = "" + + # Reload blockio-config-file and rescan blockio devices in the system before applying + # blockio parameters. + # blockio_reload = false + + # Used to change irqbalance service config file path which is used for configuring + # irqbalance daemon. + # irqbalance_config_file = "/etc/sysconfig/irqbalance" + + # irqbalance_config_restore_file allows to set a cpu mask CRI-O should + # restore as irqbalance config at startup. Set to empty string to disable this flow entirely. + # By default, CRI-O manages the irqbalance configuration to enable dynamic IRQ pinning. + # irqbalance_config_restore_file = "/etc/sysconfig/orig_irq_banned_cpus" + + # Path to the RDT configuration file for configuring the resctrl pseudo-filesystem. + # This option supports live configuration reload. + # rdt_config_file = "" + + # Cgroup management implementation used for the runtime. + # cgroup_manager = "systemd" + + # Specify whether the image pull must be performed in a separate cgroup. + # separate_pull_cgroup = "" + + # List of default capabilities for containers. If it is empty or commented out, + # only the capabilities defined in the containers json file by the user/kube + # will be added. + # default_capabilities = [ + # "CHOWN", + # "DAC_OVERRIDE", + # "FSETID", + # "FOWNER", + # "SETGID", + # "SETUID", + # "SETPCAP", + # "NET_BIND_SERVICE", + # "KILL", + # ] + + # Add capabilities to the inheritable set, as well as the default group of permitted, bounding and effective. 
+ # If capabilities are expected to work for non-root users, this option should be set. + # add_inheritable_capabilities = false + + # List of default sysctls. If it is empty or commented out, only the sysctls + # defined in the container json file by the user/kube will be added. + # default_sysctls = [ + # ] + + # List of devices on the host that a + # user can specify with the "io.kubernetes.cri-o.Devices" allowed annotation. + # allowed_devices = [ + # "/dev/fuse", + # "/dev/net/tun", + # ] + + # List of additional devices. specified as + # "::", for example: "--device=/dev/sdc:/dev/xvdc:rwm". + # If it is empty or commented out, only the devices + # defined in the container json file by the user/kube will be added. + # additional_devices = [ + # ] + + # List of directories to scan for CDI Spec files. + # cdi_spec_dirs = [ + # "/etc/cdi", + # "/var/run/cdi", + # ] + + # Change the default behavior of setting container devices uid/gid from CRI's + # SecurityContext (RunAsUser/RunAsGroup) instead of taking host's uid/gid. + # Defaults to false. + # device_ownership_from_security_context = false + + # Path to OCI hooks directories for automatically executed hooks. If one of the + # directories does not exist, then CRI-O will automatically skip them. + # hooks_dir = [ + # "/usr/share/containers/oci/hooks.d", + # ] + + # Path to the file specifying the defaults mounts for each container. The + # format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads + # its default mounts from the following two files: + # + # 1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the + # override file, where users can either add in their own default mounts, or + # override the default mounts shipped with the package. + # + # 2) /usr/share/containers/mounts.conf: This is the default file read for + # mounts. If you want CRI-O to read from a different, specific mounts file, + # you can change the default_mounts_file. Note, if this is done, CRI-O will + # only add mounts it finds in this file. + # + # default_mounts_file = "" + + # Maximum number of processes allowed in a container. + # This option is deprecated. The Kubelet flag '--pod-pids-limit' should be used instead. + # pids_limit = -1 + + # Maximum sized allowed for the container log file. Negative numbers indicate + # that no size limit is imposed. If it is positive, it must be >= 8192 to + # match/exceed conmon's read buffer. The file is truncated and re-opened so the + # limit is never exceeded. This option is deprecated. The Kubelet flag '--container-log-max-size' should be used instead. + # log_size_max = -1 + + # Whether container output should be logged to journald in addition to the kubernetes log file + # log_to_journald = false + + # Path to directory in which container exit files are written to by conmon. + # container_exits_dir = "/var/run/crio/exits" + + # Path to directory for container attach sockets. + # container_attach_socket_dir = "/var/run/crio" + + # The prefix to use for the source of the bind mounts. + # bind_mount_prefix = "" + + # If set to true, all containers will run in read-only mode. + # read_only = false + + # Changes the verbosity of the logs based on the level it is set to. Options + # are fatal, panic, error, warn, info, debug and trace. This option supports + # live configuration reload. + # log_level = "info" + + # Filter the log messages by the provided regular expression. + # This option supports live configuration reload. 
+ # log_filter = "" + + # The UID mappings for the user namespace of each container. A range is + # specified in the form containerUID:HostUID:Size. Multiple ranges must be + # separated by comma. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # uid_mappings = "" + + # The GID mappings for the user namespace of each container. A range is + # specified in the form containerGID:HostGID:Size. Multiple ranges must be + # separated by comma. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # gid_mappings = "" + + # If set, CRI-O will reject any attempt to map host UIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_uid = -1 + + # If set, CRI-O will reject any attempt to map host GIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_gid = -1 + + # The minimal amount of time in seconds to wait before issuing a timeout + # regarding the proper termination of the container. The lowest possible + # value is 30s, whereas lower values are not considered by CRI-O. + # ctr_stop_timeout = 30 + + # drop_infra_ctr determines whether CRI-O drops the infra container + # when a pod does not have a private PID namespace, and does not use + # a kernel separating runtime (like kata). + # It requires manage_ns_lifecycle to be true. + # drop_infra_ctr = true + + # infra_ctr_cpuset determines what CPUs will be used to run infra containers. + # You can use linux CPU list format to specify desired CPUs. + # To get better isolation for guaranteed pods, set this parameter to be equal to kubelet reserved-cpus. + # infra_ctr_cpuset = "" + + # shared_cpuset determines the CPU set which is allowed to be shared between guaranteed containers, + # regardless of, and in addition to, the exclusiveness of their CPUs. + # This field is optional and would not be used if not specified. + # You can specify CPUs in the Linux CPU list format. + # shared_cpuset = "" + + # The directory where the state of the managed namespaces gets tracked. + # Only used when manage_ns_lifecycle is true. + # namespaces_dir = "/var/run" + + # pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle + # pinns_path = "" + + # Globally enable/disable CRIU support which is necessary to + # checkpoint and restore container or pods (even if CRIU is found in $PATH). + # enable_criu_support = true + + # Enable/disable the generation of the container, + # sandbox lifecycle events to be sent to the Kubelet to optimize the PLEG + # enable_pod_events = false + + # default_runtime is the _name_ of the OCI runtime to be used as the default. + # The name is matched against the runtimes map below. + # default_runtime = "crun" + + # A list of paths that, when absent from the host, + # will cause a container creation to fail (as opposed to the current behavior being created as a directory). 
+ # This option is to protect from source locations whose existence as a directory could jeopardize the health of the node, and whose + # creation as a file is not desired either. + # An example is /etc/hostname, which will cause failures on reboot if it's created as a directory, but often doesn't exist because + # the hostname is being managed dynamically. + # absent_mount_sources_to_reject = [ + # ] + + # The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes. + # The runtime to use is picked based on the runtime handler provided by the CRI. + # If no runtime handler is provided, the "default_runtime" will be used. + # Each entry in the table should follow the format: + # + # [crio.runtime.runtimes.runtime-handler] + # runtime_path = "/path/to/the/executable" + # runtime_type = "oci" + # runtime_root = "/path/to/the/root" + # inherit_default_runtime = false + # monitor_path = "/path/to/container/monitor" + # monitor_cgroup = "/cgroup/path" + # monitor_exec_cgroup = "/cgroup/path" + # monitor_env = [] + # privileged_without_host_devices = false + # allowed_annotations = [] + # platform_runtime_paths = { "os/arch" = "/path/to/binary" } + # no_sync_log = false + # default_annotations = {} + # stream_websockets = false + # seccomp_profile = "" + # Where: + # - runtime-handler: Name used to identify the runtime. + # - runtime_path (optional, string): Absolute path to the runtime executable in + # the host filesystem. If omitted, the runtime-handler identifier should match + # the runtime executable name, and the runtime executable should be placed + # in $PATH. + # - runtime_type (optional, string): Type of runtime, one of: "oci", "vm". If + # omitted, an "oci" runtime is assumed. + # - runtime_root (optional, string): Root directory for storage of containers + # state. + # - runtime_config_path (optional, string): the path for the runtime configuration + # file. This can only be used with when using the VM runtime_type. + # - inherit_default_runtime (optional, bool): when true the runtime_path, + # runtime_type, runtime_root and runtime_config_path will be replaced by + # the values from the default runtime on load time. + # - privileged_without_host_devices (optional, bool): an option for restricting + # host devices from being passed to privileged containers. + # - allowed_annotations (optional, array of strings): an option for specifying + # a list of experimental annotations that this runtime handler is allowed to process. + # The currently recognized values are: + # "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod. + # "io.kubernetes.cri-o.cgroup2-mount-hierarchy-rw" for mounting cgroups writably when set to "true". + # "io.kubernetes.cri-o.Devices" for configuring devices for the pod. + # "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm. + # "io.kubernetes.cri-o.UnifiedCgroup.$CTR_NAME" for configuring the cgroup v2 unified block for a container. + # "io.containers.trace-syscall" for tracing syscalls via the OCI seccomp BPF hook. + # "io.kubernetes.cri-o.seccompNotifierAction" for enabling the seccomp notifier feature. + # "io.kubernetes.cri-o.umask" for setting the umask for container init process. 
+ # "io.kubernetes.cri.rdt-class" for setting the RDT class of a container + # "seccomp-profile.kubernetes.cri-o.io" for setting the seccomp profile for: + # - a specific container by using: "seccomp-profile.kubernetes.cri-o.io/" + # - a whole pod by using: "seccomp-profile.kubernetes.cri-o.io/POD" + # Note that the annotation works on containers as well as on images. + # For images, the plain annotation "seccomp-profile.kubernetes.cri-o.io" + # can be used without the required "/POD" suffix or a container name. + # "io.kubernetes.cri-o.DisableFIPS" for disabling FIPS mode in a Kubernetes pod within a FIPS-enabled cluster. + # - monitor_path (optional, string): The path of the monitor binary. Replaces + # deprecated option "conmon". + # - monitor_cgroup (optional, string): The cgroup the container monitor process will be put in. + # Replaces deprecated option "conmon_cgroup". + # - monitor_exec_cgroup (optional, string): If set to "container", indicates exec probes + # should be moved to the container's cgroup + # - monitor_env (optional, array of strings): Environment variables to pass to the monitor. + # Replaces deprecated option "conmon_env". + # When using the pod runtime and conmon-rs, then the monitor_env can be used to further configure + # conmon-rs by using: + # - LOG_DRIVER=[none,systemd,stdout] - Enable logging to the configured target, defaults to none. + # - HEAPTRACK_OUTPUT_PATH=/path/to/dir - Enable heaptrack profiling and save the files to the set directory. + # - HEAPTRACK_BINARY_PATH=/path/to/heaptrack - Enable heaptrack profiling and use set heaptrack binary. + # - platform_runtime_paths (optional, map): A mapping of platforms to the corresponding + # runtime executable paths for the runtime handler. + # - container_min_memory (optional, string): The minimum memory that must be set for a container. + # This value can be used to override the currently set global value for a specific runtime. If not set, + # a global default value of "12 MiB" will be used. + # - no_sync_log (optional, bool): If set to true, the runtime will not sync the log file on rotate or container exit. + # This option is only valid for the 'oci' runtime type. Setting this option to true can cause data loss, e.g. + # when a machine crash happens. + # - default_annotations (optional, map): Default annotations if not overridden by the pod spec. + # - stream_websockets (optional, bool): Enable the WebSocket protocol for container exec, attach and port forward. + # - seccomp_profile (optional, string): The absolute path of the seccomp.json profile which is used as the default + # seccomp profile for the runtime. + # If not specified or set to "", the runtime seccomp_profile will be used. + # If that is also not specified or set to "", the internal default seccomp profile will be applied. + # + # Using the seccomp notifier feature: + # + # This feature can help you to debug seccomp related issues, for example if + # blocked syscalls (permission denied errors) have negative impact on the workload. + # + # To be able to use this feature, configure a runtime which has the annotation + # "io.kubernetes.cri-o.seccompNotifierAction" in the allowed_annotations array. + # + # It also requires at least runc 1.1.0 or crun 0.19 which support the notifier + # feature. + # + # If everything is setup, CRI-O will modify chosen seccomp profiles for + # containers if the annotation "io.kubernetes.cri-o.seccompNotifierAction" is + # set on the Pod sandbox. 
CRI-O will then get notified if a container is using + # a blocked syscall and then terminate the workload after a timeout of 5 + # seconds if the value of "io.kubernetes.cri-o.seccompNotifierAction=stop". + # + # This also means that multiple syscalls can be captured during that period, + # while the timeout will get reset once a new syscall has been discovered. + # + # This also means that the Pods "restartPolicy" has to be set to "Never", + # otherwise the kubelet will restart the container immediately. + # + # Please be aware that CRI-O is not able to get notified if a syscall gets + # blocked based on the seccomp defaultAction, which is a general runtime + # limitation. + + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_type = "" + runtime_root = "/run/crun" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + allowed_annotations = [ + "io.containers.trace-syscall", + ] + privileged_without_host_devices = false + + + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_type = "" + runtime_root = "/run/runc" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + + privileged_without_host_devices = false + + + + # The workloads table defines ways to customize containers with different resources + # that work based on annotations, rather than the CRI. + # Note, the behavior of this table is EXPERIMENTAL and may change at any time. + # Each workload, has a name, activation_annotation, annotation_prefix and set of resources it supports mutating. + # The currently supported resources are "cpuperiod" "cpuquota", "cpushares", "cpulimit" and "cpuset". The values for "cpuperiod" and "cpuquota" are denoted in microseconds. + # The value for "cpulimit" is denoted in millicores, this value is used to calculate the "cpuquota" with the supplied "cpuperiod" or the default "cpuperiod". + # Note that the "cpulimit" field overrides the "cpuquota" value supplied in this configuration. + # Each resource can have a default value specified, or be empty. + # For a container to opt-into this workload, the pod should be configured with the annotation $activation_annotation (key only, value is ignored). + # To customize per-container, an annotation of the form $annotation_prefix.$resource/$ctrName = "value" can be specified + # signifying for that resource type to override the default value. + # If the annotation_prefix is not present, every container in the pod will be given the default values. + # Example: + # [crio.runtime.workloads.workload-type] + # activation_annotation = "io.crio/workload" + # annotation_prefix = "io.crio.workload-type" + # [crio.runtime.workloads.workload-type.resources] + # cpuset = "0-1" + # cpushares = "5" + # cpuquota = "1000" + # cpuperiod = "100000" + # cpulimit = "35" + # Where: + # The workload name is workload-type. + # To specify, the pod must have the "io.crio.workload" annotation (this is a precise string match). + # This workload supports setting cpuset and cpu resources. + # annotation_prefix is used to customize the different resources. 
+ # To configure the cpu shares a container gets in the example above, the pod would have to have the following annotation: + # "io.crio.workload-type/$container_name = {"cpushares": "value"}" + + # hostnetwork_disable_selinux determines whether + # SELinux should be disabled within a pod when it is running in the host network namespace + # Default value is set to true + # hostnetwork_disable_selinux = true + + # disable_hostport_mapping determines whether to enable/disable + # the container hostport mapping in CRI-O. + # Default value is set to 'false' + # disable_hostport_mapping = false + + # timezone To set the timezone for a container in CRI-O. + # If an empty string is provided, CRI-O retains its default behavior. Use 'Local' to match the timezone of the host machine. + # timezone = "" + + # The crio.image table contains settings pertaining to the management of OCI images. + # + # CRI-O reads its configured registries defaults from the system wide + # containers-registries.conf(5) located in /etc/containers/registries.conf. + [crio.image] + + # Default transport for pulling images from a remote container storage. + # default_transport = "docker://" + + # The path to a file containing credentials necessary for pulling images from + # secure registries. The file is similar to that of /var/lib/kubelet/config.json + # global_auth_file = "" + + # The image used to instantiate infra containers. + # This option supports live configuration reload. + # pause_image = "registry.k8s.io/pause:3.10.1" + + # The path to a file containing credentials specific for pulling the pause_image from + # above. The file is similar to that of /var/lib/kubelet/config.json + # This option supports live configuration reload. + # pause_image_auth_file = "" + + # The command to run to have a container stay in the paused state. + # When explicitly set to "", it will fallback to the entrypoint and command + # specified in the pause image. When commented out, it will fallback to the + # default: "/pause". This option supports live configuration reload. + # pause_command = "/pause" + + # List of images to be excluded from the kubelet's garbage collection. + # It allows specifying image names using either exact, glob, or keyword + # patterns. Exact matches must match the entire name, glob matches can + # have a wildcard * at the end, and keyword matches can have wildcards + # on both ends. By default, this list includes the "pause" image if + # configured by the user, which is used as a placeholder in Kubernetes pods. + # pinned_images = [ + # ] + + # Path to the file which decides what sort of policy we use when deciding + # whether or not to trust an image that we've pulled. It is not recommended that + # this option be used, as the default behavior of using the system-wide default + # policy (i.e., /etc/containers/policy.json) is most often preferred. Please + # refer to containers-policy.json(5) for more details. + signature_policy = "/etc/crio/policy.json" + + # Root path for pod namespace-separated signature policies. + # The final policy to be used on image pull will be /.json. + # If no pod namespace is being provided on image pull (via the sandbox config), + # or the concatenated path is non existent, then the signature_policy or system + # wide policy will be used as fallback. Must be an absolute path. + # signature_policy_dir = "/etc/crio/policies" + + # List of registries to skip TLS verification for pulling images. 
Please + # consider configuring the registries via /etc/containers/registries.conf before + # changing them here. + # This option is deprecated. Use registries.conf file instead. + # insecure_registries = [ + # ] + + # Controls how image volumes are handled. The valid values are mkdir, bind and + # ignore; the latter will ignore volumes entirely. + # image_volumes = "mkdir" + + # Temporary directory to use for storing big files + # big_files_temporary_dir = "" + + # If true, CRI-O will automatically reload the mirror registry when + # there is an update to the 'registries.conf.d' directory. Default value is set to 'false'. + # auto_reload_registries = false + + # The timeout for an image pull to make progress until the pull operation + # gets canceled. This value will be also used for calculating the pull progress interval to pull_progress_timeout / 10. + # Can be set to 0 to disable the timeout as well as the progress output. + # pull_progress_timeout = "0s" + + # The mode of short name resolution. + # The valid values are "enforcing" and "disabled", and the default is "enforcing". + # If "enforcing", an image pull will fail if a short name is used, but the results are ambiguous. + # If "disabled", the first result will be chosen. + # short_name_mode = "enforcing" + + # OCIArtifactMountSupport is whether CRI-O should support OCI artifacts. + # If set to false, mounting OCI Artifacts will result in an error. + # oci_artifact_mount_support = true + # The crio.network table containers settings pertaining to the management of + # CNI plugins. + [crio.network] + + # The default CNI network name to be selected. If not set or "", then + # CRI-O will pick-up the first one found in network_dir. + # cni_default_network = "" + + # Path to the directory where CNI configuration files are located. + # network_dir = "/etc/cni/net.d/" + + # Paths to directories where CNI plugin binaries are located. + # plugin_dirs = [ + # "/opt/cni/bin/", + # ] + + # List of included pod metrics. + # included_pod_metrics = [ + # ] + + # A necessary configuration for Prometheus based metrics retrieval + [crio.metrics] + + # Globally enable or disable metrics support. + # enable_metrics = false + + # Specify enabled metrics collectors. + # Per default all metrics are enabled. + # It is possible, to prefix the metrics with "container_runtime_" and "crio_". + # For example, the metrics collector "operations" would be treated in the same + # way as "crio_operations" and "container_runtime_crio_operations". + # metrics_collectors = [ + # "image_pulls_layer_size", + # "containers_events_dropped_total", + # "containers_oom_total", + # "processes_defunct", + # "operations_total", + # "operations_latency_seconds", + # "operations_latency_seconds_total", + # "operations_errors_total", + # "image_pulls_bytes_total", + # "image_pulls_skipped_bytes_total", + # "image_pulls_failure_total", + # "image_pulls_success_total", + # "image_layer_reuse_total", + # "containers_oom_count_total", + # "containers_seccomp_notifier_count_total", + # "resources_stalled_at_stage", + # "containers_stopped_monitor_count", + # ] + # The IP address or hostname on which the metrics server will listen. + # metrics_host = "127.0.0.1" + + # The port on which the metrics server will listen. + # metrics_port = 9090 + + # Local socket path to bind the metrics server to + # metrics_socket = "" + + # The certificate for the secure metrics server. + # If the certificate is not available on disk, then CRI-O will generate a + # self-signed one. 
CRI-O also watches for changes of this path and reloads the + # certificate on any modification event. + # metrics_cert = "" + + # The certificate key for the secure metrics server. + # Behaves in the same way as the metrics_cert. + # metrics_key = "" + + # A necessary configuration for OpenTelemetry trace data exporting + [crio.tracing] + + # Globally enable or disable exporting OpenTelemetry traces. + # enable_tracing = false + + # Address on which the gRPC trace collector listens on. + # tracing_endpoint = "127.0.0.1:4317" + + # Number of samples to collect per million spans. Set to 1000000 to always sample. + # tracing_sampling_rate_per_million = 0 + + # CRI-O NRI configuration. + [crio.nri] + + # Globally enable or disable NRI. + # enable_nri = true + + # NRI socket to listen on. + # nri_listen = "/var/run/nri/nri.sock" + + # NRI plugin directory to use. + # nri_plugin_dir = "/opt/nri/plugins" + + # NRI plugin configuration directory to use. + # nri_plugin_config_dir = "/etc/nri/conf.d" + + # Disable connections from externally launched NRI plugins. + # nri_disable_connections = false + + # Timeout for a plugin to register itself with NRI. + # nri_plugin_registration_timeout = "5s" + + # Timeout for a plugin to handle an NRI request. + # nri_plugin_request_timeout = "2s" + + # NRI default validator configuration. + # If enabled, the builtin default validator can be used to reject a container if some + # NRI plugin requested a restricted adjustment. Currently the following adjustments + # can be restricted/rejected: + # - OCI hook injection + # - adjustment of runtime default seccomp profile + # - adjustment of unconfied seccomp profile + # - adjustment of a custom seccomp profile + # - adjustment of linux namespaces + # Additionally, the default validator can be used to reject container creation if any + # of a required set of plugins has not processed a container creation request, unless + # the container has been annotated to tolerate a missing plugin. + # + # [crio.nri.default_validator] + # nri_enable_default_validator = false + # nri_validator_reject_oci_hook_adjustment = false + # nri_validator_reject_runtime_default_seccomp_adjustment = false + # nri_validator_reject_unconfined_seccomp_adjustment = false + # nri_validator_reject_custom_seccomp_adjustment = false + # nri_validator_reject_namespace_adjustment = false + # nri_validator_required_plugins = [ + # ] + # nri_validator_tolerate_missing_plugins_annotation = "" + + # Necessary information pertaining to container and pod stats reporting. + [crio.stats] + + # The number of seconds between collecting pod and container stats. + # If set to 0, the stats are collected on-demand instead. + # stats_collection_period = 0 + + # The number of seconds between collecting pod/container stats and pod + # sandbox metrics. If set to 0, the metrics/stats are collected on-demand instead. + # collection_period = 0 + + + ----------------------- debugLogs end: kindnet-999044 [took: 14.581804721s] -------------------------------- + helpers_test.go:175: Cleaning up "kindnet-999044" profile ... 
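The crio.conf dump above documents the layout of the [crio.runtime.runtimes] table but only ships concrete entries for crun and runc. As a minimal sketch of what an additional handler entry could look like (not taken from this log; the handler name and paths are illustrative, and only keys that the dump itself documents are used):

[crio.runtime.runtimes.myhandler]
# Illustrative path only; runtime_path must point at a real OCI runtime binary on the node.
runtime_path = "/usr/local/bin/myhandler"
runtime_type = "oci"
runtime_root = "/run/myhandler"
monitor_path = "/usr/libexec/crio/conmon"
monitor_cgroup = "system.slice"
# Only annotations listed as recognized in the dump above should be allowed here.
allowed_annotations = [
  "io.kubernetes.cri-o.Devices",
]
privileged_without_host_devices = false

A pod would typically select such a handler through a Kubernetes RuntimeClass whose handler field matches the entry name; when no runtime handler is provided by the CRI, CRI-O falls back to default_runtime as described in the dump.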
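Similarly, the [crio.metrics] section shown above is entirely commented out in this run. A sketch of enabling the Prometheus endpoint, assuming the keys and default values that appear in the dump (the collector subset chosen here is illustrative):

[crio.metrics]
enable_metrics = true
# Host and port match the commented-out defaults in the dump; shown explicitly for clarity.
metrics_host = "127.0.0.1"
metrics_port = 9090
# Restrict collection to a subset of the collectors listed in the dump (illustrative choice).
metrics_collectors = [
  "operations_total",
  "image_pulls_failure_total",
]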
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p kindnet-999044 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p kindnet-999044: (3.122781991s) +=== CONT TestNetworkPlugins/group/kubenet +=== RUN TestNetworkPlugins/group/kubenet/Start + net_test.go:112: (dbg) Run: out/minikube-linux-amd64 start -p kubenet-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --network-plugin=kubenet --driver=docker --container-runtime=docker + net_test.go:211: + ----------------------- debugLogs start: false-999044 [pass: true] -------------------------------- + >>> netcat: nslookup kubernetes.default: + Server: 10.96.0.10 + Address: 10.96.0.10#53 + + Name: kubernetes.default.svc.cluster.local + Address: 10.96.0.1 + + + + >>> netcat: nc 10.96.0.10 udp/53: + Connection to 10.96.0.10 53 port [udp/*] succeeded! + + + >>> netcat: nc 10.96.0.10 tcp/53: + Connection to 10.96.0.10 53 port [tcp/*] succeeded! + + + >>> netcat: /etc/nsswitch.conf: + cat: can't open '/etc/nsswitch.conf': No such file or directory + command terminated with exit code 1 + + + >>> netcat: /etc/hosts: + # Kubernetes-managed hosts file. + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + fe00::0 ip6-localnet + fe00::0 ip6-mcastprefix + fe00::1 ip6-allnodes + fe00::2 ip6-allrouters + 10.244.0.4 netcat-cd4db9dbf-lrsv6 + + + >>> netcat: /etc/resolv.conf: + nameserver 10.96.0.10 + search default.svc.cluster.local svc.cluster.local cluster.local test-pods.svc.cluster.local c.k8s-infra-prow-build.internal google.internal + options ndots:5 + + + >>> host: /etc/nsswitch.conf: + # /etc/nsswitch.conf + # + # Example configuration of GNU Name Service Switch functionality. + # If you have the `glibc-doc-reference' and `info' packages installed, try: + # `info libc "Name Service Switch"' for information about this file. + + passwd: files + group: files + shadow: files + gshadow: files + + hosts: files dns + networks: files + + protocols: db files + services: db files + ethers: db files + rpc: db files + + netgroup: nis + + + >>> host: /etc/hosts: + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + fe00:: ip6-localnet + ff00:: ip6-mcastprefix + ff02::1 ip6-allnodes + ff02::2 ip6-allrouters + 192.168.76.2 false-999044 + 192.168.76.1 host.minikube.internal + 192.168.76.2 control-plane.minikube.internal + + + >>> host: /etc/resolv.conf: + # Generated by Docker Engine. + # This file can be edited; Docker Engine will not make further changes once it + # has been modified. 
+ + nameserver 192.168.76.1 + search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal + options ndots:5 + + # Based on host file: '/etc/resolv.conf' (internal resolver) + # ExtServers: [host(10.35.240.10)] + # Overrides: [] + # Option ndots from: host + + + >>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, : + Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice + NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME + node/false-999044 Ready control-plane 33s v1.34.1 192.168.76.2 Debian GNU/Linux 12 (bookworm) 6.6.97+ docker://28.5.1 + + NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR + default service/kubernetes ClusterIP 10.96.0.1 443/TCP 32s + default service/netcat ClusterIP 10.108.248.171 8080/TCP 15s app=netcat + kube-system service/kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP,9153/TCP 30s k8s-app=kube-dns + + NAMESPACE NAME ENDPOINTS AGE + default endpoints/kubernetes 192.168.76.2:8443 32s + default endpoints/netcat 10.244.0.4:8080 15s + kube-system endpoints/k8s.io-minikube-hostpath 24s + kube-system endpoints/kube-dns 10.244.0.3:53,10.244.0.3:53,10.244.0.3:9153 25s + + NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR + kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 30s kube-proxy registry.k8s.io/kube-proxy:v1.34.1 k8s-app=kube-proxy + + NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR + default deployment.apps/netcat 1/1 1 1 15s dnsutils registry.k8s.io/e2e-test-images/agnhost:2.40 app=netcat + kube-system deployment.apps/coredns 1/1 1 1 30s coredns registry.k8s.io/coredns/coredns:v1.12.1 k8s-app=kube-dns + + NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES + default pod/netcat-cd4db9dbf-lrsv6 1/1 Running 0 15s 10.244.0.4 false-999044 + kube-system pod/coredns-66bc5c9577-n9hbq 1/1 Running 0 25s 10.244.0.3 false-999044 + kube-system pod/etcd-false-999044 1/1 Running 0 31s 192.168.76.2 false-999044 + kube-system pod/kube-apiserver-false-999044 1/1 Running 0 31s 192.168.76.2 false-999044 + kube-system pod/kube-controller-manager-false-999044 1/1 Running 0 31s 192.168.76.2 false-999044 + kube-system pod/kube-proxy-f62r8 1/1 Running 0 25s 192.168.76.2 false-999044 + kube-system pod/kube-scheduler-false-999044 1/1 Running 0 31s 192.168.76.2 false-999044 + kube-system pod/storage-provisioner 1/1 Running 0 24s 192.168.76.2 false-999044 + + + >>> host: crictl pods: + POD ID CREATED STATE NAME NAMESPACE ATTEMPT RUNTIME + 69c9c83ffdf3b 15 seconds ago Ready netcat-cd4db9dbf-lrsv6 default 0 (default) + a475faf80adb4 25 seconds ago Ready storage-provisioner kube-system 0 (default) + 9c805584674cc 25 seconds ago Ready coredns-66bc5c9577-n9hbq kube-system 0 (default) + 285a7b9b51b30 25 seconds ago NotReady coredns-66bc5c9577-bvfjs kube-system 0 (default) + de7e83e71fa39 26 seconds ago Ready kube-proxy-f62r8 kube-system 0 (default) + cf459201581a1 35 seconds ago Ready kube-controller-manager-false-999044 kube-system 0 (default) + 0c9b7f5432317 35 seconds ago Ready kube-apiserver-false-999044 kube-system 0 (default) + 5aab7587e713e 35 seconds ago Ready etcd-false-999044 kube-system 0 (default) + bd00f85b51267 35 seconds ago Ready kube-scheduler-false-999044 kube-system 0 (default) + + + >>> host: crictl containers: + CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE + 5d31cc088d33d 
registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 14 seconds ago Running dnsutils 0 69c9c83ffdf3b netcat-cd4db9dbf-lrsv6 default + b420e320cc4a5 6e38f40d628db 25 seconds ago Running storage-provisioner 0 a475faf80adb4 storage-provisioner kube-system + 2e79811fed34e 52546a367cc9e 25 seconds ago Running coredns 0 9c805584674cc coredns-66bc5c9577-n9hbq kube-system + a1f579c38aa81 fc25172553d79 26 seconds ago Running kube-proxy 0 de7e83e71fa39 kube-proxy-f62r8 kube-system + 1a97305adb509 c80c8dbafe7dd 35 seconds ago Running kube-controller-manager 0 cf459201581a1 kube-controller-manager-false-999044 kube-system + 1f828285a9bdf 5f1f5298c888d 35 seconds ago Running etcd 0 5aab7587e713e etcd-false-999044 kube-system + 3e6ade66ef266 c3994bc696102 35 seconds ago Running kube-apiserver 0 0c9b7f5432317 kube-apiserver-false-999044 kube-system + 0bfce1b4b741b 7dd6aaa1717ab 35 seconds ago Running kube-scheduler 0 bd00f85b51267 kube-scheduler-false-999044 kube-system + + + >>> k8s: describe netcat deployment: + Name: netcat + Namespace: default + CreationTimestamp: Sun, 02 Nov 2025 23:24:39 +0000 + Labels: app=netcat + Annotations: deployment.kubernetes.io/revision: 1 + Selector: app=netcat + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 25% max unavailable, 25% max surge + Pod Template: + Labels: app=netcat + Containers: + dnsutils: + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + Environment: + Mounts: + Volumes: + Node-Selectors: + Tolerations: + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: netcat-cd4db9dbf (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 16s deployment-controller Scaled up replica set netcat-cd4db9dbf from 0 to 1 + + + >>> k8s: describe netcat pod(s): + Name: netcat-cd4db9dbf-lrsv6 + Namespace: default + Priority: 0 + Service Account: default + Node: false-999044/192.168.76.2 + Start Time: Sun, 02 Nov 2025 23:24:39 +0000 + Labels: app=netcat + pod-template-hash=cd4db9dbf + Annotations: + Status: Running + IP: 10.244.0.4 + IPs: + IP: 10.244.0.4 + Controlled By: ReplicaSet/netcat-cd4db9dbf + Containers: + dnsutils: + Container ID: docker://5d31cc088d33d093d59597fdfe038c5e0510b0913d227eef464741f18b9ba527 + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Image ID: docker-pullable://registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + State: Running + Started: Sun, 02 Nov 2025 23:24:41 +0000 + Ready: True + Restart Count: 0 + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-2nqhb (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-api-access-2nqhb: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: + Tolerations: 
node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 16s default-scheduler Successfully assigned default/netcat-cd4db9dbf-lrsv6 to false-999044 + Normal Pulling 15s kubelet Pulling image "registry.k8s.io/e2e-test-images/agnhost:2.40" + Normal Pulled 14s kubelet Successfully pulled image "registry.k8s.io/e2e-test-images/agnhost:2.40" in 973ms (973ms including waiting). Image size: 127004766 bytes. + Normal Created 14s kubelet Created container: dnsutils + Normal Started 14s kubelet Started container dnsutils + + + >>> k8s: netcat logs: + + + >>> k8s: describe coredns deployment: + Name: coredns + Namespace: kube-system + CreationTimestamp: Sun, 02 Nov 2025 23:24:24 +0000 + Labels: k8s-app=kube-dns + Annotations: deployment.kubernetes.io/revision: 1 + Selector: k8s-app=kube-dns + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 1 max unavailable, 25% max surge + Pod Template: + Labels: k8s-app=kube-dns + Service Account: coredns + Containers: + coredns: + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Ports: 53/UDP (dns), 53/TCP (dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from config-volume (ro) + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + Priority Class Name: system-cluster-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: coredns-66bc5c9577 (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 26s deployment-controller Scaled up replica set coredns-66bc5c9577 from 0 to 2 + Normal ScalingReplicaSet 25s deployment-controller Scaled down replica set coredns-66bc5c9577 from 2 to 1 + + + >>> k8s: describe coredns pods: + Name: coredns-66bc5c9577-n9hbq + Namespace: kube-system + Priority: 2000000000 + Priority Class Name: system-cluster-critical + Service Account: coredns + Node: false-999044/192.168.76.2 + Start Time: Sun, 02 Nov 2025 23:24:29 +0000 + Labels: k8s-app=kube-dns + pod-template-hash=66bc5c9577 + Annotations: + Status: Running + IP: 10.244.0.3 + IPs: + IP: 10.244.0.3 + Controlled By: ReplicaSet/coredns-66bc5c9577 + Containers: + coredns: + Container ID: docker://2e79811fed34efd03acfa1da7a3c9e33173a0bb014c73df6b43bee74b5610b96 + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Image ID: docker-pullable://registry.k8s.io/coredns/coredns@sha256:e8c262566636e6bc340ece6473b0eed193cad045384401529721ddbe6463d31c + Ports: 53/UDP (dns), 53/TCP (dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host 
Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + State: Running + Started: Sun, 02 Nov 2025 23:24:30 +0000 + Ready: True + Restart Count: 0 + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from config-volume (ro) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-wwfw6 (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + kube-api-access-wwfw6: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: Burstable + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 26s default-scheduler Successfully assigned kube-system/coredns-66bc5c9577-n9hbq to false-999044 + Normal Pulled 25s kubelet Container image "registry.k8s.io/coredns/coredns:v1.12.1" already present on machine + Normal Created 25s kubelet Created container: coredns + Normal Started 25s kubelet Started container coredns + + + >>> k8s: coredns logs: + maxprocs: Leaving GOMAXPROCS=8: CPU quota undefined + .:53 + [INFO] plugin/reload: Running configuration SHA512 = 3e2243e8b9e7116f563b83b1933f477a68ba9ad4a829ed5d7e54629fb2ce53528b9bc6023030be20be434ad805fd246296dd428c64e9bbef3a70f22b8621f560 + CoreDNS-1.12.1 + linux/amd64, go1.24.1, 707c7c1 + [INFO] 127.0.0.1:54787 - 37552 "HINFO IN 6967173130667566237.8102591585074376993. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.026500594s + [INFO] 10.244.0.4:58082 - 64692 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.00013799s + [INFO] 10.244.0.4:60502 - 3518 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000231055s + [INFO] 10.244.0.4:44976 - 61357 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000081843s + [INFO] 10.244.0.4:54529 - 43947 "AAAA IN netcat.default.svc.cluster.local. udp 50 false 512" NOERROR qr,aa,rd 143 0.000157612s + [INFO] 10.244.0.4:54529 - 43720 "A IN netcat.default.svc.cluster.local. udp 50 false 512" NOERROR qr,aa,rd 98 0.000204151s + [INFO] 10.244.0.4:38955 - 23684 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000197814s + [INFO] 10.244.0.4:38109 - 23609 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000135963s + [INFO] 10.244.0.4:59457 - 43712 "AAAA IN kubernetes.default.svc.cluster.local. 
udp 54 false 512" NOERROR qr,aa,rd 147 0.000113198s + + + >>> k8s: describe api server pod(s): + Name: kube-apiserver-false-999044 + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Node: false-999044/192.168.76.2 + Start Time: Sun, 02 Nov 2025 23:24:23 +0000 + Labels: component=kube-apiserver + tier=control-plane + Annotations: kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 192.168.76.2:8443 + kubernetes.io/config.hash: 59e0c2140ad1dca05529d6417767c9fd + kubernetes.io/config.mirror: 59e0c2140ad1dca05529d6417767c9fd + kubernetes.io/config.seen: 2025-11-02T23:24:19.756638655Z + kubernetes.io/config.source: file + Status: Running + SeccompProfile: RuntimeDefault + IP: 192.168.76.2 + IPs: + IP: 192.168.76.2 + Controlled By: Node/false-999044 + Containers: + kube-apiserver: + Container ID: docker://3e6ade66ef26668010cfeb97b8d18c77a61192c6c27d7f09da6f73b59f531d4a + Image: registry.k8s.io/kube-apiserver:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-apiserver@sha256:b9d7c117f8ac52bed4b13aeed973dc5198f9d93a926e6fe9e0b384f155baa902 + Port: 8443/TCP (probe-port) + Host Port: 8443/TCP (probe-port) + Command: + kube-apiserver + --advertise-address=192.168.76.2 + --allow-privileged=true + --authorization-mode=Node,RBAC + --client-ca-file=/var/lib/minikube/certs/ca.crt + --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + --enable-bootstrap-token-auth=true + --etcd-cafile=/var/lib/minikube/certs/etcd/ca.crt + --etcd-certfile=/var/lib/minikube/certs/apiserver-etcd-client.crt + --etcd-keyfile=/var/lib/minikube/certs/apiserver-etcd-client.key + --etcd-servers=https://127.0.0.1:2379 + --kubelet-client-certificate=/var/lib/minikube/certs/apiserver-kubelet-client.crt + --kubelet-client-key=/var/lib/minikube/certs/apiserver-kubelet-client.key + --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + --proxy-client-cert-file=/var/lib/minikube/certs/front-proxy-client.crt + --proxy-client-key-file=/var/lib/minikube/certs/front-proxy-client.key + --requestheader-allowed-names=front-proxy-client + --requestheader-client-ca-file=/var/lib/minikube/certs/front-proxy-ca.crt + --requestheader-extra-headers-prefix=X-Remote-Extra- + --requestheader-group-headers=X-Remote-Group + --requestheader-username-headers=X-Remote-User + --secure-port=8443 + --service-account-issuer=https://kubernetes.default.svc.cluster.local + --service-account-key-file=/var/lib/minikube/certs/sa.pub + --service-account-signing-key-file=/var/lib/minikube/certs/sa.key + --service-cluster-ip-range=10.96.0.0/12 + --tls-cert-file=/var/lib/minikube/certs/apiserver.crt + --tls-private-key-file=/var/lib/minikube/certs/apiserver.key + State: Running + Started: Sun, 02 Nov 2025 23:24:20 +0000 + Ready: True + Restart Count: 0 + Requests: + cpu: 250m + Liveness: http-get https://192.168.76.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=8 + Readiness: http-get https://192.168.76.2:probe-port/readyz delay=0s timeout=15s period=1s #success=1 #failure=3 + Startup: http-get https://192.168.76.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=24 + Environment: + Mounts: + /etc/ca-certificates from etc-ca-certificates (ro) + /etc/ssl/certs from ca-certs (ro) + /usr/local/share/ca-certificates from usr-local-share-ca-certificates (ro) + /usr/share/ca-certificates from 
usr-share-ca-certificates (ro) + /var/lib/minikube/certs from k8s-certs (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + ca-certs: + Type: HostPath (bare host directory volume) + Path: /etc/ssl/certs + HostPathType: DirectoryOrCreate + etc-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /etc/ca-certificates + HostPathType: DirectoryOrCreate + k8s-certs: + Type: HostPath (bare host directory volume) + Path: /var/lib/minikube/certs + HostPathType: DirectoryOrCreate + usr-local-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/local/share/ca-certificates + HostPathType: DirectoryOrCreate + usr-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/share/ca-certificates + HostPathType: DirectoryOrCreate + QoS Class: Burstable + Node-Selectors: + Tolerations: :NoExecute op=Exists + Events: + + + >>> k8s: api server logs: + I1102 23:24:20.809052 1 options.go:263] external host was not specified, using 192.168.76.2 + I1102 23:24:20.810765 1 server.go:150] Version: v1.34.1 + I1102 23:24:20.810783 1 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + W1102 23:24:21.144865 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=scheduling.k8s.io/v1alpha1 + W1102 23:24:21.144882 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=rbac.authorization.k8s.io/v1alpha1 + W1102 23:24:21.144886 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=storagemigration.k8s.io/v1alpha1 + W1102 23:24:21.144888 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=coordination.k8s.io/v1alpha2 + W1102 23:24:21.144890 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=imagepolicy.k8s.io/v1alpha1 + W1102 23:24:21.144892 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=storage.k8s.io/v1alpha1 + W1102 23:24:21.144894 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=internal.apiserver.k8s.io/v1alpha1 + W1102 23:24:21.144897 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=authentication.k8s.io/v1alpha1 + W1102 23:24:21.144899 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=resource.k8s.io/v1alpha3 + W1102 23:24:21.144900 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=admissionregistration.k8s.io/v1alpha1 + W1102 23:24:21.144903 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, 
this is unsupported, proceed at your own risk: api=certificates.k8s.io/v1alpha1 + W1102 23:24:21.144905 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=node.k8s.io/v1alpha1 + W1102 23:24:21.152996 1 logging.go:55] [core] [Channel #1 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.153084 1 logging.go:55] [core] [Channel #2 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:24:21.153708 1 shared_informer.go:349] "Waiting for caches to sync" controller="node_authorizer" + I1102 23:24:21.158642 1 shared_informer.go:349] "Waiting for caches to sync" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:24:21.162712 1 plugins.go:157] Loaded 14 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,RuntimeClass,DefaultIngressClass,PodTopologyLabels,MutatingAdmissionPolicy,MutatingAdmissionWebhook. + I1102 23:24:21.162726 1 plugins.go:160] Loaded 13 validating admission controller(s) successfully in the following order: LimitRanger,ServiceAccount,PodSecurity,Priority,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,ClusterTrustBundleAttest,CertificateSubjectRestriction,ValidatingAdmissionPolicy,ValidatingAdmissionWebhook,ResourceQuota. + I1102 23:24:21.162908 1 instance.go:239] Using reconciler: lease + W1102 23:24:21.163702 1 logging.go:55] [core] [Channel #9 SubChannel #10]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.167897 1 logging.go:55] [core] [Channel #13 SubChannel #14]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.173357 1 logging.go:55] [core] [Channel #21 SubChannel #22]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:24:21.178778 1 handler.go:285] Adding GroupVersion apiextensions.k8s.io v1 to ResourceManager + W1102 23:24:21.178791 1 genericapiserver.go:784] Skipping API apiextensions.k8s.io/v1beta1 because it has no resources. + I1102 23:24:21.181535 1 cidrallocator.go:197] starting ServiceCIDR Allocator Controller + W1102 23:24:21.182007 1 logging.go:55] [core] [Channel #27 SubChannel #28]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.186019 1 logging.go:55] [core] [Channel #31 SubChannel #32]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.189972 1 logging.go:55] [core] [Channel #35 SubChannel #36]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.194207 1 logging.go:55] [core] [Channel #39 SubChannel #40]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.198258 1 logging.go:55] [core] [Channel #43 SubChannel #44]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.202882 1 logging.go:55] [core] [Channel #47 SubChannel #48]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.206473 1 logging.go:55] [core] [Channel #51 SubChannel #52]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.209843 1 logging.go:55] [core] [Channel #55 SubChannel #56]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.213238 1 logging.go:55] [core] [Channel #59 SubChannel #60]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.217498 1 logging.go:55] [core] [Channel #63 SubChannel #64]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.220873 1 logging.go:55] [core] [Channel #67 SubChannel #68]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.224260 1 logging.go:55] [core] [Channel #71 SubChannel #72]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.227764 1 logging.go:55] [core] [Channel #75 SubChannel #76]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.231864 1 logging.go:55] [core] [Channel #79 SubChannel #80]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.236228 1 logging.go:55] [core] [Channel #83 SubChannel #84]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.239796 1 logging.go:55] [core] [Channel #87 SubChannel #88]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.245515 1 logging.go:55] [core] [Channel #91 SubChannel #92]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:24:21.259373 1 handler.go:285] Adding GroupVersion v1 to ResourceManager + I1102 23:24:21.259518 1 apis.go:112] API group "internal.apiserver.k8s.io" is not enabled, skipping. + W1102 23:24:21.260290 1 logging.go:55] [core] [Channel #95 SubChannel #96]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.263523 1 logging.go:55] [core] [Channel #99 SubChannel #100]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.267515 1 logging.go:55] [core] [Channel #103 SubChannel #104]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.271560 1 logging.go:55] [core] [Channel #107 SubChannel #108]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.275532 1 logging.go:55] [core] [Channel #111 SubChannel #112]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.279568 1 logging.go:55] [core] [Channel #115 SubChannel #116]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.282957 1 logging.go:55] [core] [Channel #119 SubChannel #120]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.287022 1 logging.go:55] [core] [Channel #123 SubChannel #124]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.290481 1 logging.go:55] [core] [Channel #127 SubChannel #128]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.294567 1 logging.go:55] [core] [Channel #131 SubChannel #132]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.298143 1 logging.go:55] [core] [Channel #135 SubChannel #136]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.302799 1 logging.go:55] [core] [Channel #139 SubChannel #140]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.310402 1 logging.go:55] [core] [Channel #143 SubChannel #144]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.314677 1 logging.go:55] [core] [Channel #147 SubChannel #148]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.318325 1 logging.go:55] [core] [Channel #151 SubChannel #152]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.321780 1 logging.go:55] [core] [Channel #155 SubChannel #156]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.326269 1 logging.go:55] [core] [Channel #159 SubChannel #160]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.331260 1 logging.go:55] [core] [Channel #163 SubChannel #164]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.334638 1 logging.go:55] [core] [Channel #167 SubChannel #168]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.338710 1 logging.go:55] [core] [Channel #171 SubChannel #172]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.342541 1 logging.go:55] [core] [Channel #175 SubChannel #176]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.346638 1 logging.go:55] [core] [Channel #179 SubChannel #180]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.350845 1 logging.go:55] [core] [Channel #183 SubChannel #184]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.354693 1 logging.go:55] [core] [Channel #187 SubChannel #188]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:24:21.358204 1 apis.go:112] API group "storagemigration.k8s.io" is not enabled, skipping. + W1102 23:24:21.358717 1 logging.go:55] [core] [Channel #191 SubChannel #192]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.361887 1 logging.go:55] [core] [Channel #195 SubChannel #196]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.368051 1 logging.go:55] [core] [Channel #199 SubChannel #200]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.372713 1 logging.go:55] [core] [Channel #203 SubChannel #204]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.376701 1 logging.go:55] [core] [Channel #207 SubChannel #208]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.380131 1 logging.go:55] [core] [Channel #211 SubChannel #212]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.383848 1 logging.go:55] [core] [Channel #215 SubChannel #216]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.387820 1 logging.go:55] [core] [Channel #219 SubChannel #220]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.391904 1 logging.go:55] [core] [Channel #223 SubChannel #224]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.395833 1 logging.go:55] [core] [Channel #227 SubChannel #228]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.399109 1 logging.go:55] [core] [Channel #231 SubChannel #232]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.403024 1 logging.go:55] [core] [Channel #235 SubChannel #236]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.406889 1 logging.go:55] [core] [Channel #239 SubChannel #240]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.421280 1 logging.go:55] [core] [Channel #243 SubChannel #244]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:24:21.425187 1 logging.go:55] [core] [Channel #247 SubChannel #248]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:24:21.429145 1 logging.go:55] [core] [Channel #251 SubChannel #252]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + I1102 23:24:21.437653 1 handler.go:285] Adding GroupVersion authentication.k8s.io v1 to ResourceManager + W1102 23:24:21.437665 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1beta1 because it has no resources. + W1102 23:24:21.437668 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1alpha1 because it has no resources. + I1102 23:24:21.437900 1 handler.go:285] Adding GroupVersion authorization.k8s.io v1 to ResourceManager + W1102 23:24:21.437904 1 genericapiserver.go:784] Skipping API authorization.k8s.io/v1beta1 because it has no resources. + I1102 23:24:21.438320 1 handler.go:285] Adding GroupVersion autoscaling v2 to ResourceManager + I1102 23:24:21.440401 1 handler.go:285] Adding GroupVersion autoscaling v1 to ResourceManager + W1102 23:24:21.440415 1 genericapiserver.go:784] Skipping API autoscaling/v2beta1 because it has no resources. + W1102 23:24:21.440419 1 genericapiserver.go:784] Skipping API autoscaling/v2beta2 because it has no resources. 
+ I1102 23:24:21.442566 1 handler.go:285] Adding GroupVersion batch v1 to ResourceManager + W1102 23:24:21.442581 1 genericapiserver.go:784] Skipping API batch/v1beta1 because it has no resources. + I1102 23:24:21.443940 1 handler.go:285] Adding GroupVersion certificates.k8s.io v1 to ResourceManager + W1102 23:24:21.443979 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1beta1 because it has no resources. + W1102 23:24:21.443991 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1alpha1 because it has no resources. + I1102 23:24:21.444993 1 handler.go:285] Adding GroupVersion coordination.k8s.io v1 to ResourceManager + W1102 23:24:21.445003 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1beta1 because it has no resources. + W1102 23:24:21.445006 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1alpha2 because it has no resources. + I1102 23:24:21.445355 1 handler.go:285] Adding GroupVersion discovery.k8s.io v1 to ResourceManager + W1102 23:24:21.445361 1 genericapiserver.go:784] Skipping API discovery.k8s.io/v1beta1 because it has no resources. + I1102 23:24:21.446436 1 handler.go:285] Adding GroupVersion networking.k8s.io v1 to ResourceManager + W1102 23:24:21.446444 1 genericapiserver.go:784] Skipping API networking.k8s.io/v1beta1 because it has no resources. + I1102 23:24:21.446635 1 handler.go:285] Adding GroupVersion node.k8s.io v1 to ResourceManager + W1102 23:24:21.446638 1 genericapiserver.go:784] Skipping API node.k8s.io/v1beta1 because it has no resources. + W1102 23:24:21.446641 1 genericapiserver.go:784] Skipping API node.k8s.io/v1alpha1 because it has no resources. + I1102 23:24:21.446968 1 handler.go:285] Adding GroupVersion policy v1 to ResourceManager + W1102 23:24:21.446973 1 genericapiserver.go:784] Skipping API policy/v1beta1 because it has no resources. + I1102 23:24:21.447644 1 handler.go:285] Adding GroupVersion rbac.authorization.k8s.io v1 to ResourceManager + W1102 23:24:21.447649 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1beta1 because it has no resources. + W1102 23:24:21.447652 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources. + I1102 23:24:21.447844 1 handler.go:285] Adding GroupVersion scheduling.k8s.io v1 to ResourceManager + W1102 23:24:21.447847 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1beta1 because it has no resources. + W1102 23:24:21.447849 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources. + I1102 23:24:21.448839 1 handler.go:285] Adding GroupVersion storage.k8s.io v1 to ResourceManager + W1102 23:24:21.448847 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1beta1 because it has no resources. + W1102 23:24:21.448849 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1alpha1 because it has no resources. + I1102 23:24:21.449348 1 handler.go:285] Adding GroupVersion flowcontrol.apiserver.k8s.io v1 to ResourceManager + W1102 23:24:21.449354 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta3 because it has no resources. + W1102 23:24:21.449356 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta2 because it has no resources. + W1102 23:24:21.449358 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta1 because it has no resources. 
+ I1102 23:24:21.450854 1 handler.go:285] Adding GroupVersion apps v1 to ResourceManager + W1102 23:24:21.450865 1 genericapiserver.go:784] Skipping API apps/v1beta2 because it has no resources. + W1102 23:24:21.450868 1 genericapiserver.go:784] Skipping API apps/v1beta1 because it has no resources. + I1102 23:24:21.451655 1 handler.go:285] Adding GroupVersion admissionregistration.k8s.io v1 to ResourceManager + W1102 23:24:21.451662 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1beta1 because it has no resources. + W1102 23:24:21.451664 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1alpha1 because it has no resources. + I1102 23:24:21.451875 1 handler.go:285] Adding GroupVersion events.k8s.io v1 to ResourceManager + W1102 23:24:21.451879 1 genericapiserver.go:784] Skipping API events.k8s.io/v1beta1 because it has no resources. + W1102 23:24:21.451908 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta2 because it has no resources. + I1102 23:24:21.452691 1 handler.go:285] Adding GroupVersion resource.k8s.io v1 to ResourceManager + W1102 23:24:21.452698 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta1 because it has no resources. + W1102 23:24:21.452703 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1alpha3 because it has no resources. + W1102 23:24:21.454135 1 logging.go:55] [core] [Channel #255 SubChannel #256]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:24:21.457401 1 handler.go:285] Adding GroupVersion apiregistration.k8s.io v1 to ResourceManager + W1102 23:24:21.457412 1 genericapiserver.go:784] Skipping API apiregistration.k8s.io/v1beta1 because it has no resources. 
+ I1102 23:24:21.647263 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:24:21.647341 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:24:21.647437 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/lib/minikube/certs/apiserver.crt::/var/lib/minikube/certs/apiserver.key" + I1102 23:24:21.647606 1 secure_serving.go:211] Serving securely on [::]:8443 + I1102 23:24:21.647638 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" + I1102 23:24:21.647645 1 remote_available_controller.go:425] Starting RemoteAvailability controller + I1102 23:24:21.647651 1 cache.go:32] Waiting for caches to sync for RemoteAvailability controller + I1102 23:24:21.647707 1 customresource_discovery_controller.go:294] Starting DiscoveryController + I1102 23:24:21.647721 1 controller.go:80] Starting OpenAPI V3 AggregationController + I1102 23:24:21.647741 1 controller.go:142] Starting OpenAPI controller + I1102 23:24:21.647754 1 controller.go:119] Starting legacy_token_tracking_controller + I1102 23:24:21.647759 1 controller.go:90] Starting OpenAPI V3 controller + I1102 23:24:21.647760 1 shared_informer.go:349] "Waiting for caches to sync" controller="configmaps" + I1102 23:24:21.647770 1 naming_controller.go:299] Starting NamingConditionController + I1102 23:24:21.647782 1 establishing_controller.go:81] Starting EstablishingController + I1102 23:24:21.647790 1 nonstructuralschema_controller.go:195] Starting NonStructuralSchemaConditionController + I1102 23:24:21.647800 1 apiapproval_controller.go:189] Starting KubernetesAPIApprovalPolicyConformantConditionController + I1102 23:24:21.647813 1 crd_finalizer.go:269] Starting CRDFinalizer + I1102 23:24:21.647820 1 gc_controller.go:78] Starting apiserver lease garbage collector + I1102 23:24:21.647849 1 system_namespaces_controller.go:66] Starting system namespaces controller + I1102 23:24:21.647889 1 repairip.go:210] Starting ipallocator-repair-controller + I1102 23:24:21.647893 1 shared_informer.go:349] "Waiting for caches to sync" controller="ipallocator-repair-controller" + I1102 23:24:21.647909 1 default_servicecidr_controller.go:111] Starting kubernetes-service-cidr-controller + I1102 23:24:21.647925 1 shared_informer.go:349] "Waiting for caches to sync" controller="kubernetes-service-cidr-controller" + I1102 23:24:21.653857 1 cluster_authentication_trust_controller.go:459] Starting cluster_authentication_trust_controller controller + I1102 23:24:21.653910 1 shared_informer.go:349] "Waiting for caches to sync" controller="cluster_authentication_trust_controller" + I1102 23:24:21.653997 1 dynamic_serving_content.go:135] "Starting controller" name="aggregator-proxy-cert::/var/lib/minikube/certs/front-proxy-client.crt::/var/lib/minikube/certs/front-proxy-client.key" + I1102 23:24:21.654053 1 local_available_controller.go:156] Starting LocalAvailability controller + I1102 23:24:21.654057 1 cache.go:32] Waiting for caches to sync for LocalAvailability controller + I1102 23:24:21.654068 1 apiservice_controller.go:100] Starting APIServiceRegistrationController + I1102 23:24:21.654071 1 cache.go:32] Waiting for caches to sync for APIServiceRegistrationController controller + I1102 23:24:21.654075 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller + I1102 23:24:21.654099 1 aggregator.go:169] waiting for initial CRD sync... 
+ I1102 23:24:21.654107 1 controller.go:78] Starting OpenAPI AggregationController + I1102 23:24:21.654121 1 apf_controller.go:377] Starting API Priority and Fairness config controller + I1102 23:24:21.654153 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:24:21.654190 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:24:21.654523 1 handler_discovery.go:451] Starting ResourceDiscoveryManager + I1102 23:24:21.654542 1 crdregistration_controller.go:114] Starting crd-autoregister controller + I1102 23:24:21.654546 1 shared_informer.go:349] "Waiting for caches to sync" controller="crd-autoregister" + E1102 23:24:21.719809 1 controller.go:145] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms" + I1102 23:24:21.748024 1 shared_informer.go:356] "Caches are synced" controller="ipallocator-repair-controller" + I1102 23:24:21.748043 1 cache.go:39] Caches are synced for RemoteAvailability controller + I1102 23:24:21.748073 1 shared_informer.go:356] "Caches are synced" controller="kubernetes-service-cidr-controller" + I1102 23:24:21.748095 1 default_servicecidr_controller.go:166] Creating default ServiceCIDR with CIDRs: [10.96.0.0/12] + I1102 23:24:21.748115 1 shared_informer.go:356] "Caches are synced" controller="configmaps" + I1102 23:24:21.753957 1 shared_informer.go:356] "Caches are synced" controller="cluster_authentication_trust_controller" + I1102 23:24:21.753959 1 shared_informer.go:356] "Caches are synced" controller="node_authorizer" + I1102 23:24:21.754130 1 cache.go:39] Caches are synced for LocalAvailability controller + I1102 23:24:21.754166 1 apf_controller.go:382] Running API Priority and Fairness config worker + I1102 23:24:21.754173 1 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process + I1102 23:24:21.754729 1 shared_informer.go:356] "Caches are synced" controller="crd-autoregister" + I1102 23:24:21.754753 1 aggregator.go:171] initial CRD sync complete... 
+ I1102 23:24:21.754766 1 autoregister_controller.go:144] Starting autoregister controller + I1102 23:24:21.754770 1 cache.go:32] Waiting for caches to sync for autoregister controller + I1102 23:24:21.754774 1 cache.go:39] Caches are synced for autoregister controller + I1102 23:24:21.759349 1 shared_informer.go:356] "Caches are synced" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:24:21.759359 1 policy_source.go:240] refreshing policies + E1102 23:24:21.800622 1 controller.go:148] "Unhandled Error" err="while syncing ConfigMap \"kube-system/kube-apiserver-legacy-service-account-token-tracking\", err: namespaces \"kube-system\" not found" logger="UnhandledError" + I1102 23:24:21.849444 1 controller.go:667] quota admission added evaluator for: namespaces + I1102 23:24:21.850373 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:24:21.850383 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True + I1102 23:24:21.852152 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:24:21.852270 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller + I1102 23:24:21.921201 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io + I1102 23:24:22.657069 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000 + I1102 23:24:22.659064 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000 + I1102 23:24:22.659072 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist. 
+ I1102 23:24:22.877313 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io + I1102 23:24:22.895318 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io + I1102 23:24:22.952569 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"} + W1102 23:24:22.955408 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.76.2] + I1102 23:24:22.955952 1 controller.go:667] quota admission added evaluator for: endpoints + I1102 23:24:22.958037 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io + I1102 23:24:23.659024 1 controller.go:667] quota admission added evaluator for: serviceaccounts + I1102 23:24:24.109058 1 controller.go:667] quota admission added evaluator for: deployments.apps + I1102 23:24:24.113473 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"} + I1102 23:24:24.117356 1 controller.go:667] quota admission added evaluator for: daemonsets.apps + I1102 23:24:29.009428 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps + I1102 23:24:29.411846 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:24:29.413946 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:24:29.760475 1 controller.go:667] quota admission added evaluator for: replicasets.apps + I1102 23:24:39.846088 1 alloc.go:328] "allocated clusterIPs" service="default/netcat" clusterIPs={"IPv4":"10.108.248.171"} + E1102 23:24:47.950747 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:54408: use of closed network connection + E1102 23:24:48.031407 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:54424: use of closed network connection + E1102 23:24:48.120837 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:54442: use of closed network connection + E1102 23:24:48.218431 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:54460: use of closed network connection + E1102 23:24:53.346358 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:54474: use of closed network connection + E1102 23:24:53.870095 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:58230: use of closed network connection + E1102 23:24:53.942090 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:58246: use of closed network connection + E1102 23:24:54.010342 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:58254: use of closed network connection + E1102 23:24:54.089747 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:58262: use of closed network connection + + + >>> host: /etc/cni: + /etc/cni/net.d/10-crio-bridge.conflist.disabled + { + "cniVersion": "1.0.0", + "name": "crio", + "plugins": [ + { + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [ + { "dst": "0.0.0.0/0" } + ], + "ranges": [ + [{ "subnet": "10.244.0.0/16" }] + ] + } + } + ] + } + /etc/cni/net.d/87-podman-bridge.conflist + { + "cniVersion": "0.4.0", + "name": "podman", + "plugins": [ + { + "type": "bridge", + "bridge": "cni-podman0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": 
true, + "ipam": { + "type": "host-local", + "routes": [{ "dst": "0.0.0.0/0" }], + "ranges": [ + [ + { + "subnet": "10.244.0.0/16", + "gateway": "10.244.0.1" + } + ] + ] + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type": "firewall" + }, + { + "type": "tuning" + } + ] + } + /etc/cni/net.d/cni.lock + + + >>> host: ip a s: + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: eth0@if346: mtu 1500 qdisc noqueue state UP group default + link/ether 52:55:f8:01:e1:82 brd ff:ff:ff:ff:ff:ff link-netnsid 0 + inet 192.168.76.2/24 brd 192.168.76.255 scope global eth0 + valid_lft forever preferred_lft forever + 3: docker0: mtu 1500 qdisc noqueue state DOWN group default + link/ether ca:e5:b2:92:07:c3 brd ff:ff:ff:ff:ff:ff + inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0 + valid_lft forever preferred_lft forever + 4: tunl0@NONE: mtu 1480 qdisc noop state DOWN group default qlen 1000 + link/ipip 0.0.0.0 brd 0.0.0.0 + 5: cni-podman0: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether 46:53:d7:b6:3e:e9 brd ff:ff:ff:ff:ff:ff + inet 10.244.0.1/16 brd 10.244.255.255 scope global cni-podman0 + valid_lft forever preferred_lft forever + inet6 fe80::4453:d7ff:feb6:3ee9/64 scope link + valid_lft forever preferred_lft forever + 7: vethd64a2f70@if3: mtu 1500 qdisc noqueue master cni-podman0 state UP group default + link/ether a2:61:8f:30:e7:25 brd ff:ff:ff:ff:ff:ff link-netnsid 2 + inet6 fe80::a061:8fff:fe30:e725/64 scope link + valid_lft forever preferred_lft forever + 8: veth82c99b85@if3: mtu 1500 qdisc noqueue master cni-podman0 state UP group default + link/ether de:e5:c5:da:76:9d brd ff:ff:ff:ff:ff:ff link-netnsid 1 + inet6 fe80::dce5:c5ff:feda:769d/64 scope link + valid_lft forever preferred_lft forever + + + >>> host: ip r s: + default via 192.168.76.1 dev eth0 + 10.244.0.0/16 dev cni-podman0 proto kernel scope link src 10.244.0.1 + 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown + 192.168.76.0/24 dev eth0 proto kernel scope link src 192.168.76.2 + + + >>> host: iptables-save: + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:24:58 2025 + *mangle + :PREROUTING ACCEPT [17826:55931755] + :INPUT ACCEPT [17791:55928957] + :FORWARD ACCEPT [35:2798] + :OUTPUT ACCEPT [14330:5123831] + :POSTROUTING ACCEPT [14365:5126629] + :KUBE-IPTABLES-HINT - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-PROXY-CANARY - [0:0] + COMMIT + # Completed on Sun Nov 2 23:24:58 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:24:58 2025 + *filter + :INPUT ACCEPT [4859:1120604] + :FORWARD ACCEPT [0:0] + :OUTPUT ACCEPT [4816:1418018] + :CNI-ADMIN - [0:0] + :CNI-FORWARD - [0:0] + :DOCKER - [0:0] + :DOCKER-BRIDGE - [0:0] + :DOCKER-CT - [0:0] + :DOCKER-FORWARD - [0:0] + :DOCKER-ISOLATION-STAGE-1 - [0:0] + :DOCKER-ISOLATION-STAGE-2 - [0:0] + :DOCKER-USER - [0:0] + :KUBE-EXTERNAL-SERVICES - [0:0] + :KUBE-FIREWALL - [0:0] + :KUBE-FORWARD - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-PROXY-FIREWALL - [0:0] + :KUBE-SERVICES - [0:0] + -A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A INPUT -m comment --comment "kubernetes health check service ports" -j KUBE-NODEPORTS + -A INPUT -m conntrack 
--ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A INPUT -j KUBE-FIREWALL + -A FORWARD -m comment --comment "CNI firewall plugin rules" -j CNI-FORWARD + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A FORWARD -j DOCKER-USER + -A FORWARD -j DOCKER-FORWARD + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -j KUBE-FIREWALL + -A CNI-FORWARD -m comment --comment "CNI firewall plugin admin overrides" -j CNI-ADMIN + -A CNI-FORWARD -d 10.244.0.3/32 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A CNI-FORWARD -s 10.244.0.3/32 -j ACCEPT + -A CNI-FORWARD -d 10.244.0.4/32 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A CNI-FORWARD -s 10.244.0.4/32 -j ACCEPT + -A DOCKER ! -i docker0 -o docker0 -j DROP + -A DOCKER-BRIDGE -o docker0 -j DOCKER + -A DOCKER-CT -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A DOCKER-FORWARD -j DOCKER-CT + -A DOCKER-FORWARD -j DOCKER-ISOLATION-STAGE-1 + -A DOCKER-FORWARD -j DOCKER-BRIDGE + -A DOCKER-FORWARD -i docker0 -j ACCEPT + -A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2 + -A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP + -A KUBE-FIREWALL ! -s 127.0.0.0/8 -d 127.0.0.0/8 -m comment --comment "block incoming localnet connections" -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP + -A KUBE-FORWARD -m conntrack --ctstate INVALID -m nfacct --nfacct-name ct_state_invalid_dropped_pkts -j DROP + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + COMMIT + # Completed on Sun Nov 2 23:24:58 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:24:58 2025 + *nat + :PREROUTING ACCEPT [36:2160] + :INPUT ACCEPT [36:2160] + :OUTPUT ACCEPT [67:4020] + :POSTROUTING ACCEPT [67:4020] + :CNI-682ce714db1c32d8f6663186 - [0:0] + :CNI-a6e40862ec21a8f6687ca1e2 - [0:0] + :DOCKER - [0:0] + :DOCKER_OUTPUT - [0:0] + :DOCKER_POSTROUTING - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-MARK-MASQ - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-POSTROUTING - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-SEP-6E7XQMQ4RAYOWTTM - [0:0] + :KUBE-SEP-M66F6TD25XSFZOMV - [0:0] + :KUBE-SEP-XPTUC4FRAFEFDBGF - [0:0] + :KUBE-SEP-ZP3FB6NMPNCO4VBJ - [0:0] + :KUBE-SEP-ZXMNUKOKXUTL2MK2 - [0:0] + :KUBE-SERVICES - [0:0] + :KUBE-SVC-ERIFXISQEP7F7OF4 - [0:0] + :KUBE-SVC-JD5MR3NA4I4DYORP - [0:0] + :KUBE-SVC-NPX46M4PTMTKRN6Y - [0:0] + :KUBE-SVC-TCOU7JCQXEZGVUNU - [0:0] + :KUBE-SVC-WDP22YZC5S6MZWYX - [0:0] + -A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A PREROUTING -d 192.168.76.1/32 -j DOCKER_OUTPUT + -A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER + -A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -d 192.168.76.1/32 -j DOCKER_OUTPUT + -A OUTPUT ! 
-d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER + -A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING + -A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE + -A POSTROUTING -d 192.168.76.1/32 -j DOCKER_POSTROUTING + -A POSTROUTING -s 10.244.0.3/32 -m comment --comment "name: \"podman\" id: \"9c805584674cc6e433003651b9e719d39cadeaed8985415ae2d0b7d9b984574f\"" -j CNI-682ce714db1c32d8f6663186 + -A POSTROUTING -s 10.244.0.4/32 -m comment --comment "name: \"podman\" id: \"69c9c83ffdf3bda24e4b2b75ef9be16ae5470ed59d59ab1bdbf18982040c9476\"" -j CNI-a6e40862ec21a8f6687ca1e2 + -A CNI-682ce714db1c32d8f6663186 -d 10.244.0.0/16 -m comment --comment "name: \"podman\" id: \"9c805584674cc6e433003651b9e719d39cadeaed8985415ae2d0b7d9b984574f\"" -j ACCEPT + -A CNI-682ce714db1c32d8f6663186 ! -d 224.0.0.0/4 -m comment --comment "name: \"podman\" id: \"9c805584674cc6e433003651b9e719d39cadeaed8985415ae2d0b7d9b984574f\"" -j MASQUERADE + -A CNI-a6e40862ec21a8f6687ca1e2 -d 10.244.0.0/16 -m comment --comment "name: \"podman\" id: \"69c9c83ffdf3bda24e4b2b75ef9be16ae5470ed59d59ab1bdbf18982040c9476\"" -j ACCEPT + -A CNI-a6e40862ec21a8f6687ca1e2 ! -d 224.0.0.0/4 -m comment --comment "name: \"podman\" id: \"69c9c83ffdf3bda24e4b2b75ef9be16ae5470ed59d59ab1bdbf18982040c9476\"" -j MASQUERADE + -A DOCKER -i docker0 -j RETURN + -A DOCKER_OUTPUT -d 192.168.76.1/32 -p tcp -m tcp --dport 53 -j DNAT --to-destination 127.0.0.11:44073 + -A DOCKER_OUTPUT -d 192.168.76.1/32 -p udp -m udp --dport 53 -j DNAT --to-destination 127.0.0.11:56701 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p tcp -m tcp --sport 44073 -j SNAT --to-source 192.168.76.1:53 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p udp -m udp --sport 56701 -j SNAT --to-source 192.168.76.1:53 + -A KUBE-MARK-MASQ -j MARK --set-xmark 0x4000/0x4000 + -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN + -A KUBE-POSTROUTING -j MARK --set-xmark 0x4000/0x0 + -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE --random-fully + -A KUBE-SEP-6E7XQMQ4RAYOWTTM -s 10.244.0.3/32 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-MARK-MASQ + -A KUBE-SEP-6E7XQMQ4RAYOWTTM -p udp -m comment --comment "kube-system/kube-dns:dns" -m udp -j DNAT --to-destination 10.244.0.3:53 + -A KUBE-SEP-M66F6TD25XSFZOMV -s 192.168.76.2/32 -m comment --comment "default/kubernetes:https" -j KUBE-MARK-MASQ + -A KUBE-SEP-M66F6TD25XSFZOMV -p tcp -m comment --comment "default/kubernetes:https" -m tcp -j DNAT --to-destination 192.168.76.2:8443 + -A KUBE-SEP-XPTUC4FRAFEFDBGF -s 10.244.0.4/32 -m comment --comment "default/netcat" -j KUBE-MARK-MASQ + -A KUBE-SEP-XPTUC4FRAFEFDBGF -p tcp -m comment --comment "default/netcat" -m tcp -j DNAT --to-destination 10.244.0.4:8080 + -A KUBE-SEP-ZP3FB6NMPNCO4VBJ -s 10.244.0.3/32 -m comment --comment "kube-system/kube-dns:metrics" -j KUBE-MARK-MASQ + -A KUBE-SEP-ZP3FB6NMPNCO4VBJ -p tcp -m comment --comment "kube-system/kube-dns:metrics" -m tcp -j DNAT --to-destination 10.244.0.3:9153 + -A KUBE-SEP-ZXMNUKOKXUTL2MK2 -s 10.244.0.3/32 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-MARK-MASQ + -A KUBE-SEP-ZXMNUKOKXUTL2MK2 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m tcp -j DNAT --to-destination 10.244.0.3:53 + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-SVC-JD5MR3NA4I4DYORP + -A KUBE-SERVICES -d 10.108.248.171/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-SVC-WDP22YZC5S6MZWYX + -A KUBE-SERVICES -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-NPX46M4PTMTKRN6Y + -A KUBE-SERVICES -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-SVC-TCOU7JCQXEZGVUNU + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-SVC-ERIFXISQEP7F7OF4 + -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS + -A KUBE-SVC-ERIFXISQEP7F7OF4 ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-ERIFXISQEP7F7OF4 -m comment --comment "kube-system/kube-dns:dns-tcp -> 10.244.0.3:53" -j KUBE-SEP-ZXMNUKOKXUTL2MK2 + -A KUBE-SVC-JD5MR3NA4I4DYORP ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-MARK-MASQ + -A KUBE-SVC-JD5MR3NA4I4DYORP -m comment --comment "kube-system/kube-dns:metrics -> 10.244.0.3:9153" -j KUBE-SEP-ZP3FB6NMPNCO4VBJ + -A KUBE-SVC-NPX46M4PTMTKRN6Y ! -s 10.244.0.0/16 -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-MARK-MASQ + -A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https -> 192.168.76.2:8443" -j KUBE-SEP-M66F6TD25XSFZOMV + -A KUBE-SVC-TCOU7JCQXEZGVUNU ! 
-s 10.244.0.0/16 -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-TCOU7JCQXEZGVUNU -m comment --comment "kube-system/kube-dns:dns -> 10.244.0.3:53" -j KUBE-SEP-6E7XQMQ4RAYOWTTM + -A KUBE-SVC-WDP22YZC5S6MZWYX ! -s 10.244.0.0/16 -d 10.108.248.171/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-MARK-MASQ + -A KUBE-SVC-WDP22YZC5S6MZWYX -m comment --comment "default/netcat -> 10.244.0.4:8080" -j KUBE-SEP-XPTUC4FRAFEFDBGF + COMMIT + # Completed on Sun Nov 2 23:24:58 2025 + + + >>> host: iptables table nat: + Chain PREROUTING (policy ACCEPT 37 packets, 2220 bytes) + pkts bytes target prot opt in out source destination + 61 3845 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 2 170 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.76.1 + 45 2700 DOCKER 0 -- * * 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type LOCAL + + Chain INPUT (policy ACCEPT 37 packets, 2220 bytes) + pkts bytes target prot opt in out source destination + + Chain OUTPUT (policy ACCEPT 67 packets, 4020 bytes) + pkts bytes target prot opt in out source destination + 453 38313 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 316 30247 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.76.1 + 82 4920 DOCKER 0 -- * * 0.0.0.0/0 !127.0.0.0/8 ADDRTYPE match dst-type LOCAL + + Chain POSTROUTING (policy ACCEPT 67 packets, 4020 bytes) + pkts bytes target prot opt in out source destination + 463 39048 KUBE-POSTROUTING 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */ + 0 0 MASQUERADE 0 -- * !docker0 172.17.0.0/16 0.0.0.0/0 + 0 0 DOCKER_POSTROUTING 0 -- * * 0.0.0.0/0 192.168.76.1 + 0 0 CNI-682ce714db1c32d8f6663186 0 -- * * 10.244.0.3 0.0.0.0/0 /* name: "podman" id: "9c805584674cc6e433003651b9e719d39cadeaed8985415ae2d0b7d9b984574f" */ + 9 675 CNI-a6e40862ec21a8f6687ca1e2 0 -- * * 10.244.0.4 0.0.0.0/0 /* name: "podman" id: "69c9c83ffdf3bda24e4b2b75ef9be16ae5470ed59d59ab1bdbf18982040c9476" */ + + Chain CNI-682ce714db1c32d8f6663186 (1 references) + pkts bytes target prot opt in out source destination + 0 0 ACCEPT 0 -- * * 0.0.0.0/0 10.244.0.0/16 /* name: "podman" id: "9c805584674cc6e433003651b9e719d39cadeaed8985415ae2d0b7d9b984574f" */ + 0 0 MASQUERADE 0 -- * * 0.0.0.0/0 !224.0.0.0/4 /* name: "podman" id: "9c805584674cc6e433003651b9e719d39cadeaed8985415ae2d0b7d9b984574f" */ + + Chain CNI-a6e40862ec21a8f6687ca1e2 (1 references) + pkts bytes target prot opt in out source destination + 9 675 ACCEPT 0 -- * * 0.0.0.0/0 10.244.0.0/16 /* name: "podman" id: "69c9c83ffdf3bda24e4b2b75ef9be16ae5470ed59d59ab1bdbf18982040c9476" */ + 0 0 MASQUERADE 0 -- * * 0.0.0.0/0 !224.0.0.0/4 /* name: "podman" id: "69c9c83ffdf3bda24e4b2b75ef9be16ae5470ed59d59ab1bdbf18982040c9476" */ + + Chain DOCKER (2 references) + pkts bytes target prot opt in out source destination + 0 0 RETURN 0 -- docker0 * 0.0.0.0/0 0.0.0.0/0 + + Chain DOCKER_OUTPUT (2 references) + pkts bytes target prot opt in out source destination + 0 0 DNAT 6 -- * * 0.0.0.0/0 192.168.76.1 tcp dpt:53 to:127.0.0.11:44073 + 318 30417 DNAT 17 -- * * 0.0.0.0/0 192.168.76.1 udp dpt:53 to:127.0.0.11:56701 + + Chain DOCKER_POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 0 0 SNAT 6 -- * * 127.0.0.11 0.0.0.0/0 tcp spt:44073 to:192.168.76.1:53 + 0 0 SNAT 17 -- * * 127.0.0.11 0.0.0.0/0 udp spt:56701 to:192.168.76.1:53 + + Chain KUBE-KUBELET-CANARY (0 references) + pkts bytes target prot opt in out source destination + 
+ Chain KUBE-MARK-MASQ (10 references) + pkts bytes target prot opt in out source destination + 1 60 MARK 0 -- * * 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000 + + Chain KUBE-NODEPORTS (1 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 76 4695 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0 mark match ! 0x4000/0x4000 + 1 60 MARK 0 -- * * 0.0.0.0/0 0.0.0.0/0 MARK xor 0x4000 + 1 60 MASQUERADE 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ random-fully + + Chain KUBE-PROXY-CANARY (0 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-SEP-6E7XQMQ4RAYOWTTM (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.3 0.0.0.0/0 /* kube-system/kube-dns:dns */ + 8 615 DNAT 17 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns */ udp to:10.244.0.3:53 + + Chain KUBE-SEP-M66F6TD25XSFZOMV (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 0 -- * * 192.168.76.2 0.0.0.0/0 /* default/kubernetes:https */ + 7 420 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https */ tcp to:192.168.76.2:8443 + + Chain KUBE-SEP-XPTUC4FRAFEFDBGF (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 0 -- * * 10.244.0.4 0.0.0.0/0 /* default/netcat */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat */ tcp to:10.244.0.4:8080 + + Chain KUBE-SEP-ZP3FB6NMPNCO4VBJ (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.3 0.0.0.0/0 /* kube-system/kube-dns:metrics */ + 0 0 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:metrics */ tcp to:10.244.0.3:9153 + + Chain KUBE-SEP-ZXMNUKOKXUTL2MK2 (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.3 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ tcp to:10.244.0.3:53 + + Chain KUBE-SERVICES (2 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-SVC-JD5MR3NA4I4DYORP 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 1 60 KUBE-SVC-WDP22YZC5S6MZWYX 6 -- * * 0.0.0.0/0 10.108.248.171 /* default/netcat cluster IP */ tcp dpt:8080 + 0 0 KUBE-SVC-NPX46M4PTMTKRN6Y 6 -- * * 0.0.0.0/0 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 8 615 KUBE-SVC-TCOU7JCQXEZGVUNU 17 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 1 60 KUBE-SVC-ERIFXISQEP7F7OF4 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 102 6120 KUBE-NODEPORTS 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL + + Chain KUBE-SVC-ERIFXISQEP7F7OF4 (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 1 60 KUBE-SEP-ZXMNUKOKXUTL2MK2 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp -> 10.244.0.3:53 */ + + Chain KUBE-SVC-JD5MR3NA4I4DYORP (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 0 0 KUBE-SEP-ZP3FB6NMPNCO4VBJ 0 -- * * 0.0.0.0/0 
0.0.0.0/0 /* kube-system/kube-dns:metrics -> 10.244.0.3:9153 */ + + Chain KUBE-SVC-NPX46M4PTMTKRN6Y (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 7 420 KUBE-SEP-M66F6TD25XSFZOMV 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https -> 192.168.76.2:8443 */ + + Chain KUBE-SVC-TCOU7JCQXEZGVUNU (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 17 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 8 615 KUBE-SEP-6E7XQMQ4RAYOWTTM 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns -> 10.244.0.3:53 */ + + Chain KUBE-SVC-WDP22YZC5S6MZWYX (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.108.248.171 /* default/netcat cluster IP */ tcp dpt:8080 + 1 60 KUBE-SEP-XPTUC4FRAFEFDBGF 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat -> 10.244.0.4:8080 */ + + + >>> k8s: describe kube-proxy daemon set: + Name: kube-proxy + Namespace: kube-system + Selector: k8s-app=kube-proxy + Node-Selector: kubernetes.io/os=linux + Labels: k8s-app=kube-proxy + Annotations: deprecated.daemonset.template.generation: 1 + Desired Number of Nodes Scheduled: 1 + Current Number of Nodes Scheduled: 1 + Number of Nodes Scheduled with Up-to-date Pods: 1 + Number of Nodes Scheduled with Available Pods: 1 + Number of Nodes Misscheduled: 0 + Pods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed + Pod Template: + Labels: k8s-app=kube-proxy + Service Account: kube-proxy + Containers: + kube-proxy: + Image: registry.k8s.io/kube-proxy:v1.34.1 + Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + --hostname-override=$(NODE_NAME) + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + Priority Class Name: system-node-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 29s daemonset-controller Created pod: kube-proxy-f62r8 + + + >>> k8s: describe kube-proxy pod(s): + Name: kube-proxy-f62r8 + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Service Account: kube-proxy + Node: false-999044/192.168.76.2 + Start Time: Sun, 02 Nov 2025 23:24:29 +0000 + Labels: controller-revision-hash=66486579fc + k8s-app=kube-proxy + pod-template-generation=1 + Annotations: + Status: Running + IP: 192.168.76.2 + IPs: + IP: 192.168.76.2 + Controlled By: DaemonSet/kube-proxy + Containers: + kube-proxy: + Container ID: docker://a1f579c38aa81e98147023b77ca050e5a8b03f467cbf22813e9511caa9340edd + Image: registry.k8s.io/kube-proxy:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-proxy@sha256:913cc83ca0b5588a81d86ce8eedeb3ed1e9c1326e81852a1ea4f622b74ff749a + Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + --hostname-override=$(NODE_NAME) + State: Running + 
Started: Sun, 02 Nov 2025 23:24:29 +0000 + Ready: True + Restart Count: 0 + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-c5mn5 (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + kube-api-access-c5mn5: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + node.kubernetes.io/disk-pressure:NoSchedule op=Exists + node.kubernetes.io/memory-pressure:NoSchedule op=Exists + node.kubernetes.io/network-unavailable:NoSchedule op=Exists + node.kubernetes.io/not-ready:NoExecute op=Exists + node.kubernetes.io/pid-pressure:NoSchedule op=Exists + node.kubernetes.io/unreachable:NoExecute op=Exists + node.kubernetes.io/unschedulable:NoSchedule op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 29s default-scheduler Successfully assigned kube-system/kube-proxy-f62r8 to false-999044 + Normal Pulled 29s kubelet Container image "registry.k8s.io/kube-proxy:v1.34.1" already present on machine + Normal Created 29s kubelet Created container: kube-proxy + Normal Started 29s kubelet Started container kube-proxy + + + >>> k8s: kube-proxy logs: + I1102 23:24:29.552638 1 server_linux.go:53] "Using iptables proxy" + I1102 23:24:29.601586 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache" + I1102 23:24:29.702218 1 shared_informer.go:356] "Caches are synced" controller="node informer cache" + I1102 23:24:29.702234 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.76.2"] + E1102 23:24:29.702299 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. 
Consider using `--nodeport-addresses primary`" + I1102 23:24:29.718127 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4" + I1102 23:24:29.718156 1 server_linux.go:132] "Using iptables Proxier" + I1102 23:24:29.721471 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4" + I1102 23:24:29.721689 1 server.go:527] "Version info" version="v1.34.1" + I1102 23:24:29.721708 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + I1102 23:24:29.723352 1 config.go:403] "Starting serviceCIDR config controller" + I1102 23:24:29.723355 1 config.go:200] "Starting service config controller" + I1102 23:24:29.723365 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config" + I1102 23:24:29.723372 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config" + I1102 23:24:29.723352 1 config.go:106] "Starting endpoint slice config controller" + I1102 23:24:29.723437 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config" + I1102 23:24:29.723445 1 config.go:309] "Starting node config controller" + I1102 23:24:29.723451 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config" + I1102 23:24:29.823737 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config" + I1102 23:24:29.823767 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config" + I1102 23:24:29.823786 1 shared_informer.go:356] "Caches are synced" controller="node config" + I1102 23:24:29.823795 1 shared_informer.go:356] "Caches are synced" controller="service config" + + + >>> host: kubelet daemon status: + ● kubelet.service - kubelet: The Kubernetes Node Agent + Loaded: loaded (]8;;file://false-999044/lib/systemd/system/kubelet.service/lib/systemd/system/kubelet.service]8;;; disabled; preset: enabled) + Drop-In: /etc/systemd/system/kubelet.service.d + └─]8;;file://false-999044/etc/systemd/system/kubelet.service.d/10-kubeadm.conf10-kubeadm.conf]8;; + Active: active (running) since Sun 2025-11-02 23:24:23 UTC; 34s ago + Docs: ]8;;http://kubernetes.io/docs/http://kubernetes.io/docs/]8;; + Main PID: 2313 (kubelet) + Tasks: 15 (limit: 629145) + Memory: 31.1M + CPU: 795ms + CGroup: /system.slice/kubelet.service + └─2313 /var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=false-999044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2 + + Nov 02 23:24:36 false-999044 kubelet[2313]: I1102 23:24:36.227662 2313 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-4dr7t\" (UniqueName: \"kubernetes.io/projected/b94b251f-6cd5-43cf-a16b-c8b04e7e09f9-kube-api-access-4dr7t\") on node \"false-999044\" DevicePath \"\"" + Nov 02 23:24:36 false-999044 kubelet[2313]: I1102 23:24:36.227678 2313 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b94b251f-6cd5-43cf-a16b-c8b04e7e09f9-config-volume\") on node \"false-999044\" DevicePath \"\"" + Nov 02 23:24:36 false-999044 kubelet[2313]: I1102 23:24:36.972561 2313 scope.go:117] "RemoveContainer" containerID="17e84b3f6e33f5f53d4c4c70322c28490e1a813f809a5695925138306487992f" + Nov 02 23:24:36 false-999044 kubelet[2313]: I1102 
23:24:36.979784 2313 scope.go:117] "RemoveContainer" containerID="17e84b3f6e33f5f53d4c4c70322c28490e1a813f809a5695925138306487992f" + Nov 02 23:24:36 false-999044 kubelet[2313]: E1102 23:24:36.980454 2313 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 17e84b3f6e33f5f53d4c4c70322c28490e1a813f809a5695925138306487992f" containerID="17e84b3f6e33f5f53d4c4c70322c28490e1a813f809a5695925138306487992f" + Nov 02 23:24:36 false-999044 kubelet[2313]: I1102 23:24:36.980487 2313 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"17e84b3f6e33f5f53d4c4c70322c28490e1a813f809a5695925138306487992f"} err="failed to get container status \"17e84b3f6e33f5f53d4c4c70322c28490e1a813f809a5695925138306487992f\": rpc error: code = Unknown desc = Error response from daemon: No such container: 17e84b3f6e33f5f53d4c4c70322c28490e1a813f809a5695925138306487992f" + Nov 02 23:24:37 false-999044 kubelet[2313]: I1102 23:24:37.878728 2313 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b94b251f-6cd5-43cf-a16b-c8b04e7e09f9" path="/var/lib/kubelet/pods/b94b251f-6cd5-43cf-a16b-c8b04e7e09f9/volumes" + Nov 02 23:24:39 false-999044 kubelet[2313]: I1102 23:24:39.846529 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nqhb\" (UniqueName: \"kubernetes.io/projected/631c7650-d88b-413d-af32-b6dfc3454f24-kube-api-access-2nqhb\") pod \"netcat-cd4db9dbf-lrsv6\" (UID: \"631c7650-d88b-413d-af32-b6dfc3454f24\") " pod="default/netcat-cd4db9dbf-lrsv6" + Nov 02 23:24:42 false-999044 kubelet[2313]: I1102 23:24:42.006799 2313 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/netcat-cd4db9dbf-lrsv6" podStartSLOduration=2.033718944 podStartE2EDuration="3.006784992s" podCreationTimestamp="2025-11-02 23:24:39 +0000 UTC" firstStartedPulling="2025-11-02 23:24:40.241274164 +0000 UTC m=+16.418280123" lastFinishedPulling="2025-11-02 23:24:41.214340208 +0000 UTC m=+17.391346171" observedRunningTime="2025-11-02 23:24:42.006645039 +0000 UTC m=+18.183651014" watchObservedRunningTime="2025-11-02 23:24:42.006784992 +0000 UTC m=+18.183790972" + Nov 02 23:24:53 false-999044 kubelet[2313]: E1102 23:24:53.870050 2313 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp [::1]:40096->[::1]:43839: write tcp [::1]:40096->[::1]:43839: write: broken pipe + + + >>> host: kubelet daemon config: + # ]8;;file://false-999044/lib/systemd/system/kubelet.service/lib/systemd/system/kubelet.service]8;; + [Unit] + Description=kubelet: The Kubernetes Node Agent + Documentation=http://kubernetes.io/docs/ + StartLimitIntervalSec=0 + + [Service] + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet + Restart=always + # Tuned for local dev: faster than upstream default (10s), but slower than systemd default (100ms) + RestartSec=600ms + + [Install] + WantedBy=multi-user.target + + # ]8;;file://false-999044/etc/systemd/system/kubelet.service.d/10-kubeadm.conf/etc/systemd/system/kubelet.service.d/10-kubeadm.conf]8;; + [Unit] + Wants=docker.socket + + [Service] + ExecStart= + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=false-999044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2 + + [Install] + + + >>> k8s: kubelet logs: + Nov 02 23:24:16 false-999044 systemd[1]: 
Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 404. + Nov 02 23:24:16 false-999044 kubelet[1606]: E1102 23:24:16.270343 1606 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:24:16 false-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:24:16 false-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:24:16 false-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 1. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:24:16 false-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 457 and the job result is done. + Nov 02 23:24:16 false-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 457. + Nov 02 23:24:16 false-999044 kubelet[1770]: E1102 23:24:16.915394 1770 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:24:16 false-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:24:16 false-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. 
+ Nov 02 23:24:17 false-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 2. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:24:17 false-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 510 and the job result is done. + Nov 02 23:24:17 false-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 510. + Nov 02 23:24:17 false-999044 kubelet[1780]: E1102 23:24:17.776657 1780 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:24:17 false-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:24:17 false-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:24:18 false-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 3. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:24:18 false-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 563 and the job result is done. + Nov 02 23:24:18 false-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 563. 
+ Nov 02 23:24:18 false-999044 kubelet[1790]: E1102 23:24:18.522644 1790 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:24:18 false-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:24:18 false-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:24:19 false-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 4. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:24:19 false-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 616 and the job result is done. + Nov 02 23:24:19 false-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 616. + Nov 02 23:24:19 false-999044 systemd[1]: Stopping kubelet.service - kubelet: The Kubernetes Node Agent... + ░░ Subject: A stop job for unit kubelet.service has begun execution + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has begun execution. + ░░  + ░░ The job identifier is 669. + Nov 02 23:24:19 false-999044 systemd[1]: kubelet.service: Deactivated successfully. + ░░ Subject: Unit succeeded + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has successfully entered the 'dead' state. + Nov 02 23:24:19 false-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 669 and the job result is done. + Nov 02 23:24:19 false-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 670. 
+ Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.739049 1829 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.739104 1829 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.739126 1829 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.739133 1829 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.739270 1829 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.741648 1829 certificate_manager.go:596] "Failed while requesting a signed certificate from the control plane" err="cannot create certificate signing request: Post \"https://192.168.76.2:8443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 192.168.76.2:8443: connect: connection refused" logger="kubernetes.io/kube-apiserver-client-kubelet.UnhandledError" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.741693 1829 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.744607 1829 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.747926 1829 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. Defaulting to /" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.747944 1829 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.748090 1829 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.748102 1829 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"false-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.748183 1829 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.748188 1829 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.748237 1829 container_manager_linux.go:315] "Creating Dynamic Resource Allocation (DRA) manager" + Nov 02 23:24:19 false-999044 
kubelet[1829]: I1102 23:24:19.749094 1829 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.749209 1829 kubelet.go:475] "Attempting to sync node with API server" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.749217 1829 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.749232 1829 kubelet.go:387] "Adding apiserver pod source" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.749248 1829 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.749605 1829 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: Get \"https://192.168.76.2:8443/api/v1/nodes?fieldSelector=metadata.name%3Dfalse-999044&limit=500&resourceVersion=0\": dial tcp 192.168.76.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" + Nov 02 23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.749634 1829 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: Get \"https://192.168.76.2:8443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 192.168.76.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.749735 1829 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.750050 1829 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.750066 1829 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled" + Nov 02 23:24:19 false-999044 kubelet[1829]: W1102 23:24:19.750095 1829 probe.go:272] Flexvolume plugin directory at /usr/libexec/kubernetes/kubelet-plugins/volume/exec/ does not exist. Recreating. 
+ Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.750612 1829 server.go:1262] "Started kubelet" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.750702 1829 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.750736 1829 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.750752 1829 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.751081 1829 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.751150 1829 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.751171 1829 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.753084 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.751008 1829 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://192.168.76.2:8443/api/v1/namespaces/default/events\": dial tcp 192.168.76.2:8443: connect: connection refused" event="&Event{ObjectMeta:{false-999044.187454221ff6d81a default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:false-999044,UID:false-999044,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:false-999044,},FirstTimestamp:2025-11-02 23:24:19.750598682 +0000 UTC m=+0.450473227,LastTimestamp:2025-11-02 23:24:19.750598682 +0000 UTC m=+0.450473227,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:false-999044,}" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.753326 1829 volume_manager.go:313] "Starting Kubelet Volume Manager" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.753361 1829 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.753456 1829 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.753721 1829 server.go:310] "Adding debug handlers to kubelet server" + Nov 02 23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.753806 1829 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: Get \"https://192.168.76.2:8443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 192.168.76.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.754115 1829 factory.go:223] Registration of the systemd container factory successfully + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.754178 1829 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 
23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.754481 1829 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.76.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/false-999044?timeout=10s\": dial tcp 192.168.76.2:8443: connect: connection refused" interval="200ms" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.755172 1829 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.758144 1829 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv6" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.759666 1829 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.759680 1829 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.759703 1829 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.760233 1829 policy_none.go:49] "None policy: Start" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.760246 1829 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.760256 1829 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.760880 1829 policy_none.go:47] "Start" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.765034 1829 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.765050 1829 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.765072 1829 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.765117 1829 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.765361 1829 reflector.go:205] "Failed to watch" err="failed to list *v1.RuntimeClass: Get \"https://192.168.76.2:8443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 192.168.76.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass" + Nov 02 23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.784428 1829 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.784513 1829 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.784521 1829 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.784641 1829 plugin_manager.go:118] "Starting Kubelet Plugin Manager" + Nov 02 23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.785032 1829 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." 
err="no imagefs label for configured runtime" + Nov 02 23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.785057 1829 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"false-999044\" not found" + Nov 02 23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.878467 1829 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"false-999044\" not found" node="false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.881008 1829 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"false-999044\" not found" node="false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.884874 1829 kubelet_node_status.go:75] "Attempting to register node" node="false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.885086 1829 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.76.2:8443/api/v1/nodes\": dial tcp 192.168.76.2:8443: connect: connection refused" node="false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.889853 1829 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"false-999044\" not found" node="false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.891463 1829 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"false-999044\" not found" node="false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.954705 1829 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/2b41d21d701963cf416390ac25a479a9-kubeconfig\") pod \"kube-scheduler-false-999044\" (UID: \"2b41d21d701963cf416390ac25a479a9\") " pod="kube-system/kube-scheduler-false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.954728 1829 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/6b3c99521de900f086f7f47514ff2df5-etcd-data\") pod \"etcd-false-999044\" (UID: \"6b3c99521de900f086f7f47514ff2df5\") " pod="kube-system/etcd-false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.954741 1829 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/6b3c99521de900f086f7f47514ff2df5-etcd-certs\") pod \"etcd-false-999044\" (UID: \"6b3c99521de900f086f7f47514ff2df5\") " pod="kube-system/etcd-false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.954750 1829 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/59e0c2140ad1dca05529d6417767c9fd-usr-share-ca-certificates\") pod \"kube-apiserver-false-999044\" (UID: \"59e0c2140ad1dca05529d6417767c9fd\") " pod="kube-system/kube-apiserver-false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.954765 1829 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/50577b3c7932382aa82e7916bd5575d1-kubeconfig\") pod \"kube-controller-manager-false-999044\" (UID: \"50577b3c7932382aa82e7916bd5575d1\") " 
pod="kube-system/kube-controller-manager-false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.954788 1829 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/59e0c2140ad1dca05529d6417767c9fd-etc-ca-certificates\") pod \"kube-apiserver-false-999044\" (UID: \"59e0c2140ad1dca05529d6417767c9fd\") " pod="kube-system/kube-apiserver-false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.954837 1829 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/59e0c2140ad1dca05529d6417767c9fd-k8s-certs\") pod \"kube-apiserver-false-999044\" (UID: \"59e0c2140ad1dca05529d6417767c9fd\") " pod="kube-system/kube-apiserver-false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: E1102 23:24:19.954851 1829 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.76.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/false-999044?timeout=10s\": dial tcp 192.168.76.2:8443: connect: connection refused" interval="400ms" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.954863 1829 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/59e0c2140ad1dca05529d6417767c9fd-usr-local-share-ca-certificates\") pod \"kube-apiserver-false-999044\" (UID: \"59e0c2140ad1dca05529d6417767c9fd\") " pod="kube-system/kube-apiserver-false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.954904 1829 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/50577b3c7932382aa82e7916bd5575d1-etc-ca-certificates\") pod \"kube-controller-manager-false-999044\" (UID: \"50577b3c7932382aa82e7916bd5575d1\") " pod="kube-system/kube-controller-manager-false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.954923 1829 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/50577b3c7932382aa82e7916bd5575d1-k8s-certs\") pod \"kube-controller-manager-false-999044\" (UID: \"50577b3c7932382aa82e7916bd5575d1\") " pod="kube-system/kube-controller-manager-false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.954938 1829 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/50577b3c7932382aa82e7916bd5575d1-usr-share-ca-certificates\") pod \"kube-controller-manager-false-999044\" (UID: \"50577b3c7932382aa82e7916bd5575d1\") " pod="kube-system/kube-controller-manager-false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.954960 1829 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/59e0c2140ad1dca05529d6417767c9fd-ca-certs\") pod \"kube-apiserver-false-999044\" (UID: \"59e0c2140ad1dca05529d6417767c9fd\") " pod="kube-system/kube-apiserver-false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.954987 1829 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: 
\"kubernetes.io/host-path/50577b3c7932382aa82e7916bd5575d1-ca-certs\") pod \"kube-controller-manager-false-999044\" (UID: \"50577b3c7932382aa82e7916bd5575d1\") " pod="kube-system/kube-controller-manager-false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.955002 1829 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/50577b3c7932382aa82e7916bd5575d1-flexvolume-dir\") pod \"kube-controller-manager-false-999044\" (UID: \"50577b3c7932382aa82e7916bd5575d1\") " pod="kube-system/kube-controller-manager-false-999044" + Nov 02 23:24:19 false-999044 kubelet[1829]: I1102 23:24:19.955032 1829 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/50577b3c7932382aa82e7916bd5575d1-usr-local-share-ca-certificates\") pod \"kube-controller-manager-false-999044\" (UID: \"50577b3c7932382aa82e7916bd5575d1\") " pod="kube-system/kube-controller-manager-false-999044" + Nov 02 23:24:20 false-999044 kubelet[1829]: I1102 23:24:20.085845 1829 kubelet_node_status.go:75] "Attempting to register node" node="false-999044" + Nov 02 23:24:20 false-999044 kubelet[1829]: E1102 23:24:20.086196 1829 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.76.2:8443/api/v1/nodes\": dial tcp 192.168.76.2:8443: connect: connection refused" node="false-999044" + Nov 02 23:24:20 false-999044 kubelet[1829]: E1102 23:24:20.355556 1829 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.76.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/false-999044?timeout=10s\": dial tcp 192.168.76.2:8443: connect: connection refused" interval="800ms" + Nov 02 23:24:20 false-999044 kubelet[1829]: I1102 23:24:20.487097 1829 kubelet_node_status.go:75] "Attempting to register node" node="false-999044" + Nov 02 23:24:20 false-999044 kubelet[1829]: E1102 23:24:20.487298 1829 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.76.2:8443/api/v1/nodes\": dial tcp 192.168.76.2:8443: connect: connection refused" node="false-999044" + Nov 02 23:24:20 false-999044 kubelet[1829]: E1102 23:24:20.565103 1829 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: Get \"https://192.168.76.2:8443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 192.168.76.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" + Nov 02 23:24:20 false-999044 kubelet[1829]: E1102 23:24:20.787605 1829 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"false-999044\" not found" node="false-999044" + Nov 02 23:24:20 false-999044 kubelet[1829]: E1102 23:24:20.808453 1829 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: Get \"https://192.168.76.2:8443/api/v1/nodes?fieldSelector=metadata.name%3Dfalse-999044&limit=500&resourceVersion=0\": dial tcp 192.168.76.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" + Nov 02 23:24:20 false-999044 kubelet[1829]: E1102 23:24:20.817728 1829 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"false-999044\" not found" 
node="false-999044" + Nov 02 23:24:21 false-999044 kubelet[1829]: I1102 23:24:21.288533 1829 kubelet_node_status.go:75] "Attempting to register node" node="false-999044" + Nov 02 23:24:21 false-999044 kubelet[1829]: E1102 23:24:21.676269 1829 nodelease.go:49] "Failed to get node when trying to set owner ref to the node lease" err="nodes \"false-999044\" not found" node="false-999044" + Nov 02 23:24:21 false-999044 kubelet[1829]: I1102 23:24:21.772348 1829 kubelet_node_status.go:78] "Successfully registered node" node="false-999044" + Nov 02 23:24:21 false-999044 kubelet[1829]: E1102 23:24:21.772369 1829 kubelet_node_status.go:486] "Error updating node status, will retry" err="error getting node \"false-999044\": node \"false-999044\" not found" + Nov 02 23:24:21 false-999044 kubelet[1829]: E1102 23:24:21.776440 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:21 false-999044 kubelet[1829]: E1102 23:24:21.823744 1829 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"false-999044\" not found" node="false-999044" + Nov 02 23:24:21 false-999044 kubelet[1829]: E1102 23:24:21.826720 1829 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"false-999044\" not found" node="false-999044" + Nov 02 23:24:21 false-999044 kubelet[1829]: E1102 23:24:21.829645 1829 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"false-999044\" not found" node="false-999044" + Nov 02 23:24:21 false-999044 kubelet[1829]: E1102 23:24:21.834050 1829 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"false-999044\" not found" node="false-999044" + Nov 02 23:24:21 false-999044 kubelet[1829]: E1102 23:24:21.877119 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:21 false-999044 kubelet[1829]: E1102 23:24:21.977641 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:22 false-999044 kubelet[1829]: E1102 23:24:22.078118 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:22 false-999044 kubelet[1829]: E1102 23:24:22.178643 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:22 false-999044 kubelet[1829]: E1102 23:24:22.279960 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:22 false-999044 kubelet[1829]: E1102 23:24:22.380528 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:22 false-999044 kubelet[1829]: E1102 23:24:22.480932 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:22 false-999044 kubelet[1829]: E1102 23:24:22.581414 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:22 false-999044 kubelet[1829]: E1102 23:24:22.682035 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:22 false-999044 
kubelet[1829]: E1102 23:24:22.782221 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:22 false-999044 kubelet[1829]: E1102 23:24:22.838482 1829 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"false-999044\" not found" node="false-999044" + Nov 02 23:24:22 false-999044 kubelet[1829]: E1102 23:24:22.838512 1829 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"false-999044\" not found" node="false-999044" + Nov 02 23:24:22 false-999044 kubelet[1829]: E1102 23:24:22.838559 1829 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"false-999044\" not found" node="false-999044" + Nov 02 23:24:22 false-999044 kubelet[1829]: E1102 23:24:22.838656 1829 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"false-999044\" not found" node="false-999044" + Nov 02 23:24:22 false-999044 kubelet[1829]: E1102 23:24:22.883091 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:22 false-999044 kubelet[1829]: E1102 23:24:22.983581 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:23 false-999044 kubelet[1829]: E1102 23:24:23.084081 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:23 false-999044 kubelet[1829]: E1102 23:24:23.184533 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:23 false-999044 kubelet[1829]: E1102 23:24:23.284564 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:23 false-999044 kubelet[1829]: E1102 23:24:23.385025 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:23 false-999044 kubelet[1829]: E1102 23:24:23.485498 1829 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:23 false-999044 kubelet[1829]: I1102 23:24:23.554487 1829 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-false-999044" + Nov 02 23:24:23 false-999044 kubelet[1829]: I1102 23:24:23.587027 1829 kubelet_node_status.go:439] "Fast updating node status as it just became ready" + Nov 02 23:24:23 false-999044 kubelet[1829]: I1102 23:24:23.635743 1829 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-false-999044" + Nov 02 23:24:23 false-999044 kubelet[1829]: I1102 23:24:23.638090 1829 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-false-999044" + Nov 02 23:24:23 false-999044 kubelet[1829]: I1102 23:24:23.640049 1829 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-false-999044" + Nov 02 23:24:23 false-999044 kubelet[1829]: I1102 23:24:23.751215 1829 apiserver.go:52] "Watching apiserver" + Nov 02 23:24:23 false-999044 kubelet[1829]: I1102 23:24:23.753864 1829 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:24:23 false-999044 systemd[1]: Stopping kubelet.service 
- kubelet: The Kubernetes Node Agent... + ░░ Subject: A stop job for unit kubelet.service has begun execution + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has begun execution. + ░░  + ░░ The job identifier is 802. + Nov 02 23:24:23 false-999044 kubelet[1829]: I1102 23:24:23.795054 1829 dynamic_cafile_content.go:175] "Shutting down controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:24:23 false-999044 systemd[1]: kubelet.service: Deactivated successfully. + ░░ Subject: Unit succeeded + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has successfully entered the 'dead' state. + Nov 02 23:24:23 false-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 802 and the job result is done. + Nov 02 23:24:23 false-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 802. + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.847218 2313 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.847265 2313 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.847280 2313 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.847284 2313 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.847424 2313 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.848190 2313 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.849407 2313 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.852083 2313 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.855265 2313 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. 
Defaulting to /" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.855281 2313 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.855397 2313 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.855408 2313 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"false-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.855492 2313 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.855497 2313 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.855511 2313 container_manager_linux.go:315] "Creating Dynamic Resource Allocation (DRA) manager" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.855919 2313 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.856047 2313 kubelet.go:475] "Attempting to sync node with API server" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.856058 2313 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.856072 2313 kubelet.go:387] "Adding apiserver pod source" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.856084 2313 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.856762 2313 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.857117 2313 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.857138 2313 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.857636 2313 server.go:1262] "Started kubelet" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.857685 2313 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.857801 2313 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 
burstTokens=10 + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.857900 2313 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.858184 2313 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.858476 2313 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.858592 2313 server.go:310] "Adding debug handlers to kubelet server" + Nov 02 23:24:23 false-999044 kubelet[2313]: E1102 23:24:23.861045 2313 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"false-999044\" not found" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.861165 2313 volume_manager.go:313] "Starting Kubelet Volume Manager" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.861214 2313 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.861579 2313 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.861741 2313 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.862462 2313 factory.go:223] Registration of the systemd container factory successfully + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.862604 2313 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.867466 2313 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.868994 2313 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv6" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.874721 2313 kubelet_network_linux.go:54] "Initialized iptables rules." 
protocol="IPv4" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.874736 2313 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.874751 2313 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:24:23 false-999044 kubelet[2313]: E1102 23:24:23.874799 2313 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.883204 2313 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.883218 2313 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.883233 2313 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.883323 2313 state_mem.go:88] "Updated default CPUSet" cpuSet="" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.883328 2313 state_mem.go:96] "Updated CPUSet assignments" assignments={} + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.883339 2313 policy_none.go:49] "None policy: Start" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.883344 2313 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.883350 2313 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.883424 2313 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.883431 2313 policy_none.go:47] "Start" + Nov 02 23:24:23 false-999044 kubelet[2313]: E1102 23:24:23.886157 2313 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.886246 2313 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.886253 2313 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.886368 2313 plugin_manager.go:118] "Starting Kubelet Plugin Manager" + Nov 02 23:24:23 false-999044 kubelet[2313]: E1102 23:24:23.887064 2313 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." 
err="no imagefs label for configured runtime" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.976137 2313 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-false-999044" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.976202 2313 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-false-999044" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.976238 2313 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-false-999044" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.976259 2313 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-false-999044" + Nov 02 23:24:23 false-999044 kubelet[2313]: E1102 23:24:23.979016 2313 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-false-999044\" already exists" pod="kube-system/kube-scheduler-false-999044" + Nov 02 23:24:23 false-999044 kubelet[2313]: E1102 23:24:23.979126 2313 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-false-999044\" already exists" pod="kube-system/kube-controller-manager-false-999044" + Nov 02 23:24:23 false-999044 kubelet[2313]: E1102 23:24:23.979155 2313 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-false-999044\" already exists" pod="kube-system/kube-apiserver-false-999044" + Nov 02 23:24:23 false-999044 kubelet[2313]: E1102 23:24:23.979166 2313 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-false-999044\" already exists" pod="kube-system/etcd-false-999044" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.987546 2313 kubelet_node_status.go:75] "Attempting to register node" node="false-999044" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.990427 2313 kubelet_node_status.go:124] "Node was previously registered" node="false-999044" + Nov 02 23:24:23 false-999044 kubelet[2313]: I1102 23:24:23.990476 2313 kubelet_node_status.go:78] "Successfully registered node" node="false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.063310 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/50577b3c7932382aa82e7916bd5575d1-kubeconfig\") pod \"kube-controller-manager-false-999044\" (UID: \"50577b3c7932382aa82e7916bd5575d1\") " pod="kube-system/kube-controller-manager-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.063336 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/59e0c2140ad1dca05529d6417767c9fd-ca-certs\") pod \"kube-apiserver-false-999044\" (UID: \"59e0c2140ad1dca05529d6417767c9fd\") " pod="kube-system/kube-apiserver-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.063355 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/50577b3c7932382aa82e7916bd5575d1-ca-certs\") pod \"kube-controller-manager-false-999044\" (UID: \"50577b3c7932382aa82e7916bd5575d1\") " pod="kube-system/kube-controller-manager-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.063364 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: 
\"kubernetes.io/host-path/50577b3c7932382aa82e7916bd5575d1-etc-ca-certificates\") pod \"kube-controller-manager-false-999044\" (UID: \"50577b3c7932382aa82e7916bd5575d1\") " pod="kube-system/kube-controller-manager-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.063376 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/50577b3c7932382aa82e7916bd5575d1-flexvolume-dir\") pod \"kube-controller-manager-false-999044\" (UID: \"50577b3c7932382aa82e7916bd5575d1\") " pod="kube-system/kube-controller-manager-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.063405 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/50577b3c7932382aa82e7916bd5575d1-usr-local-share-ca-certificates\") pod \"kube-controller-manager-false-999044\" (UID: \"50577b3c7932382aa82e7916bd5575d1\") " pod="kube-system/kube-controller-manager-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.063437 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/2b41d21d701963cf416390ac25a479a9-kubeconfig\") pod \"kube-scheduler-false-999044\" (UID: \"2b41d21d701963cf416390ac25a479a9\") " pod="kube-system/kube-scheduler-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.063447 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/6b3c99521de900f086f7f47514ff2df5-etcd-data\") pod \"etcd-false-999044\" (UID: \"6b3c99521de900f086f7f47514ff2df5\") " pod="kube-system/etcd-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.063455 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/59e0c2140ad1dca05529d6417767c9fd-etc-ca-certificates\") pod \"kube-apiserver-false-999044\" (UID: \"59e0c2140ad1dca05529d6417767c9fd\") " pod="kube-system/kube-apiserver-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.063467 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/59e0c2140ad1dca05529d6417767c9fd-usr-local-share-ca-certificates\") pod \"kube-apiserver-false-999044\" (UID: \"59e0c2140ad1dca05529d6417767c9fd\") " pod="kube-system/kube-apiserver-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.063499 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/59e0c2140ad1dca05529d6417767c9fd-usr-share-ca-certificates\") pod \"kube-apiserver-false-999044\" (UID: \"59e0c2140ad1dca05529d6417767c9fd\") " pod="kube-system/kube-apiserver-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.063520 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/50577b3c7932382aa82e7916bd5575d1-k8s-certs\") pod \"kube-controller-manager-false-999044\" (UID: \"50577b3c7932382aa82e7916bd5575d1\") " 
pod="kube-system/kube-controller-manager-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.063529 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/50577b3c7932382aa82e7916bd5575d1-usr-share-ca-certificates\") pod \"kube-controller-manager-false-999044\" (UID: \"50577b3c7932382aa82e7916bd5575d1\") " pod="kube-system/kube-controller-manager-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.063540 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/6b3c99521de900f086f7f47514ff2df5-etcd-certs\") pod \"etcd-false-999044\" (UID: \"6b3c99521de900f086f7f47514ff2df5\") " pod="kube-system/etcd-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.063552 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/59e0c2140ad1dca05529d6417767c9fd-k8s-certs\") pod \"kube-apiserver-false-999044\" (UID: \"59e0c2140ad1dca05529d6417767c9fd\") " pod="kube-system/kube-apiserver-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.856631 2313 apiserver.go:52] "Watching apiserver" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.862437 2313 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.899238 2313 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.899253 2313 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: E1102 23:24:24.902088 2313 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-false-999044\" already exists" pod="kube-system/etcd-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: E1102 23:24:24.902345 2313 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-false-999044\" already exists" pod="kube-system/kube-apiserver-false-999044" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.921764 2313 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-apiserver-false-999044" podStartSLOduration=1.921752745 podStartE2EDuration="1.921752745s" podCreationTimestamp="2025-11-02 23:24:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:24.916324697 +0000 UTC m=+1.093330682" watchObservedRunningTime="2025-11-02 23:24:24.921752745 +0000 UTC m=+1.098758727" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.921886 2313 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-scheduler-false-999044" podStartSLOduration=1.921881549 podStartE2EDuration="1.921881549s" podCreationTimestamp="2025-11-02 23:24:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:24.921354574 +0000 UTC m=+1.098360554" watchObservedRunningTime="2025-11-02 23:24:24.921881549 +0000 UTC m=+1.098887531" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.929910 2313 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-false-999044" podStartSLOduration=1.9299021939999998 podStartE2EDuration="1.929902194s" podCreationTimestamp="2025-11-02 23:24:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:24.929893959 +0000 UTC m=+1.106899939" watchObservedRunningTime="2025-11-02 23:24:24.929902194 +0000 UTC m=+1.106908175" + Nov 02 23:24:24 false-999044 kubelet[2313]: I1102 23:24:24.930008 2313 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-false-999044" podStartSLOduration=1.930004749 podStartE2EDuration="1.930004749s" podCreationTimestamp="2025-11-02 23:24:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:24.926250079 +0000 UTC m=+1.103256056" watchObservedRunningTime="2025-11-02 23:24:24.930004749 +0000 UTC m=+1.107010774" + Nov 02 23:24:29 false-999044 kubelet[2313]: I1102 23:24:29.087762 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/2f564339-7606-46ed-ab64-4e4ada43d7f0-xtables-lock\") pod \"kube-proxy-f62r8\" (UID: \"2f564339-7606-46ed-ab64-4e4ada43d7f0\") " pod="kube-system/kube-proxy-f62r8" + Nov 02 23:24:29 false-999044 kubelet[2313]: I1102 23:24:29.087790 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/2f564339-7606-46ed-ab64-4e4ada43d7f0-lib-modules\") pod \"kube-proxy-f62r8\" (UID: \"2f564339-7606-46ed-ab64-4e4ada43d7f0\") " pod="kube-system/kube-proxy-f62r8" + Nov 02 23:24:29 false-999044 kubelet[2313]: I1102 23:24:29.087805 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5mn5\" (UniqueName: \"kubernetes.io/projected/2f564339-7606-46ed-ab64-4e4ada43d7f0-kube-api-access-c5mn5\") pod \"kube-proxy-f62r8\" (UID: \"2f564339-7606-46ed-ab64-4e4ada43d7f0\") " pod="kube-system/kube-proxy-f62r8" + Nov 02 23:24:29 false-999044 kubelet[2313]: I1102 23:24:29.087831 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/2f564339-7606-46ed-ab64-4e4ada43d7f0-kube-proxy\") pod \"kube-proxy-f62r8\" (UID: \"2f564339-7606-46ed-ab64-4e4ada43d7f0\") " pod="kube-system/kube-proxy-f62r8" + Nov 02 23:24:29 false-999044 kubelet[2313]: I1102 23:24:29.894204 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b94b251f-6cd5-43cf-a16b-c8b04e7e09f9-config-volume\") pod \"coredns-66bc5c9577-bvfjs\" (UID: \"b94b251f-6cd5-43cf-a16b-c8b04e7e09f9\") " pod="kube-system/coredns-66bc5c9577-bvfjs" + Nov 02 23:24:29 false-999044 kubelet[2313]: I1102 23:24:29.894331 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dr7t\" (UniqueName: \"kubernetes.io/projected/b94b251f-6cd5-43cf-a16b-c8b04e7e09f9-kube-api-access-4dr7t\") pod \"coredns-66bc5c9577-bvfjs\" (UID: \"b94b251f-6cd5-43cf-a16b-c8b04e7e09f9\") " pod="kube-system/coredns-66bc5c9577-bvfjs" + Nov 02 23:24:29 false-999044 kubelet[2313]: I1102 23:24:29.894353 2313 
reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwfw6\" (UniqueName: \"kubernetes.io/projected/74ad13d1-c86f-4907-8bfc-f52fc61d4416-kube-api-access-wwfw6\") pod \"coredns-66bc5c9577-n9hbq\" (UID: \"74ad13d1-c86f-4907-8bfc-f52fc61d4416\") " pod="kube-system/coredns-66bc5c9577-n9hbq" + Nov 02 23:24:29 false-999044 kubelet[2313]: I1102 23:24:29.894375 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/74ad13d1-c86f-4907-8bfc-f52fc61d4416-config-volume\") pod \"coredns-66bc5c9577-n9hbq\" (UID: \"74ad13d1-c86f-4907-8bfc-f52fc61d4416\") " pod="kube-system/coredns-66bc5c9577-n9hbq" + Nov 02 23:24:30 false-999044 kubelet[2313]: I1102 23:24:30.014618 2313 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-f62r8" podStartSLOduration=1.014578409 podStartE2EDuration="1.014578409s" podCreationTimestamp="2025-11-02 23:24:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:29.925587319 +0000 UTC m=+6.102593310" watchObservedRunningTime="2025-11-02 23:24:30.014578409 +0000 UTC m=+6.191584483" + Nov 02 23:24:30 false-999044 kubelet[2313]: I1102 23:24:30.197064 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/95444b0e-006c-4448-b6db-1e9a761b7238-tmp\") pod \"storage-provisioner\" (UID: \"95444b0e-006c-4448-b6db-1e9a761b7238\") " pod="kube-system/storage-provisioner" + Nov 02 23:24:30 false-999044 kubelet[2313]: I1102 23:24:30.197106 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdwd2\" (UniqueName: \"kubernetes.io/projected/95444b0e-006c-4448-b6db-1e9a761b7238-kube-api-access-wdwd2\") pod \"storage-provisioner\" (UID: \"95444b0e-006c-4448-b6db-1e9a761b7238\") " pod="kube-system/storage-provisioner" + Nov 02 23:24:30 false-999044 kubelet[2313]: I1102 23:24:30.936635 2313 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-n9hbq" podStartSLOduration=1.936619613 podStartE2EDuration="1.936619613s" podCreationTimestamp="2025-11-02 23:24:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:30.936132144 +0000 UTC m=+7.113138132" watchObservedRunningTime="2025-11-02 23:24:30.936619613 +0000 UTC m=+7.113625585" + Nov 02 23:24:30 false-999044 kubelet[2313]: I1102 23:24:30.955741 2313 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-bvfjs" podStartSLOduration=1.9557256619999999 podStartE2EDuration="1.955725662s" podCreationTimestamp="2025-11-02 23:24:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:30.95326398 +0000 UTC m=+7.130269965" watchObservedRunningTime="2025-11-02 23:24:30.955725662 +0000 UTC m=+7.132731640" + Nov 02 23:24:30 false-999044 kubelet[2313]: I1102 23:24:30.975988 2313 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=0.975977416 podStartE2EDuration="975.977416ms" podCreationTimestamp="2025-11-02 23:24:30 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:24:30.975829808 +0000 UTC m=+7.152835793" watchObservedRunningTime="2025-11-02 23:24:30.975977416 +0000 UTC m=+7.152983393" + Nov 02 23:24:32 false-999044 kubelet[2313]: I1102 23:24:32.474530 2313 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" + Nov 02 23:24:34 false-999044 kubelet[2313]: I1102 23:24:34.305643 2313 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24" + Nov 02 23:24:34 false-999044 kubelet[2313]: I1102 23:24:34.306027 2313 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24" + Nov 02 23:24:36 false-999044 kubelet[2313]: I1102 23:24:36.126953 2313 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b94b251f-6cd5-43cf-a16b-c8b04e7e09f9-config-volume\") pod \"b94b251f-6cd5-43cf-a16b-c8b04e7e09f9\" (UID: \"b94b251f-6cd5-43cf-a16b-c8b04e7e09f9\") " + Nov 02 23:24:36 false-999044 kubelet[2313]: I1102 23:24:36.126977 2313 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4dr7t\" (UniqueName: \"kubernetes.io/projected/b94b251f-6cd5-43cf-a16b-c8b04e7e09f9-kube-api-access-4dr7t\") pod \"b94b251f-6cd5-43cf-a16b-c8b04e7e09f9\" (UID: \"b94b251f-6cd5-43cf-a16b-c8b04e7e09f9\") " + Nov 02 23:24:36 false-999044 kubelet[2313]: I1102 23:24:36.127278 2313 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b94b251f-6cd5-43cf-a16b-c8b04e7e09f9-config-volume" (OuterVolumeSpecName: "config-volume") pod "b94b251f-6cd5-43cf-a16b-c8b04e7e09f9" (UID: "b94b251f-6cd5-43cf-a16b-c8b04e7e09f9"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGIDValue "" + Nov 02 23:24:36 false-999044 kubelet[2313]: I1102 23:24:36.128313 2313 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b94b251f-6cd5-43cf-a16b-c8b04e7e09f9-kube-api-access-4dr7t" (OuterVolumeSpecName: "kube-api-access-4dr7t") pod "b94b251f-6cd5-43cf-a16b-c8b04e7e09f9" (UID: "b94b251f-6cd5-43cf-a16b-c8b04e7e09f9"). InnerVolumeSpecName "kube-api-access-4dr7t". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" + Nov 02 23:24:36 false-999044 kubelet[2313]: I1102 23:24:36.227662 2313 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-4dr7t\" (UniqueName: \"kubernetes.io/projected/b94b251f-6cd5-43cf-a16b-c8b04e7e09f9-kube-api-access-4dr7t\") on node \"false-999044\" DevicePath \"\"" + Nov 02 23:24:36 false-999044 kubelet[2313]: I1102 23:24:36.227678 2313 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b94b251f-6cd5-43cf-a16b-c8b04e7e09f9-config-volume\") on node \"false-999044\" DevicePath \"\"" + Nov 02 23:24:36 false-999044 kubelet[2313]: I1102 23:24:36.972561 2313 scope.go:117] "RemoveContainer" containerID="17e84b3f6e33f5f53d4c4c70322c28490e1a813f809a5695925138306487992f" + Nov 02 23:24:36 false-999044 kubelet[2313]: I1102 23:24:36.979784 2313 scope.go:117] "RemoveContainer" containerID="17e84b3f6e33f5f53d4c4c70322c28490e1a813f809a5695925138306487992f" + Nov 02 23:24:36 false-999044 kubelet[2313]: E1102 23:24:36.980454 2313 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: 17e84b3f6e33f5f53d4c4c70322c28490e1a813f809a5695925138306487992f" containerID="17e84b3f6e33f5f53d4c4c70322c28490e1a813f809a5695925138306487992f" + Nov 02 23:24:36 false-999044 kubelet[2313]: I1102 23:24:36.980487 2313 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"17e84b3f6e33f5f53d4c4c70322c28490e1a813f809a5695925138306487992f"} err="failed to get container status \"17e84b3f6e33f5f53d4c4c70322c28490e1a813f809a5695925138306487992f\": rpc error: code = Unknown desc = Error response from daemon: No such container: 17e84b3f6e33f5f53d4c4c70322c28490e1a813f809a5695925138306487992f" + Nov 02 23:24:37 false-999044 kubelet[2313]: I1102 23:24:37.878728 2313 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b94b251f-6cd5-43cf-a16b-c8b04e7e09f9" path="/var/lib/kubelet/pods/b94b251f-6cd5-43cf-a16b-c8b04e7e09f9/volumes" + Nov 02 23:24:39 false-999044 kubelet[2313]: I1102 23:24:39.846529 2313 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nqhb\" (UniqueName: \"kubernetes.io/projected/631c7650-d88b-413d-af32-b6dfc3454f24-kube-api-access-2nqhb\") pod \"netcat-cd4db9dbf-lrsv6\" (UID: \"631c7650-d88b-413d-af32-b6dfc3454f24\") " pod="default/netcat-cd4db9dbf-lrsv6" + Nov 02 23:24:42 false-999044 kubelet[2313]: I1102 23:24:42.006799 2313 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/netcat-cd4db9dbf-lrsv6" podStartSLOduration=2.033718944 podStartE2EDuration="3.006784992s" podCreationTimestamp="2025-11-02 23:24:39 +0000 UTC" firstStartedPulling="2025-11-02 23:24:40.241274164 +0000 UTC m=+16.418280123" lastFinishedPulling="2025-11-02 23:24:41.214340208 +0000 UTC m=+17.391346171" observedRunningTime="2025-11-02 23:24:42.006645039 +0000 UTC m=+18.183651014" watchObservedRunningTime="2025-11-02 23:24:42.006784992 +0000 UTC m=+18.183790972" + Nov 02 23:24:53 false-999044 kubelet[2313]: E1102 23:24:53.870050 2313 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp [::1]:40096->[::1]:43839: write tcp [::1]:40096->[::1]:43839: write: broken pipe + + + >>> host: /etc/kubernetes/kubelet.conf: + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://192.168.76.2:8443 + name: mk + contexts: + - context: + cluster: mk + user: system:node:false-999044 + name: system:node:false-999044@mk + current-context: system:node:false-999044@mk + kind: Config + users: + - name: system:node:false-999044 + user: + client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem + client-key: /var/lib/kubelet/pki/kubelet-client-current.pem + + + >>> host: /var/lib/kubelet/config.yaml: + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + >>> k8s: kubectl config: + apiVersion: v1 + clusters: + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:24:29 UTC + provider: 
minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.76.2:8443 + name: false-999044 + contexts: + - context: + cluster: false-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:24:29 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: false-999044 + name: false-999044 + current-context: false-999044 + kind: Config + users: + - name: false-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.key + + + >>> k8s: cms: + apiVersion: v1 + items: + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. 
+ creationTimestamp: "2025-11-02T23:24:28Z" + name: kube-root-ca.crt + namespace: default + resourceVersion: "294" + uid: da0d310f-d4d9-4bc2-aa9e-93de30e1ca80 + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:24:28Z" + name: kube-root-ca.crt + namespace: kube-node-lease + resourceVersion: "295" + uid: b07a8aeb-c2a7-4ea5-b60b-f24699d5728f + - apiVersion: v1 + data: + jws-kubeconfig-s7f34q: eyJhbGciOiJIUzI1NiIsImtpZCI6InM3ZjM0cSJ9..tHBHuOoUu-fdoSXd-xQU9Dap-sx8cYUB4HRsrMxWzF4 + kubeconfig: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: 
https://control-plane.minikube.internal:8443 + name: "" + contexts: null + current-context: "" + kind: Config + users: null + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:24:23Z" + name: cluster-info + namespace: kube-public + resourceVersion: "324" + uid: 2366a35d-5dcd-40fc-bd88-ef3f00a1b3e3 + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:24:28Z" + name: kube-root-ca.crt + namespace: kube-public + resourceVersion: "296" + uid: d5854e3d-58e2-4076-9b34-ff61f0103849 + - apiVersion: v1 + data: + Corefile: | + .:53 { + log + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + hosts { + 192.168.76.1 host.minikube.internal + fallthrough + } + forward . 
/etc/resolv.conf { + max_concurrent 1000 + } + cache 30 { + disable success cluster.local + disable denial cluster.local + } + loop + reload + loadbalance + } + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:24:23Z" + name: coredns + namespace: kube-system + resourceVersion: "346" + uid: 8fd738e3-56a4-4ece-bacc-f93474d2c874 + - apiVersion: v1 + data: + client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + requestheader-allowed-names: '["front-proxy-client"]' + requestheader-client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDETCCAfmgAwIBAgIIOgAWMfax4r0wDQYJKoZIhvcNAQELBQAwGTEXMBUGA1UE + AxMOZnJvbnQtcHJveHktY2EwHhcNMjUxMTAyMjMxOTE2WhcNMzUxMDMxMjMyNDE2 + WjAZMRcwFQYDVQQDEw5mcm9udC1wcm94eS1jYTCCASIwDQYJKoZIhvcNAQEBBQAD + ggEPADCCAQoCggEBAOvXo2vg+g6KJWONVCgKPf32zlCvwoZvHHX2bCdOxs5RayAA + SHiB42LvEQY33B1R25UDjz3vCnQs030q4jFAf333Sf+nu4gejwIh+jKrAeYrPueU + nnksKPRPJo/B3m1W8IjDmSjeTBx2t13MxOhgw7yNNvF1hEwM1b9gn6dOS4wzkxbf + +G05HnB015f61W/2hBfocn8EPa37kgS2+2Kam14GwJrLNgMaWkWVKmwyULm/DxFw + Og4N8KlEMPziwggPFiUlmq8GTRtOoJLt9jWSzW3vGbdGCtHAIlJkHtQVY5n64Dlp + 2Bto9EmE2qW5NOkZ1TquczISNz9kHdVkExSIy+UCAwEAAaNdMFswDgYDVR0PAQH/ + BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe+TTWq8fLQESQpNbEo + TRHGHav3MBkGA1UdEQQSMBCCDmZyb250LXByb3h5LWNhMA0GCSqGSIb3DQEBCwUA + A4IBAQAXymtMKz4O2WAWbbLtXOeC1fMNSX0FrkFRQRmxazZsfOsy6U1Nw+vyx/yS + 4fB1a7y54prrqnxmLxbNOWJdsI9lbGvoW2d5ZXFKIBfmFPIb75Dfu8sS0Y2VkYI+ + CwR6cUm/P37zpFTYaQquFPGoePwJcLeDMysxWIPpuTC9EbqcUCWmcZ+GMIggKPGv + Mogfrpr31QDJyk2bidpjjI4xNYe9+Bd3mlnUaFvGWvzmSwNywc7YZy7yqeB7wOSN + XQ/RyoDId18Mrv94N5dkIqV+TxvvzmtIhHgIrDBmAPQLsN8K5YfgxIL2idNBuuMt + LFrcf4iQfYhI2qH2yXmbO2qUf1zG + -----END CERTIFICATE----- + requestheader-extra-headers-prefix: '["X-Remote-Extra-"]' + requestheader-group-headers: '["X-Remote-Group"]' + requestheader-username-headers: '["X-Remote-User"]' + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:24:21Z" + name: extension-apiserver-authentication + namespace: kube-system + resourceVersion: "27" + uid: 93ceb2f6-af49-491b-a7ef-0e041dddcffe + - apiVersion: v1 + data: + since: "2025-11-02" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:24:21Z" + name: kube-apiserver-legacy-service-account-token-tracking + namespace: kube-system + resourceVersion: "20" + uid: f8155b30-c49c-4873-8296-1ad391c0446b + - apiVersion: v1 + data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: 
0.0.0.0 + bindAddressHardFail: false + clientConnection: + acceptContentTypes: "" + burst: 0 + contentType: "" + kubeconfig: /var/lib/kube-proxy/kubeconfig.conf + qps: 0 + clusterCIDR: 10.244.0.0/16 + configSyncPeriod: 0s + conntrack: + maxPerCore: 0 + min: null + tcpBeLiberal: false + tcpCloseWaitTimeout: 0s + tcpEstablishedTimeout: 0s + udpStreamTimeout: 0s + udpTimeout: 0s + detectLocal: + bridgeInterface: "" + interfaceNamePrefix: "" + detectLocalMode: "" + enableProfiling: false + healthzBindAddress: "" + hostnameOverride: "" + iptables: + localhostNodePorts: null + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + ipvs: + excludeCIDRs: null + minSyncPeriod: 0s + scheduler: "" + strictARP: false + syncPeriod: 0s + tcpFinTimeout: 0s + tcpTimeout: 0s + udpTimeout: 0s + kind: KubeProxyConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + metricsBindAddress: 0.0.0.0:10249 + mode: "" + nftables: + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + nodePortAddresses: null + oomScoreAdj: null + portRange: "" + showHiddenMetricsForVersion: "" + winkernel: + enableDSR: false + forwardHealthCheckVip: false + networkName: "" + rootHnsEndpointName: "" + sourceVip: "" + kubeconfig.conf: |- + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://control-plane.minikube.internal:8443 + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:24:24Z" + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system + resourceVersion: "252" + uid: 20b599c4-1613-41e8-a421-455cc222adeb + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. 
+ creationTimestamp: "2025-11-02T23:24:28Z" + name: kube-root-ca.crt + namespace: kube-system + resourceVersion: "297" + uid: 510dc549-4683-4319-845d-538f2c92ece1 + - apiVersion: v1 + data: + ClusterConfiguration: | + apiServer: + certSANs: + - 127.0.0.1 + - localhost + - 192.168.76.2 + extraArgs: + - name: enable-admission-plugins + value: NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + apiVersion: kubeadm.k8s.io/v1beta4 + caCertificateValidityPeriod: 87600h0m0s + certificateValidityPeriod: 8760h0m0s + certificatesDir: /var/lib/minikube/certs + clusterName: mk + controlPlaneEndpoint: control-plane.minikube.internal:8443 + controllerManager: + extraArgs: + - name: allocate-node-cidrs + value: "true" + - name: leader-elect + value: "false" + dns: {} + encryptionAlgorithm: RSA-2048 + etcd: + local: + dataDir: /var/lib/minikube/etcd + imageRepository: registry.k8s.io + kind: ClusterConfiguration + kubernetesVersion: v1.34.1 + networking: + dnsDomain: cluster.local + podSubnet: 10.244.0.0/16 + serviceSubnet: 10.96.0.0/12 + proxy: {} + scheduler: + extraArgs: + - name: leader-elect + value: "false" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:24:23Z" + name: kubeadm-config + namespace: kube-system + resourceVersion: "206" + uid: d89243b1-acae-4dde-9c6d-956d4a82499b + - apiVersion: v1 + data: + kubelet: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:24:23Z" + name: kubelet-config + namespace: kube-system + resourceVersion: "209" + uid: 2ff76816-8c11-4698-b126-c49b38dc762c + kind: List + metadata: + resourceVersion: "" + + + >>> host: docker daemon status: + ● docker.service - Docker Application Container Engine + Loaded: loaded (]8;;file://false-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;;; enabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:24:15 UTC; 44s ago + TriggeredBy: ● docker.socket + Docs: ]8;;https://docs.docker.comhttps://docs.docker.com]8;; + Main PID: 1053 (dockerd) + Tasks: 14 + Memory: 170.7M + CPU: 2.396s + CGroup: /system.slice/docker.service + └─1053 
/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + + Nov 02 23:24:15 false-999044 dockerd[1053]: time="2025-11-02T23:24:15.644907056Z" level=info msg="Docker daemon" commit=f8215cc containerd-snapshotter=false storage-driver=overlay2 version=28.5.1 + Nov 02 23:24:15 false-999044 dockerd[1053]: time="2025-11-02T23:24:15.644956572Z" level=info msg="Initializing buildkit" + Nov 02 23:24:15 false-999044 dockerd[1053]: time="2025-11-02T23:24:15.656929418Z" level=info msg="Completed buildkit initialization" + Nov 02 23:24:15 false-999044 dockerd[1053]: time="2025-11-02T23:24:15.659231043Z" level=info msg="Daemon has completed initialization" + Nov 02 23:24:15 false-999044 dockerd[1053]: time="2025-11-02T23:24:15.659284950Z" level=info msg="API listen on /run/docker.sock" + Nov 02 23:24:15 false-999044 systemd[1]: Started docker.service - Docker Application Container Engine. + Nov 02 23:24:15 false-999044 dockerd[1053]: time="2025-11-02T23:24:15.659288918Z" level=info msg="API listen on /var/run/docker.sock" + Nov 02 23:24:15 false-999044 dockerd[1053]: time="2025-11-02T23:24:15.659302508Z" level=info msg="API listen on [::]:2376" + Nov 02 23:24:35 false-999044 dockerd[1053]: time="2025-11-02T23:24:35.961640581Z" level=info msg="ignoring event" container=17e84b3f6e33f5f53d4c4c70322c28490e1a813f809a5695925138306487992f module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + Nov 02 23:24:36 false-999044 dockerd[1053]: time="2025-11-02T23:24:36.022668032Z" level=info msg="ignoring event" container=285a7b9b51b302755a97d69b80ede9fe1582116464e96d422f956bb570cdeca8 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + + + >>> host: docker daemon config: + # ]8;;file://false-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;; + [Unit] + Description=Docker Application Container Engine + Documentation=https://docs.docker.com + After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target + Wants=network-online.target containerd.service + Requires=docker.socket + StartLimitBurst=3 + StartLimitIntervalSec=60 + + [Service] + Type=notify + Restart=always + + + + # This file is a systemd drop-in unit that inherits from the base dockerd configuration. + # The base configuration already specifies an 'ExecStart=...' command. The first directive + # here is to clear out that command inherited from the base configuration. Without this, + # the command from the base configuration and the command specified here are treated as + # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd + # will catch this invalid input and refuse to start the service with an error like: + # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. + + # NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other + # container runtimes. If left unlimited, it may result in OOM issues with MySQL. 
+ ExecStart= + ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + ExecReload=/bin/kill -s HUP $MAINPID + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Uncomment TasksMax if your systemd version supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + TimeoutStartSec=0 + + # set delegate yes so that systemd does not reset the cgroups of docker containers + Delegate=yes + + # kill only the docker process, not all processes in the cgroup + KillMode=process + OOMScoreAdjust=-500 + + [Install] + WantedBy=multi-user.target + + + >>> host: /etc/docker/daemon.json: + {"exec-opts":["native.cgroupdriver=systemd"],"log-driver":"json-file","log-opts":{"max-size":"100m"},"storage-driver":"overlay2"} + + >>> host: docker system info: + Client: Docker Engine - Community + Version: 28.5.1 + Context: default + Debug Mode: false + Plugins: + buildx: Docker Buildx (Docker Inc.) + Version: v0.29.1 + Path: /usr/libexec/docker/cli-plugins/docker-buildx + + Server: + Containers: 17 + Running: 16 + Paused: 0 + Stopped: 1 + Images: 9 + Server Version: 28.5.1 + Storage Driver: overlay2 + Backing Filesystem: extfs + Supports d_type: true + Using metacopy: false + Native Overlay Diff: true + userxattr: false + Logging Driver: json-file + Cgroup Driver: systemd + Cgroup Version: 2 + Plugins: + Volume: local + Network: bridge host ipvlan macvlan null overlay + Log: awslogs fluentd gcplogs gelf journald json-file local splunk syslog + CDI spec directories: + /etc/cdi + /var/run/cdi + Swarm: inactive + Runtimes: runc io.containerd.runc.v2 + Default Runtime: runc + Init Binary: docker-init + containerd version: b98a3aace656320842a23f4a392a33f46af97866 + runc version: v1.3.0-0-g4ca628d1 + init version: de40ad0 + Security Options: + seccomp + Profile: builtin + cgroupns + Kernel Version: 6.6.97+ + Operating System: Debian GNU/Linux 12 (bookworm) + OSType: linux + Architecture: x86_64 + CPUs: 8 + Total Memory: 60.83GiB + Name: false-999044 + ID: 47970413-4a99-4b4d-b02d-d9c0846d7e6f + Docker Root Dir: /var/lib/docker + Debug Mode: false + No Proxy: control-plane.minikube.internal + Labels: + provider=docker + Experimental: false + Insecure Registries: + 10.96.0.0/12 + ::1/128 + 127.0.0.0/8 + Live Restore Enabled: false + + + + >>> host: cri-docker daemon status: + ● cri-docker.service - CRI Interface for Docker Application Container Engine + Loaded: loaded (]8;;file://false-999044/lib/systemd/system/cri-docker.service/lib/systemd/system/cri-docker.service]8;;; disabled; preset: enabled) + Drop-In: /etc/systemd/system/cri-docker.service.d + └─]8;;file://false-999044/etc/systemd/system/cri-docker.service.d/10-cni.conf10-cni.conf]8;; + Active: active (running) since Sun 2025-11-02 23:24:15 UTC; 46s ago + TriggeredBy: ● cri-docker.socket + Docs: ]8;;https://docs.mirantis.comhttps://docs.mirantis.com]8;; + Main PID: 1362 (cri-dockerd) + Tasks: 13 + Memory: 17.1M + CPU: 648ms + CGroup: /system.slice/cri-docker.service + └─1362 /usr/bin/cri-dockerd --container-runtime-endpoint fd:// 
--pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + Nov 02 23:24:20 false-999044 cri-dockerd[1362]: time="2025-11-02T23:24:20Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0c9b7f5432317c28a244af5fcbbba7ac8848a85162535186b87098a9afd756e5/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:24:20 false-999044 cri-dockerd[1362]: time="2025-11-02T23:24:20Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/5aab7587e713ee5596a01bab9eced086f184b959291a271ecba057e0ed7ad1c2/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:24:20 false-999044 cri-dockerd[1362]: time="2025-11-02T23:24:20Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/cf459201581a1c86cb06b24b661a9337159960c9a23d09574fa880dcc54973bc/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:24:29 false-999044 cri-dockerd[1362]: time="2025-11-02T23:24:29Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/de7e83e71fa3979d41a9b69c4661c71ae8a7a8a5c711c97c981cda95e1f10a38/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:24:30 false-999044 cri-dockerd[1362]: time="2025-11-02T23:24:30Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/285a7b9b51b302755a97d69b80ede9fe1582116464e96d422f956bb570cdeca8/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:24:30 false-999044 cri-dockerd[1362]: time="2025-11-02T23:24:30Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/9c805584674cc6e433003651b9e719d39cadeaed8985415ae2d0b7d9b984574f/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:24:30 false-999044 cri-dockerd[1362]: time="2025-11-02T23:24:30Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a475faf80adb4b783470b819026ae1b7eff9a25dd494bdfd6a41012c07370ae8/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:24:34 false-999044 cri-dockerd[1362]: time="2025-11-02T23:24:34Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}" + Nov 02 23:24:40 false-999044 cri-dockerd[1362]: time="2025-11-02T23:24:40Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/69c9c83ffdf3bda24e4b2b75ef9be16ae5470ed59d59ab1bdbf18982040c9476/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local test-pods.svc.cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:24:41 false-999044 cri-dockerd[1362]: 
time="2025-11-02T23:24:41Z" level=info msg="Stop pulling image registry.k8s.io/e2e-test-images/agnhost:2.40: Status: Downloaded newer image for registry.k8s.io/e2e-test-images/agnhost:2.40" + + + >>> host: cri-docker daemon config: + # ]8;;file://false-999044/lib/systemd/system/cri-docker.service/lib/systemd/system/cri-docker.service]8;; + [Unit] + Description=CRI Interface for Docker Application Container Engine + Documentation=https://docs.mirantis.com + After=network-online.target firewalld.service docker.service + Wants=network-online.target + Requires=cri-docker.socket + + [Service] + Type=notify + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// + ExecReload=/bin/kill -s HUP $MAINPID + TimeoutSec=0 + RestartSec=2 + Restart=always + + # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. + # Both the old, and new location are accepted by systemd 229 and up, so using the old location + # to make them work for either version of systemd. + StartLimitBurst=3 + + # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. + # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make + # this option work for either version of systemd. + StartLimitInterval=60s + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Comment TasksMax if your systemd version does not support it. + # Only systemd 226 and above support this option. + TasksMax=infinity + Delegate=yes + KillMode=process + + [Install] + WantedBy=multi-user.target + + # ]8;;file://false-999044/etc/systemd/system/cri-docker.service.d/10-cni.conf/etc/systemd/system/cri-docker.service.d/10-cni.conf]8;; + [Service] + ExecStart= + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + + >>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf: + [Service] + ExecStart= + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + >>> host: /usr/lib/systemd/system/cri-docker.service: + [Unit] + Description=CRI Interface for Docker Application Container Engine + Documentation=https://docs.mirantis.com + After=network-online.target firewalld.service docker.service + Wants=network-online.target + Requires=cri-docker.socket + + [Service] + Type=notify + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// + ExecReload=/bin/kill -s HUP $MAINPID + TimeoutSec=0 + RestartSec=2 + Restart=always + + # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. + # Both the old, and new location are accepted by systemd 229 and up, so using the old location + # to make them work for either version of systemd. + StartLimitBurst=3 + + # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. + # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make + # this option work for either version of systemd. + StartLimitInterval=60s + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. 
+ LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Comment TasksMax if your systemd version does not support it. + # Only systemd 226 and above support this option. + TasksMax=infinity + Delegate=yes + KillMode=process + + [Install] + WantedBy=multi-user.target + + + >>> host: cri-dockerd version: + cri-dockerd dev (HEAD) + + + >>> host: containerd daemon status: + ● containerd.service - containerd container runtime + Loaded: loaded (]8;;file://false-999044/lib/systemd/system/containerd.service/lib/systemd/system/containerd.service]8;;; disabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:24:15 UTC; 47s ago + Docs: ]8;;https://containerd.iohttps://containerd.io]8;; + Main PID: 1038 (containerd) + Tasks: 186 + Memory: 93.7M + CPU: 1.065s + CGroup: /system.slice/containerd.service + ├─1038 /usr/bin/containerd + ├─1889 /usr/bin/containerd-shim-runc-v2 -namespace moby -id bd00f85b5126772bade65e862963c181834e4be29905ac4d51f5477884e92951 -address /run/containerd/containerd.sock + ├─1890 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 5aab7587e713ee5596a01bab9eced086f184b959291a271ecba057e0ed7ad1c2 -address /run/containerd/containerd.sock + ├─1893 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 0c9b7f5432317c28a244af5fcbbba7ac8848a85162535186b87098a9afd756e5 -address /run/containerd/containerd.sock + ├─1970 /usr/bin/containerd-shim-runc-v2 -namespace moby -id cf459201581a1c86cb06b24b661a9337159960c9a23d09574fa880dcc54973bc -address /run/containerd/containerd.sock + ├─2057 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 0bfce1b4b741ba8a7ed0f3598c11251a74fd486f07bd99ecb5fd15b35b7c005a -address /run/containerd/containerd.sock + ├─2078 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 3e6ade66ef26668010cfeb97b8d18c77a61192c6c27d7f09da6f73b59f531d4a -address /run/containerd/containerd.sock + ├─2088 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 1f828285a9bdf11d5433f1c5d5ad92393000fbd433d5d763f4b97e0749f8b096 -address /run/containerd/containerd.sock + ├─2158 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 1a97305adb50984d2f14fcec2958d4ce060dcc2008c50a19f7a015c399b22eed -address /run/containerd/containerd.sock + ├─2520 /usr/bin/containerd-shim-runc-v2 -namespace moby -id de7e83e71fa3979d41a9b69c4661c71ae8a7a8a5c711c97c981cda95e1f10a38 -address /run/containerd/containerd.sock + ├─2565 /usr/bin/containerd-shim-runc-v2 -namespace moby -id a1f579c38aa81e98147023b77ca050e5a8b03f467cbf22813e9511caa9340edd -address /run/containerd/containerd.sock + ├─3016 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 9c805584674cc6e433003651b9e719d39cadeaed8985415ae2d0b7d9b984574f -address /run/containerd/containerd.sock + ├─3119 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 2e79811fed34efd03acfa1da7a3c9e33173a0bb014c73df6b43bee74b5610b96 -address /run/containerd/containerd.sock + ├─3173 /usr/bin/containerd-shim-runc-v2 -namespace moby -id a475faf80adb4b783470b819026ae1b7eff9a25dd494bdfd6a41012c07370ae8 -address /run/containerd/containerd.sock + ├─3218 /usr/bin/containerd-shim-runc-v2 -namespace moby -id b420e320cc4a5439f73591991f1e980bb5fc80fbe1d5729fe1c93753fa34bb96 -address /run/containerd/containerd.sock + ├─3434 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 69c9c83ffdf3bda24e4b2b75ef9be16ae5470ed59d59ab1bdbf18982040c9476 -address /run/containerd/containerd.sock + └─3597 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 5d31cc088d33d093d59597fdfe038c5e0510b0913d227eef464741f18b9ba527 -address /run/containerd/containerd.sock + 
+ Nov 02 23:24:15 false-999044 containerd[1038]: time="2025-11-02T23:24:15.462052706Z" level=info msg="Start cni network conf syncer for default" + Nov 02 23:24:15 false-999044 containerd[1038]: time="2025-11-02T23:24:15.462093795Z" level=info msg="Start streaming server" + Nov 02 23:24:15 false-999044 containerd[1038]: time="2025-11-02T23:24:15.462134358Z" level=info msg="containerd successfully booted in 0.013601s" + Nov 02 23:24:15 false-999044 systemd[1]: Started containerd.service - containerd container runtime. + Nov 02 23:24:35 false-999044 containerd[1038]: time="2025-11-02T23:24:35.961531465Z" level=info msg="shim disconnected" id=17e84b3f6e33f5f53d4c4c70322c28490e1a813f809a5695925138306487992f namespace=moby + Nov 02 23:24:35 false-999044 containerd[1038]: time="2025-11-02T23:24:35.961566924Z" level=warning msg="cleaning up after shim disconnected" id=17e84b3f6e33f5f53d4c4c70322c28490e1a813f809a5695925138306487992f namespace=moby + Nov 02 23:24:35 false-999044 containerd[1038]: time="2025-11-02T23:24:35.961575280Z" level=info msg="cleaning up dead shim" namespace=moby + Nov 02 23:24:36 false-999044 containerd[1038]: time="2025-11-02T23:24:36.022678041Z" level=info msg="shim disconnected" id=285a7b9b51b302755a97d69b80ede9fe1582116464e96d422f956bb570cdeca8 namespace=moby + Nov 02 23:24:36 false-999044 containerd[1038]: time="2025-11-02T23:24:36.022706898Z" level=warning msg="cleaning up after shim disconnected" id=285a7b9b51b302755a97d69b80ede9fe1582116464e96d422f956bb570cdeca8 namespace=moby + Nov 02 23:24:36 false-999044 containerd[1038]: time="2025-11-02T23:24:36.022711504Z" level=info msg="cleaning up dead shim" namespace=moby + + + >>> host: containerd daemon config: + # ]8;;file://false-999044/lib/systemd/system/containerd.service/lib/systemd/system/containerd.service]8;; + # Copyright The containerd Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /lib/systemd/system/containerd.service: + # Copyright The containerd Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. 
+ # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /etc/containerd/config.toml: + version = 2 + root = "/var/lib/containerd" + state = "/run/containerd" + oom_score = 0 + # imports + + [grpc] + address = "/run/containerd/containerd.sock" + uid = 0 + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + + [debug] + address = "" + uid = 0 + gid = 0 + level = "" + + [metrics] + address = "" + grpc_histogram = false + + [cgroup] + path = "" + + [plugins] + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + [plugins."io.containerd.grpc.v1.cri"] + enable_unprivileged_ports = true + stream_server_address = "" + stream_server_port = "10010" + enable_selinux = false + sandbox_image = "registry.k8s.io/pause:3.10.1" + stats_collect_period = 10 + enable_tls_streaming = false + max_container_log_line_size = 16384 + restrict_oom_score_adj = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = true + snapshotter = "overlayfs" + default_runtime_name = "runc" + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + runtime_type = "" + runtime_engine = "" + runtime_root = "" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + [plugins."io.containerd.gc.v1.scheduler"] + pause_threshold = 0.02 + deletion_threshold = 0 + mutation_threshold = 100 + schedule_delay = "0s" + startup_delay = "100ms" + + + >>> host: containerd config dump: + disabled_plugins = [] + imports = ["/etc/containerd/config.toml"] + oom_score = 0 + plugin_dir = "" + required_plugins = [] + root = "/var/lib/containerd" + state = "/run/containerd" + temp = "" + version = 2 + + [cgroup] + path = "" + + [debug] + address = "" + format = "" + gid = 0 + level = "" + uid = 0 + + [grpc] + 
address = "/run/containerd/containerd.sock" + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + tcp_address = "" + tcp_tls_ca = "" + tcp_tls_cert = "" + tcp_tls_key = "" + uid = 0 + + [metrics] + address = "" + grpc_histogram = false + + [plugins] + + [plugins."io.containerd.gc.v1.scheduler"] + deletion_threshold = 0 + mutation_threshold = 100 + pause_threshold = 0.02 + schedule_delay = "0s" + startup_delay = "100ms" + + [plugins."io.containerd.grpc.v1.cri"] + cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"] + device_ownership_from_security_context = false + disable_apparmor = false + disable_cgroup = false + disable_hugetlb_controller = true + disable_proc_mount = false + disable_tcp_service = true + drain_exec_sync_io_timeout = "0s" + enable_cdi = false + enable_selinux = false + enable_tls_streaming = false + enable_unprivileged_icmp = false + enable_unprivileged_ports = true + ignore_deprecation_warnings = [] + ignore_image_defined_volumes = false + image_pull_progress_timeout = "5m0s" + image_pull_with_sync_fs = false + max_concurrent_downloads = 3 + max_container_log_line_size = 16384 + netns_mounts_under_state_dir = false + restrict_oom_score_adj = false + sandbox_image = "registry.k8s.io/pause:3.10.1" + selinux_category_range = 1024 + stats_collect_period = 10 + stream_idle_timeout = "4h0m0s" + stream_server_address = "" + stream_server_port = "10010" + systemd_cgroup = false + tolerate_missing_hugetlb_controller = true + unset_seccomp_profile = "" + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + ip_pref = "" + max_conf_num = 1 + setup_serially = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + default_runtime_name = "runc" + disable_snapshot_annotations = true + discard_unpacked_layers = true + ignore_blockio_not_enabled_errors = false + ignore_rdt_not_enabled_errors = false + no_pivot = false + snapshotter = "overlayfs" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "io.containerd.runc.v2" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + 
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".image_decryption] + key_model = "node" + + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.grpc.v1.cri".registry.auths] + + [plugins."io.containerd.grpc.v1.cri".registry.configs] + + [plugins."io.containerd.grpc.v1.cri".registry.headers] + + [plugins."io.containerd.grpc.v1.cri".registry.mirrors] + + [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming] + tls_cert_file = "" + tls_key_file = "" + + [plugins."io.containerd.internal.v1.opt"] + path = "/opt/containerd" + + [plugins."io.containerd.internal.v1.restart"] + interval = "10s" + + [plugins."io.containerd.internal.v1.tracing"] + + [plugins."io.containerd.metadata.v1.bolt"] + content_sharing_policy = "shared" + + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + + [plugins."io.containerd.nri.v1.nri"] + disable = true + disable_connections = false + plugin_config_path = "/etc/nri/conf.d" + plugin_path = "/opt/nri/plugins" + plugin_registration_timeout = "5s" + plugin_request_timeout = "2s" + socket_path = "/var/run/nri/nri.sock" + + [plugins."io.containerd.runtime.v1.linux"] + no_shim = false + runtime = "runc" + runtime_root = "" + shim = "containerd-shim" + shim_debug = false + + [plugins."io.containerd.runtime.v2.task"] + platforms = ["linux/amd64"] + sched_core = false + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + sync_fs = false + + [plugins."io.containerd.service.v1.tasks-service"] + blockio_config_file = "" + rdt_config_file = "" + + [plugins."io.containerd.snapshotter.v1.aufs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.blockfile"] + fs_type = "" + mount_options = [] + root_path = "" + scratch_file = "" + + [plugins."io.containerd.snapshotter.v1.btrfs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.devmapper"] + async_remove = false + base_image_size = "" + discard_blocks = false + fs_options = "" + fs_type = "" + pool_name = "" + root_path = "" + + [plugins."io.containerd.snapshotter.v1.native"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.overlayfs"] + mount_options = [] + root_path = "" + sync_remove = false + upperdir_label = false + + [plugins."io.containerd.snapshotter.v1.zfs"] + root_path = "" + + [plugins."io.containerd.tracing.processor.v1.otlp"] + + [plugins."io.containerd.transfer.v1.local"] + config_path = "" + max_concurrent_downloads = 3 + max_concurrent_uploaded_layers = 3 + + [[plugins."io.containerd.transfer.v1.local".unpack_config]] + differ = "walking" + platform = "linux/amd64" + snapshotter = "overlayfs" + + [proxy_plugins] + + [stream_processors] + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"] + accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"] + args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = "application/vnd.oci.image.layer.v1.tar" + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"] + accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"] + args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = "application/vnd.oci.image.layer.v1.tar+gzip" + + [timeouts] + 
"io.containerd.timeout.bolt.open" = "0s" + "io.containerd.timeout.metrics.shimstats" = "2s" + "io.containerd.timeout.shim.cleanup" = "5s" + "io.containerd.timeout.shim.load" = "5s" + "io.containerd.timeout.shim.shutdown" = "3s" + "io.containerd.timeout.task.state" = "2s" + + [ttrpc] + address = "" + gid = 0 + uid = 0 + + + >>> host: crio daemon status: + ○ crio.service - Container Runtime Interface for OCI (CRI-O) + Loaded: loaded (]8;;file://false-999044/lib/systemd/system/crio.service/lib/systemd/system/crio.service]8;;; disabled; preset: enabled) + Active: inactive (dead) + Docs: ]8;;https://github.com/cri-o/cri-ohttps://github.com/cri-o/cri-o]8;; + ssh: Process exited with status 3 + + + >>> host: crio daemon config: + # ]8;;file://false-999044/lib/systemd/system/crio.service/lib/systemd/system/crio.service]8;; + [Unit] + Description=Container Runtime Interface for OCI (CRI-O) + Documentation=https://github.com/cri-o/cri-o + Wants=network-online.target + Before=kubelet.service + After=network-online.target + + [Service] + Type=notify + EnvironmentFile=-/etc/default/crio + Environment=GOTRACEBACK=crash + ExecStart=/usr/bin/crio \ + $CRIO_CONFIG_OPTIONS \ + $CRIO_RUNTIME_OPTIONS \ + $CRIO_STORAGE_OPTIONS \ + $CRIO_NETWORK_OPTIONS \ + $CRIO_METRICS_OPTIONS + ExecReload=/bin/kill -s HUP $MAINPID + TasksMax=infinity + LimitNOFILE=1048576 + LimitNPROC=1048576 + LimitCORE=infinity + OOMScoreAdjust=-999 + TimeoutStartSec=0 + Restart=on-failure + RestartSec=10 + + [Install] + WantedBy=multi-user.target + Alias=cri-o.service + + + >>> host: /etc/crio: + /etc/crio/crio.conf.d/10-crio.conf + [crio.image] + signature_policy = "/etc/crio/policy.json" + + [crio.runtime] + default_runtime = "crun" + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_root = "/run/crun" + monitor_path = "/usr/libexec/crio/conmon" + allowed_annotations = [ + "io.containers.trace-syscall", + ] + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_root = "/run/runc" + monitor_path = "/usr/libexec/crio/conmon" + /etc/crio/crio.conf.d/02-crio.conf + [crio.image] + # pause_image = "" + + [crio.network] + # cni_default_network = "" + + [crio.runtime] + # cgroup_manager = "" + /etc/crio/policy.json + { "default": [{ "type": "insecureAcceptAnything" }] } + + + >>> host: crio config: + INFO[2025-11-02T23:25:04.797100246Z] Updating config from single file: /etc/crio/crio.conf + INFO[2025-11-02T23:25:04.797125204Z] Updating config from drop-in file: /etc/crio/crio.conf + INFO[2025-11-02T23:25:04.79715235Z] Skipping not-existing config file "/etc/crio/crio.conf" + INFO[2025-11-02T23:25:04.797165596Z] Updating config from path: /etc/crio/crio.conf.d + INFO[2025-11-02T23:25:04.79721017Z] Updating config from drop-in file: /etc/crio/crio.conf.d/02-crio.conf + INFO[2025-11-02T23:25:04.797295707Z] Updating config from drop-in file: /etc/crio/crio.conf.d/10-crio.conf + INFO Using default capabilities: CAP_CHOWN, CAP_DAC_OVERRIDE, CAP_FSETID, CAP_FOWNER, CAP_SETGID, CAP_SETUID, CAP_SETPCAP, CAP_NET_BIND_SERVICE, CAP_KILL + # The CRI-O configuration file specifies all of the available configuration + # options and command-line flags for the crio(8) OCI Kubernetes Container Runtime + # daemon, but in a TOML format that can be more easily modified and versioned. + # + # Please refer to crio.conf(5) for details of all configuration options. + + # CRI-O supports partial configuration reload during runtime, which can be + # done by sending SIGHUP to the running process. 
Currently supported options + # are explicitly mentioned with: 'This option supports live configuration + # reload'. + + # CRI-O reads its storage defaults from the containers-storage.conf(5) file + # located at /etc/containers/storage.conf. Modify this storage configuration if + # you want to change the system's defaults. If you want to modify storage just + # for CRI-O, you can change the storage configuration options here. + [crio] + + # Path to the "root directory". CRI-O stores all of its data, including + # containers images, in this directory. + # root = "/var/lib/containers/storage" + + # Path to the "run directory". CRI-O stores all of its state in this directory. + # runroot = "/run/containers/storage" + + # Path to the "imagestore". If CRI-O stores all of its images in this directory differently than Root. + # imagestore = "" + + # Storage driver used to manage the storage of images and containers. Please + # refer to containers-storage.conf(5) to see all available storage drivers. + # storage_driver = "" + + # List to pass options to the storage driver. Please refer to + # containers-storage.conf(5) to see all available storage options. + # storage_option = [ + # ] + + # The default log directory where all logs will go unless directly specified by + # the kubelet. The log directory specified must be an absolute directory. + # log_dir = "/var/log/crio/pods" + + # Location for CRI-O to lay down the temporary version file. + # It is used to check if crio wipe should wipe containers, which should + # always happen on a node reboot + # version_file = "/var/run/crio/version" + + # Location for CRI-O to lay down the persistent version file. + # It is used to check if crio wipe should wipe images, which should + # only happen when CRI-O has been upgraded + # version_file_persist = "" + + # InternalWipe is whether CRI-O should wipe containers and images after a reboot when the server starts. + # If set to false, one must use the external command 'crio wipe' to wipe the containers and images in these situations. + # internal_wipe = true + + # InternalRepair is whether CRI-O should check if the container and image storage was corrupted after a sudden restart. + # If it was, CRI-O also attempts to repair the storage. + # internal_repair = true + + # Location for CRI-O to lay down the clean shutdown file. + # It is used to check whether crio had time to sync before shutting down. + # If not found, crio wipe will clear the storage directory. + # clean_shutdown_file = "/var/lib/crio/clean.shutdown" + + # The crio.api table contains settings for the kubelet/gRPC interface. + [crio.api] + + # Path to AF_LOCAL socket on which CRI-O will listen. + # listen = "/var/run/crio/crio.sock" + + # IP address on which the stream server will listen. + # stream_address = "127.0.0.1" + + # The port on which the stream server will listen. If the port is set to "0", then + # CRI-O will allocate a random free port number. + # stream_port = "0" + + # Enable encrypted TLS transport of the stream server. + # stream_enable_tls = false + + # Length of time until open streams terminate due to lack of activity + # stream_idle_timeout = "" + + # Path to the x509 certificate file used to serve the encrypted stream. This + # file can change, and CRI-O will automatically pick up the changes. + # stream_tls_cert = "" + + # Path to the key file used to serve the encrypted stream. This file can + # change and CRI-O will automatically pick up the changes. 
+ # stream_tls_key = "" + + # Path to the x509 CA(s) file used to verify and authenticate client + # communication with the encrypted stream. This file can change and CRI-O will + # automatically pick up the changes. + # stream_tls_ca = "" + + # Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 80 * 1024 * 1024. + # grpc_max_send_msg_size = 83886080 + + # Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 80 * 1024 * 1024. + # grpc_max_recv_msg_size = 83886080 + + # The crio.runtime table contains settings pertaining to the OCI runtime used + # and options for how to set up and manage the OCI runtime. + [crio.runtime] + + # A list of ulimits to be set in containers by default, specified as + # "=:", for example: + # "nofile=1024:2048" + # If nothing is set here, settings will be inherited from the CRI-O daemon + # default_ulimits = [ + # ] + + # If true, the runtime will not use pivot_root, but instead use MS_MOVE. + # no_pivot = false + + # decryption_keys_path is the path where the keys required for + # image decryption are stored. This option supports live configuration reload. + # decryption_keys_path = "/etc/crio/keys/" + + # Path to the conmon binary, used for monitoring the OCI runtime. + # Will be searched for using $PATH if empty. + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon = "" + + # Cgroup setting for conmon + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorCgroup. + # conmon_cgroup = "" + + # Environment variable list for the conmon process, used for passing necessary + # environment variables to conmon or the runtime. + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon_env = [ + # ] + + # Additional environment variables to set for all the + # containers. These are overridden if set in the + # container image spec or in the container runtime configuration. + # default_env = [ + # ] + + # If true, SELinux will be used for pod separation on the host. + # This option is deprecated, and be interpreted from whether SELinux is enabled on the host in the future. + # selinux = false + + # Path to the seccomp.json profile which is used as the default seccomp profile + # for the runtime. If not specified or set to "", then the internal default seccomp profile will be used. + # This option supports live configuration reload. + # seccomp_profile = "" + + # Enable a seccomp profile for privileged containers from the local path. + # This option supports live configuration reload. + # privileged_seccomp_profile = "" + + # Used to change the name of the default AppArmor profile of CRI-O. The default + # profile name is "crio-default". This profile only takes effect if the user + # does not specify a profile via the Kubernetes Pod's metadata annotation. If + # the profile is set to "unconfined", then this equals to disabling AppArmor. + # This option supports live configuration reload. + # apparmor_profile = "crio-default" + + # Path to the blockio class configuration file for configuring + # the cgroup blockio controller. + # blockio_config_file = "" + + # Reload blockio-config-file and rescan blockio devices in the system before applying + # blockio parameters. + # blockio_reload = false + + # Used to change irqbalance service config file path which is used for configuring + # irqbalance daemon. 
+ # irqbalance_config_file = "/etc/sysconfig/irqbalance" + + # irqbalance_config_restore_file allows to set a cpu mask CRI-O should + # restore as irqbalance config at startup. Set to empty string to disable this flow entirely. + # By default, CRI-O manages the irqbalance configuration to enable dynamic IRQ pinning. + # irqbalance_config_restore_file = "/etc/sysconfig/orig_irq_banned_cpus" + + # Path to the RDT configuration file for configuring the resctrl pseudo-filesystem. + # This option supports live configuration reload. + # rdt_config_file = "" + + # Cgroup management implementation used for the runtime. + # cgroup_manager = "systemd" + + # Specify whether the image pull must be performed in a separate cgroup. + # separate_pull_cgroup = "" + + # List of default capabilities for containers. If it is empty or commented out, + # only the capabilities defined in the containers json file by the user/kube + # will be added. + # default_capabilities = [ + # "CHOWN", + # "DAC_OVERRIDE", + # "FSETID", + # "FOWNER", + # "SETGID", + # "SETUID", + # "SETPCAP", + # "NET_BIND_SERVICE", + # "KILL", + # ] + + # Add capabilities to the inheritable set, as well as the default group of permitted, bounding and effective. + # If capabilities are expected to work for non-root users, this option should be set. + # add_inheritable_capabilities = false + + # List of default sysctls. If it is empty or commented out, only the sysctls + # defined in the container json file by the user/kube will be added. + # default_sysctls = [ + # ] + + # List of devices on the host that a + # user can specify with the "io.kubernetes.cri-o.Devices" allowed annotation. + # allowed_devices = [ + # "/dev/fuse", + # "/dev/net/tun", + # ] + + # List of additional devices. specified as + # "::", for example: "--device=/dev/sdc:/dev/xvdc:rwm". + # If it is empty or commented out, only the devices + # defined in the container json file by the user/kube will be added. + # additional_devices = [ + # ] + + # List of directories to scan for CDI Spec files. + # cdi_spec_dirs = [ + # "/etc/cdi", + # "/var/run/cdi", + # ] + + # Change the default behavior of setting container devices uid/gid from CRI's + # SecurityContext (RunAsUser/RunAsGroup) instead of taking host's uid/gid. + # Defaults to false. + # device_ownership_from_security_context = false + + # Path to OCI hooks directories for automatically executed hooks. If one of the + # directories does not exist, then CRI-O will automatically skip them. + # hooks_dir = [ + # "/usr/share/containers/oci/hooks.d", + # ] + + # Path to the file specifying the defaults mounts for each container. The + # format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads + # its default mounts from the following two files: + # + # 1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the + # override file, where users can either add in their own default mounts, or + # override the default mounts shipped with the package. + # + # 2) /usr/share/containers/mounts.conf: This is the default file read for + # mounts. If you want CRI-O to read from a different, specific mounts file, + # you can change the default_mounts_file. Note, if this is done, CRI-O will + # only add mounts it finds in this file. + # + # default_mounts_file = "" + + # Maximum number of processes allowed in a container. + # This option is deprecated. The Kubelet flag '--pod-pids-limit' should be used instead. + # pids_limit = -1 + + # Maximum sized allowed for the container log file. 
Negative numbers indicate + # that no size limit is imposed. If it is positive, it must be >= 8192 to + # match/exceed conmon's read buffer. The file is truncated and re-opened so the + # limit is never exceeded. This option is deprecated. The Kubelet flag '--container-log-max-size' should be used instead. + # log_size_max = -1 + + # Whether container output should be logged to journald in addition to the kubernetes log file + # log_to_journald = false + + # Path to directory in which container exit files are written to by conmon. + # container_exits_dir = "/var/run/crio/exits" + + # Path to directory for container attach sockets. + # container_attach_socket_dir = "/var/run/crio" + + # The prefix to use for the source of the bind mounts. + # bind_mount_prefix = "" + + # If set to true, all containers will run in read-only mode. + # read_only = false + + # Changes the verbosity of the logs based on the level it is set to. Options + # are fatal, panic, error, warn, info, debug and trace. This option supports + # live configuration reload. + # log_level = "info" + + # Filter the log messages by the provided regular expression. + # This option supports live configuration reload. + # log_filter = "" + + # The UID mappings for the user namespace of each container. A range is + # specified in the form containerUID:HostUID:Size. Multiple ranges must be + # separated by comma. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # uid_mappings = "" + + # The GID mappings for the user namespace of each container. A range is + # specified in the form containerGID:HostGID:Size. Multiple ranges must be + # separated by comma. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # gid_mappings = "" + + # If set, CRI-O will reject any attempt to map host UIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_uid = -1 + + # If set, CRI-O will reject any attempt to map host GIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_gid = -1 + + # The minimal amount of time in seconds to wait before issuing a timeout + # regarding the proper termination of the container. The lowest possible + # value is 30s, whereas lower values are not considered by CRI-O. + # ctr_stop_timeout = 30 + + # drop_infra_ctr determines whether CRI-O drops the infra container + # when a pod does not have a private PID namespace, and does not use + # a kernel separating runtime (like kata). + # It requires manage_ns_lifecycle to be true. + # drop_infra_ctr = true + + # infra_ctr_cpuset determines what CPUs will be used to run infra containers. + # You can use linux CPU list format to specify desired CPUs. + # To get better isolation for guaranteed pods, set this parameter to be equal to kubelet reserved-cpus. 
+ # infra_ctr_cpuset = "" + + # shared_cpuset determines the CPU set which is allowed to be shared between guaranteed containers, + # regardless of, and in addition to, the exclusiveness of their CPUs. + # This field is optional and would not be used if not specified. + # You can specify CPUs in the Linux CPU list format. + # shared_cpuset = "" + + # The directory where the state of the managed namespaces gets tracked. + # Only used when manage_ns_lifecycle is true. + # namespaces_dir = "/var/run" + + # pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle + # pinns_path = "" + + # Globally enable/disable CRIU support which is necessary to + # checkpoint and restore container or pods (even if CRIU is found in $PATH). + # enable_criu_support = true + + # Enable/disable the generation of the container, + # sandbox lifecycle events to be sent to the Kubelet to optimize the PLEG + # enable_pod_events = false + + # default_runtime is the _name_ of the OCI runtime to be used as the default. + # The name is matched against the runtimes map below. + # default_runtime = "crun" + + # A list of paths that, when absent from the host, + # will cause a container creation to fail (as opposed to the current behavior being created as a directory). + # This option is to protect from source locations whose existence as a directory could jeopardize the health of the node, and whose + # creation as a file is not desired either. + # An example is /etc/hostname, which will cause failures on reboot if it's created as a directory, but often doesn't exist because + # the hostname is being managed dynamically. + # absent_mount_sources_to_reject = [ + # ] + + # The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes. + # The runtime to use is picked based on the runtime handler provided by the CRI. + # If no runtime handler is provided, the "default_runtime" will be used. + # Each entry in the table should follow the format: + # + # [crio.runtime.runtimes.runtime-handler] + # runtime_path = "/path/to/the/executable" + # runtime_type = "oci" + # runtime_root = "/path/to/the/root" + # inherit_default_runtime = false + # monitor_path = "/path/to/container/monitor" + # monitor_cgroup = "/cgroup/path" + # monitor_exec_cgroup = "/cgroup/path" + # monitor_env = [] + # privileged_without_host_devices = false + # allowed_annotations = [] + # platform_runtime_paths = { "os/arch" = "/path/to/binary" } + # no_sync_log = false + # default_annotations = {} + # stream_websockets = false + # seccomp_profile = "" + # Where: + # - runtime-handler: Name used to identify the runtime. + # - runtime_path (optional, string): Absolute path to the runtime executable in + # the host filesystem. If omitted, the runtime-handler identifier should match + # the runtime executable name, and the runtime executable should be placed + # in $PATH. + # - runtime_type (optional, string): Type of runtime, one of: "oci", "vm". If + # omitted, an "oci" runtime is assumed. + # - runtime_root (optional, string): Root directory for storage of containers + # state. + # - runtime_config_path (optional, string): the path for the runtime configuration + # file. This can only be used with when using the VM runtime_type. + # - inherit_default_runtime (optional, bool): when true the runtime_path, + # runtime_type, runtime_root and runtime_config_path will be replaced by + # the values from the default runtime on load time. 
+ # - privileged_without_host_devices (optional, bool): an option for restricting + # host devices from being passed to privileged containers. + # - allowed_annotations (optional, array of strings): an option for specifying + # a list of experimental annotations that this runtime handler is allowed to process. + # The currently recognized values are: + # "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod. + # "io.kubernetes.cri-o.cgroup2-mount-hierarchy-rw" for mounting cgroups writably when set to "true". + # "io.kubernetes.cri-o.Devices" for configuring devices for the pod. + # "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm. + # "io.kubernetes.cri-o.UnifiedCgroup.$CTR_NAME" for configuring the cgroup v2 unified block for a container. + # "io.containers.trace-syscall" for tracing syscalls via the OCI seccomp BPF hook. + # "io.kubernetes.cri-o.seccompNotifierAction" for enabling the seccomp notifier feature. + # "io.kubernetes.cri-o.umask" for setting the umask for container init process. + # "io.kubernetes.cri.rdt-class" for setting the RDT class of a container + # "seccomp-profile.kubernetes.cri-o.io" for setting the seccomp profile for: + # - a specific container by using: "seccomp-profile.kubernetes.cri-o.io/" + # - a whole pod by using: "seccomp-profile.kubernetes.cri-o.io/POD" + # Note that the annotation works on containers as well as on images. + # For images, the plain annotation "seccomp-profile.kubernetes.cri-o.io" + # can be used without the required "/POD" suffix or a container name. + # "io.kubernetes.cri-o.DisableFIPS" for disabling FIPS mode in a Kubernetes pod within a FIPS-enabled cluster. + # - monitor_path (optional, string): The path of the monitor binary. Replaces + # deprecated option "conmon". + # - monitor_cgroup (optional, string): The cgroup the container monitor process will be put in. + # Replaces deprecated option "conmon_cgroup". + # - monitor_exec_cgroup (optional, string): If set to "container", indicates exec probes + # should be moved to the container's cgroup + # - monitor_env (optional, array of strings): Environment variables to pass to the monitor. + # Replaces deprecated option "conmon_env". + # When using the pod runtime and conmon-rs, then the monitor_env can be used to further configure + # conmon-rs by using: + # - LOG_DRIVER=[none,systemd,stdout] - Enable logging to the configured target, defaults to none. + # - HEAPTRACK_OUTPUT_PATH=/path/to/dir - Enable heaptrack profiling and save the files to the set directory. + # - HEAPTRACK_BINARY_PATH=/path/to/heaptrack - Enable heaptrack profiling and use set heaptrack binary. + # - platform_runtime_paths (optional, map): A mapping of platforms to the corresponding + # runtime executable paths for the runtime handler. + # - container_min_memory (optional, string): The minimum memory that must be set for a container. + # This value can be used to override the currently set global value for a specific runtime. If not set, + # a global default value of "12 MiB" will be used. + # - no_sync_log (optional, bool): If set to true, the runtime will not sync the log file on rotate or container exit. + # This option is only valid for the 'oci' runtime type. Setting this option to true can cause data loss, e.g. + # when a machine crash happens. + # - default_annotations (optional, map): Default annotations if not overridden by the pod spec. + # - stream_websockets (optional, bool): Enable the WebSocket protocol for container exec, attach and port forward. 
+ # - seccomp_profile (optional, string): The absolute path of the seccomp.json profile which is used as the default + # seccomp profile for the runtime. + # If not specified or set to "", the runtime seccomp_profile will be used. + # If that is also not specified or set to "", the internal default seccomp profile will be applied. + # + # Using the seccomp notifier feature: + # + # This feature can help you to debug seccomp related issues, for example if + # blocked syscalls (permission denied errors) have negative impact on the workload. + # + # To be able to use this feature, configure a runtime which has the annotation + # "io.kubernetes.cri-o.seccompNotifierAction" in the allowed_annotations array. + # + # It also requires at least runc 1.1.0 or crun 0.19 which support the notifier + # feature. + # + # If everything is setup, CRI-O will modify chosen seccomp profiles for + # containers if the annotation "io.kubernetes.cri-o.seccompNotifierAction" is + # set on the Pod sandbox. CRI-O will then get notified if a container is using + # a blocked syscall and then terminate the workload after a timeout of 5 + # seconds if the value of "io.kubernetes.cri-o.seccompNotifierAction=stop". + # + # This also means that multiple syscalls can be captured during that period, + # while the timeout will get reset once a new syscall has been discovered. + # + # This also means that the Pods "restartPolicy" has to be set to "Never", + # otherwise the kubelet will restart the container immediately. + # + # Please be aware that CRI-O is not able to get notified if a syscall gets + # blocked based on the seccomp defaultAction, which is a general runtime + # limitation. + + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_type = "" + runtime_root = "/run/crun" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + allowed_annotations = [ + "io.containers.trace-syscall", + ] + privileged_without_host_devices = false + + + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_type = "" + runtime_root = "/run/runc" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + + privileged_without_host_devices = false + + + + # The workloads table defines ways to customize containers with different resources + # that work based on annotations, rather than the CRI. + # Note, the behavior of this table is EXPERIMENTAL and may change at any time. + # Each workload, has a name, activation_annotation, annotation_prefix and set of resources it supports mutating. + # The currently supported resources are "cpuperiod" "cpuquota", "cpushares", "cpulimit" and "cpuset". The values for "cpuperiod" and "cpuquota" are denoted in microseconds. + # The value for "cpulimit" is denoted in millicores, this value is used to calculate the "cpuquota" with the supplied "cpuperiod" or the default "cpuperiod". + # Note that the "cpulimit" field overrides the "cpuquota" value supplied in this configuration. + # Each resource can have a default value specified, or be empty. + # For a container to opt-into this workload, the pod should be configured with the annotation $activation_annotation (key only, value is ignored). 
+ # To customize per-container, an annotation of the form $annotation_prefix.$resource/$ctrName = "value" can be specified + # signifying for that resource type to override the default value. + # If the annotation_prefix is not present, every container in the pod will be given the default values. + # Example: + # [crio.runtime.workloads.workload-type] + # activation_annotation = "io.crio/workload" + # annotation_prefix = "io.crio.workload-type" + # [crio.runtime.workloads.workload-type.resources] + # cpuset = "0-1" + # cpushares = "5" + # cpuquota = "1000" + # cpuperiod = "100000" + # cpulimit = "35" + # Where: + # The workload name is workload-type. + # To specify, the pod must have the "io.crio.workload" annotation (this is a precise string match). + # This workload supports setting cpuset and cpu resources. + # annotation_prefix is used to customize the different resources. + # To configure the cpu shares a container gets in the example above, the pod would have to have the following annotation: + # "io.crio.workload-type/$container_name = {"cpushares": "value"}" + + # hostnetwork_disable_selinux determines whether + # SELinux should be disabled within a pod when it is running in the host network namespace + # Default value is set to true + # hostnetwork_disable_selinux = true + + # disable_hostport_mapping determines whether to enable/disable + # the container hostport mapping in CRI-O. + # Default value is set to 'false' + # disable_hostport_mapping = false + + # timezone To set the timezone for a container in CRI-O. + # If an empty string is provided, CRI-O retains its default behavior. Use 'Local' to match the timezone of the host machine. + # timezone = "" + + # The crio.image table contains settings pertaining to the management of OCI images. + # + # CRI-O reads its configured registries defaults from the system wide + # containers-registries.conf(5) located in /etc/containers/registries.conf. + [crio.image] + + # Default transport for pulling images from a remote container storage. + # default_transport = "docker://" + + # The path to a file containing credentials necessary for pulling images from + # secure registries. The file is similar to that of /var/lib/kubelet/config.json + # global_auth_file = "" + + # The image used to instantiate infra containers. + # This option supports live configuration reload. + # pause_image = "registry.k8s.io/pause:3.10.1" + + # The path to a file containing credentials specific for pulling the pause_image from + # above. The file is similar to that of /var/lib/kubelet/config.json + # This option supports live configuration reload. + # pause_image_auth_file = "" + + # The command to run to have a container stay in the paused state. + # When explicitly set to "", it will fallback to the entrypoint and command + # specified in the pause image. When commented out, it will fallback to the + # default: "/pause". This option supports live configuration reload. + # pause_command = "/pause" + + # List of images to be excluded from the kubelet's garbage collection. + # It allows specifying image names using either exact, glob, or keyword + # patterns. Exact matches must match the entire name, glob matches can + # have a wildcard * at the end, and keyword matches can have wildcards + # on both ends. By default, this list includes the "pause" image if + # configured by the user, which is used as a placeholder in Kubernetes pods. 
+ # pinned_images = [ + # ] + + # Path to the file which decides what sort of policy we use when deciding + # whether or not to trust an image that we've pulled. It is not recommended that + # this option be used, as the default behavior of using the system-wide default + # policy (i.e., /etc/containers/policy.json) is most often preferred. Please + # refer to containers-policy.json(5) for more details. + signature_policy = "/etc/crio/policy.json" + + # Root path for pod namespace-separated signature policies. + # The final policy to be used on image pull will be /.json. + # If no pod namespace is being provided on image pull (via the sandbox config), + # or the concatenated path is non existent, then the signature_policy or system + # wide policy will be used as fallback. Must be an absolute path. + # signature_policy_dir = "/etc/crio/policies" + + # List of registries to skip TLS verification for pulling images. Please + # consider configuring the registries via /etc/containers/registries.conf before + # changing them here. + # This option is deprecated. Use registries.conf file instead. + # insecure_registries = [ + # ] + + # Controls how image volumes are handled. The valid values are mkdir, bind and + # ignore; the latter will ignore volumes entirely. + # image_volumes = "mkdir" + + # Temporary directory to use for storing big files + # big_files_temporary_dir = "" + + # If true, CRI-O will automatically reload the mirror registry when + # there is an update to the 'registries.conf.d' directory. Default value is set to 'false'. + # auto_reload_registries = false + + # The timeout for an image pull to make progress until the pull operation + # gets canceled. This value will be also used for calculating the pull progress interval to pull_progress_timeout / 10. + # Can be set to 0 to disable the timeout as well as the progress output. + # pull_progress_timeout = "0s" + + # The mode of short name resolution. + # The valid values are "enforcing" and "disabled", and the default is "enforcing". + # If "enforcing", an image pull will fail if a short name is used, but the results are ambiguous. + # If "disabled", the first result will be chosen. + # short_name_mode = "enforcing" + + # OCIArtifactMountSupport is whether CRI-O should support OCI artifacts. + # If set to false, mounting OCI Artifacts will result in an error. + # oci_artifact_mount_support = true + # The crio.network table containers settings pertaining to the management of + # CNI plugins. + [crio.network] + + # The default CNI network name to be selected. If not set or "", then + # CRI-O will pick-up the first one found in network_dir. + # cni_default_network = "" + + # Path to the directory where CNI configuration files are located. + # network_dir = "/etc/cni/net.d/" + + # Paths to directories where CNI plugin binaries are located. + # plugin_dirs = [ + # "/opt/cni/bin/", + # ] + + # List of included pod metrics. + # included_pod_metrics = [ + # ] + + # A necessary configuration for Prometheus based metrics retrieval + [crio.metrics] + + # Globally enable or disable metrics support. + # enable_metrics = false + + # Specify enabled metrics collectors. + # Per default all metrics are enabled. + # It is possible, to prefix the metrics with "container_runtime_" and "crio_". + # For example, the metrics collector "operations" would be treated in the same + # way as "crio_operations" and "container_runtime_crio_operations". 
+ # metrics_collectors = [ + # "image_pulls_layer_size", + # "containers_events_dropped_total", + # "containers_oom_total", + # "processes_defunct", + # "operations_total", + # "operations_latency_seconds", + # "operations_latency_seconds_total", + # "operations_errors_total", + # "image_pulls_bytes_total", + # "image_pulls_skipped_bytes_total", + # "image_pulls_failure_total", + # "image_pulls_success_total", + # "image_layer_reuse_total", + # "containers_oom_count_total", + # "containers_seccomp_notifier_count_total", + # "resources_stalled_at_stage", + # "containers_stopped_monitor_count", + # ] + # The IP address or hostname on which the metrics server will listen. + # metrics_host = "127.0.0.1" + + # The port on which the metrics server will listen. + # metrics_port = 9090 + + # Local socket path to bind the metrics server to + # metrics_socket = "" + + # The certificate for the secure metrics server. + # If the certificate is not available on disk, then CRI-O will generate a + # self-signed one. CRI-O also watches for changes of this path and reloads the + # certificate on any modification event. + # metrics_cert = "" + + # The certificate key for the secure metrics server. + # Behaves in the same way as the metrics_cert. + # metrics_key = "" + + # A necessary configuration for OpenTelemetry trace data exporting + [crio.tracing] + + # Globally enable or disable exporting OpenTelemetry traces. + # enable_tracing = false + + # Address on which the gRPC trace collector listens on. + # tracing_endpoint = "127.0.0.1:4317" + + # Number of samples to collect per million spans. Set to 1000000 to always sample. + # tracing_sampling_rate_per_million = 0 + + # CRI-O NRI configuration. + [crio.nri] + + # Globally enable or disable NRI. + # enable_nri = true + + # NRI socket to listen on. + # nri_listen = "/var/run/nri/nri.sock" + + # NRI plugin directory to use. + # nri_plugin_dir = "/opt/nri/plugins" + + # NRI plugin configuration directory to use. + # nri_plugin_config_dir = "/etc/nri/conf.d" + + # Disable connections from externally launched NRI plugins. + # nri_disable_connections = false + + # Timeout for a plugin to register itself with NRI. + # nri_plugin_registration_timeout = "5s" + + # Timeout for a plugin to handle an NRI request. + # nri_plugin_request_timeout = "2s" + + # NRI default validator configuration. + # If enabled, the builtin default validator can be used to reject a container if some + # NRI plugin requested a restricted adjustment. Currently the following adjustments + # can be restricted/rejected: + # - OCI hook injection + # - adjustment of runtime default seccomp profile + # - adjustment of unconfied seccomp profile + # - adjustment of a custom seccomp profile + # - adjustment of linux namespaces + # Additionally, the default validator can be used to reject container creation if any + # of a required set of plugins has not processed a container creation request, unless + # the container has been annotated to tolerate a missing plugin. 
+ # + # [crio.nri.default_validator] + # nri_enable_default_validator = false + # nri_validator_reject_oci_hook_adjustment = false + # nri_validator_reject_runtime_default_seccomp_adjustment = false + # nri_validator_reject_unconfined_seccomp_adjustment = false + # nri_validator_reject_custom_seccomp_adjustment = false + # nri_validator_reject_namespace_adjustment = false + # nri_validator_required_plugins = [ + # ] + # nri_validator_tolerate_missing_plugins_annotation = "" + + # Necessary information pertaining to container and pod stats reporting. + [crio.stats] + + # The number of seconds between collecting pod and container stats. + # If set to 0, the stats are collected on-demand instead. + # stats_collection_period = 0 + + # The number of seconds between collecting pod/container stats and pod + # sandbox metrics. If set to 0, the metrics/stats are collected on-demand instead. + # collection_period = 0 + + + ----------------------- debugLogs end: false-999044 [took: 16.697772446s] -------------------------------- + helpers_test.go:175: Cleaning up "false-999044" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p false-999044 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p false-999044: (1.723633597s) +=== CONT TestNetworkPlugins/group/enable-default-cni +=== RUN TestNetworkPlugins/group/enable-default-cni/Start + net_test.go:112: (dbg) Run: out/minikube-linux-amd64 start -p enable-default-cni-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --enable-default-cni=true --driver=docker --container-runtime=docker +E1102 23:25:27.615332 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:25:32.448118 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/functional-172481/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p flannel-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=flannel --driver=docker --container-runtime=docker: (44.597986015s) +=== RUN TestNetworkPlugins/group/flannel/ControllerPod + net_test.go:120: (dbg) TestNetworkPlugins/group/flannel/ControllerPod: waiting 10m0s for pods matching "app=flannel" in namespace "kube-flannel" ... + helpers_test.go:352: "kube-flannel-ds-n2vp7" [e086ec67-60c2-4c87-b47f-0aaeace7a929] Running + net_test.go:120: (dbg) TestNetworkPlugins/group/flannel/ControllerPod: app=flannel healthy within 6.001946719s +=== RUN TestNetworkPlugins/group/flannel/KubeletFlags + net_test.go:133: (dbg) Run: out/minikube-linux-amd64 ssh -p flannel-999044 "pgrep -a kubelet" +I1102 23:25:45.629624 37869 config.go:182] Loaded profile config "flannel-999044": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 +=== RUN TestNetworkPlugins/group/flannel/NetCatPod + net_test.go:149: (dbg) Run: kubectl --context flannel-999044 replace --force -f testdata/netcat-deployment.yaml + net_test.go:163: (dbg) TestNetworkPlugins/group/flannel/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ... 
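The NetCatPod step above applies testdata/netcat-deployment.yaml and then waits for pods labelled app=netcat to come up. A comparable wait loop, sketched with plain kubectl polling; the context name is taken from this run, and the helper is not the test suite's own implementation:

// wait_for_netcat.go - a sketch of the "waiting ... for pods matching app=netcat" step above.
package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

func main() {
	const kubeContext = "flannel-999044"
	deadline := time.Now().Add(15 * time.Minute) // same 15m budget as the test above
	for time.Now().Before(deadline) {
		// The deployment has a single replica, so jsonpath prints a single phase.
		out, err := exec.Command("kubectl", "--context", kubeContext,
			"get", "pods", "-l", "app=netcat",
			"-o", "jsonpath={.items[*].status.phase}").Output()
		if err == nil && strings.TrimSpace(string(out)) == "Running" {
			fmt.Println("app=netcat is Running")
			return
		}
		time.Sleep(2 * time.Second)
	}
	fmt.Println("timed out waiting for app=netcat")
}

Run with `go run wait_for_netcat.go` against a kubeconfig that has the profile's context; it exits as soon as the pod reports Running, much like the healthy-within timings logged above.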
+ helpers_test.go:352: "netcat-cd4db9dbf-r5v44" [58acc91e-9380-4dfb-ab88-7c9ca7303409] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils]) + net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p bridge-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --cni=bridge --driver=docker --container-runtime=docker: (57.65422787s) +=== RUN TestNetworkPlugins/group/bridge/KubeletFlags + net_test.go:133: (dbg) Run: out/minikube-linux-amd64 ssh -p bridge-999044 "pgrep -a kubelet" +I1102 23:25:47.735334 37869 config.go:182] Loaded profile config "bridge-999044": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 +=== RUN TestNetworkPlugins/group/bridge/NetCatPod + net_test.go:149: (dbg) Run: kubectl --context bridge-999044 replace --force -f testdata/netcat-deployment.yaml + net_test.go:163: (dbg) TestNetworkPlugins/group/bridge/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ... + helpers_test.go:352: "netcat-cd4db9dbf-fjnrs" [bc331d87-4758-44bb-beed-a74c7b6c1f89] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils]) + helpers_test.go:352: "netcat-cd4db9dbf-r5v44" [58acc91e-9380-4dfb-ab88-7c9ca7303409] Running + helpers_test.go:352: "netcat-cd4db9dbf-fjnrs" [bc331d87-4758-44bb-beed-a74c7b6c1f89] Running + net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p kubenet-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --network-plugin=kubenet --driver=docker --container-runtime=docker: (56.099047696s) +=== RUN TestNetworkPlugins/group/kubenet/KubeletFlags + net_test.go:133: (dbg) Run: out/minikube-linux-amd64 ssh -p kubenet-999044 "pgrep -a kubelet" + net_test.go:163: (dbg) TestNetworkPlugins/group/flannel/NetCatPod: app=netcat healthy within 9.002034542s +=== RUN TestNetworkPlugins/group/flannel/DNS + net_test.go:175: (dbg) Run: kubectl --context flannel-999044 exec deployment/netcat -- nslookup kubernetes.default +I1102 23:25:54.749864 37869 config.go:182] Loaded profile config "kubenet-999044": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 +=== RUN TestNetworkPlugins/group/kubenet/NetCatPod + net_test.go:149: (dbg) Run: kubectl --context kubenet-999044 replace --force -f testdata/netcat-deployment.yaml +=== RUN TestNetworkPlugins/group/flannel/Localhost + net_test.go:194: (dbg) Run: kubectl --context flannel-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080" + net_test.go:163: (dbg) TestNetworkPlugins/group/kubenet/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ... 
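The DNS, Localhost and HairPin steps above are all `kubectl exec` probes against the netcat deployment. The same three probes, sketched as a standalone program; the context name comes from this run and error handling is reduced to printing:

// netcat_probes.go - reruns the three checks shown above (nslookup, localhost hairpin on 8080,
// service-name hairpin) via the same kubectl exec commands.
package main

import (
	"fmt"
	"os/exec"
)

func probe(name string, args ...string) {
	out, err := exec.Command("kubectl", args...).CombinedOutput()
	fmt.Printf("=== %s ===\nerr=%v\n%s\n", name, err, out)
}

func main() {
	const ctx = "flannel-999044"
	base := []string{"--context", ctx, "exec", "deployment/netcat", "--"}
	probe("DNS", append(base, "nslookup", "kubernetes.default")...)
	probe("Localhost", append(base, "/bin/sh", "-c", "nc -w 5 -i 5 -z localhost 8080")...)
	probe("HairPin", append(base, "/bin/sh", "-c", "nc -w 5 -i 5 -z netcat 8080")...)
}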
+ helpers_test.go:352: "netcat-cd4db9dbf-t8glm" [9de80383-7400-4d8e-8237-175e17a20102] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils]) +=== RUN TestNetworkPlugins/group/flannel/HairPin + net_test.go:264: (dbg) Run: kubectl --context flannel-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080" + net_test.go:210: "flannel" test finished in 5m55.446138331s, failed=false + net_test.go:163: (dbg) TestNetworkPlugins/group/bridge/NetCatPod: app=netcat healthy within 8.002997124s +=== RUN TestNetworkPlugins/group/bridge/DNS + net_test.go:175: (dbg) Run: kubectl --context bridge-999044 exec deployment/netcat -- nslookup kubernetes.default +=== RUN TestNetworkPlugins/group/bridge/Localhost + net_test.go:194: (dbg) Run: kubectl --context bridge-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080" +=== RUN TestNetworkPlugins/group/bridge/HairPin + net_test.go:264: (dbg) Run: kubectl --context bridge-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080" + net_test.go:210: "bridge" test finished in 5m56.57283331s, failed=false + helpers_test.go:352: "netcat-cd4db9dbf-t8glm" [9de80383-7400-4d8e-8237-175e17a20102] Running + net_test.go:163: (dbg) TestNetworkPlugins/group/kubenet/NetCatPod: app=netcat healthy within 7.00281388s +=== RUN TestNetworkPlugins/group/kubenet/DNS + net_test.go:175: (dbg) Run: kubectl --context kubenet-999044 exec deployment/netcat -- nslookup kubernetes.default +=== RUN TestNetworkPlugins/group/kubenet/Localhost + net_test.go:194: (dbg) Run: kubectl --context kubenet-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080" +=== RUN TestNetworkPlugins/group/kubenet/HairPin + net_test.go:264: (dbg) Run: kubectl --context kubenet-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080" + net_test.go:210: "kubenet" test finished in 6m2.581452598s, failed=false + net_test.go:112: (dbg) Done: out/minikube-linux-amd64 start -p enable-default-cni-999044 --memory=3072 --alsologtostderr --wait=true --wait-timeout=15m --enable-default-cni=true --driver=docker --container-runtime=docker: (59.428701172s) +=== RUN TestNetworkPlugins/group/enable-default-cni/KubeletFlags + net_test.go:133: (dbg) Run: out/minikube-linux-amd64 ssh -p enable-default-cni-999044 "pgrep -a kubelet" +I1102 23:26:06.173370 37869 config.go:182] Loaded profile config "enable-default-cni-999044": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 +=== RUN TestNetworkPlugins/group/enable-default-cni/NetCatPod + net_test.go:149: (dbg) Run: kubectl --context enable-default-cni-999044 replace --force -f testdata/netcat-deployment.yaml + net_test.go:163: (dbg) TestNetworkPlugins/group/enable-default-cni/NetCatPod: waiting 15m0s for pods matching "app=netcat" in namespace "default" ... + helpers_test.go:352: "netcat-cd4db9dbf-h69w6" [89714356-9111-4f71-953e-c8949c1c52ec] Pending / Ready:ContainersNotReady (containers with unready status: [dnsutils]) / ContainersReady:ContainersNotReady (containers with unready status: [dnsutils]) + net_test.go:211: + ----------------------- debugLogs start: flannel-999044 [pass: true] -------------------------------- + >>> netcat: nslookup kubernetes.default: + Server: 10.96.0.10 + Address: 10.96.0.10#53 + + Name: kubernetes.default.svc.cluster.local + Address: 10.96.0.1 + + + + >>> netcat: nc 10.96.0.10 udp/53: + Connection to 10.96.0.10 53 port [udp/*] succeeded! 
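The netcat debug output above shows the kube-dns ClusterIP (10.96.0.10 in this run) answering on udp/53 and tcp/53 and resolving kubernetes.default. A Go sketch of the TCP half of that check plus a lookup routed through the same server; in a real pod the nameserver address would be read from /etc/resolv.conf rather than hard-coded:

// dns_reachability.go - equivalent of the `nc -z` and `nslookup` probes above, against the
// cluster DNS service address observed in this run.
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	const dnsAddr = "10.96.0.10:53" // ClusterIP of kube-dns in this particular cluster

	// Equivalent of `nc -z 10.96.0.10 53` over TCP.
	conn, err := net.DialTimeout("tcp", dnsAddr, 5*time.Second)
	if err != nil {
		fmt.Println("tcp/53 unreachable:", err)
	} else {
		fmt.Println("tcp/53 reachable")
		conn.Close()
	}

	// Equivalent of the nslookup above, forcing queries through the same server.
	r := &net.Resolver{
		PreferGo: true,
		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
			return (&net.Dialer{Timeout: 5 * time.Second}).DialContext(ctx, network, dnsAddr)
		},
	}
	ips, err := r.LookupHost(context.Background(), "kubernetes.default.svc.cluster.local")
	fmt.Println("lookup:", ips, err)
}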
+
+
+ >>> netcat: nc 10.96.0.10 tcp/53:
+ Connection to 10.96.0.10 53 port [tcp/*] succeeded!
+
+
+ >>> netcat: /etc/nsswitch.conf:
+ cat: can't open '/etc/nsswitch.conf': No such file or directory
+ command terminated with exit code 1
+
+
+ >>> netcat: /etc/hosts:
+ # Kubernetes-managed hosts file.
+ 127.0.0.1 localhost
+ ::1 localhost ip6-localhost ip6-loopback
+ fe00::0 ip6-localnet
+ fe00::0 ip6-mcastprefix
+ fe00::1 ip6-allnodes
+ fe00::2 ip6-allrouters
+ 10.244.0.3 netcat-cd4db9dbf-r5v44
+
+
+ >>> netcat: /etc/resolv.conf:
+ nameserver 10.96.0.10
+ search default.svc.cluster.local svc.cluster.local cluster.local test-pods.svc.cluster.local c.k8s-infra-prow-build.internal google.internal
+ options ndots:5
+
+
+ >>> host: /etc/nsswitch.conf:
+ # /etc/nsswitch.conf
+ #
+ # Example configuration of GNU Name Service Switch functionality.
+ # If you have the `glibc-doc-reference' and `info' packages installed, try:
+ # `info libc "Name Service Switch"' for information about this file.
+
+ passwd: files
+ group: files
+ shadow: files
+ gshadow: files
+
+ hosts: files dns
+ networks: files
+
+ protocols: db files
+ services: db files
+ ethers: db files
+ rpc: db files
+
+ netgroup: nis
+
+
+ >>> host: /etc/hosts:
+ 127.0.0.1 localhost
+ ::1 localhost ip6-localhost ip6-loopback
+ fe00:: ip6-localnet
+ ff00:: ip6-mcastprefix
+ ff02::1 ip6-allnodes
+ ff02::2 ip6-allrouters
+ 192.168.85.2 flannel-999044
+ 192.168.85.1 host.minikube.internal
+ 192.168.85.2 control-plane.minikube.internal
+
+
+ >>> host: /etc/resolv.conf:
+ # Generated by Docker Engine.
+ # This file can be edited; Docker Engine will not make further changes once it
+ # has been modified.
+
+ nameserver 192.168.85.1
+ search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal
+ options ndots:5
+
+ # Based on host file: '/etc/resolv.conf' (internal resolver)
+ # ExtServers: [host(10.35.240.10)]
+ # Overrides: []
+ # Option ndots from: host
+
+
+ >>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, :
+ Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice
+ NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
+ node/flannel-999044 Ready control-plane 48s v1.34.1 192.168.85.2 Debian GNU/Linux 12 (bookworm) 6.6.97+ docker://28.5.1
+
+ NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
+ default service/kubernetes ClusterIP 10.96.0.1 443/TCP 47s
+ default service/netcat ClusterIP 10.98.0.45 8080/TCP 15s app=netcat
+ kube-system service/kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP,9153/TCP 46s k8s-app=kube-dns
+
+ NAMESPACE NAME ENDPOINTS AGE
+ default endpoints/kubernetes 192.168.85.2:8443 47s
+ default endpoints/netcat 10.244.0.3:8080 15s
+ kube-system endpoints/k8s.io-minikube-hostpath 39s
+ kube-system endpoints/kube-dns 10.244.0.2:53,10.244.0.2:53,10.244.0.2:9153 40s
+
+ NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR
+ kube-flannel daemonset.apps/kube-flannel-ds 1 1 1 1 1 45s kube-flannel ghcr.io/flannel-io/flannel:v0.27.4 app=flannel
+ kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 46s kube-proxy registry.k8s.io/kube-proxy:v1.34.1 k8s-app=kube-proxy
+
+ NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
+ default deployment.apps/netcat 1/1 1 1 15s dnsutils registry.k8s.io/e2e-test-images/agnhost:2.40 app=netcat
+ kube-system deployment.apps/coredns 1/1 1 1 46s coredns registry.k8s.io/coredns/coredns:v1.12.1 k8s-app=kube-dns
+
+ NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+ default pod/netcat-cd4db9dbf-r5v44 1/1 Running 0 15s 10.244.0.3 flannel-999044
+ kube-flannel pod/kube-flannel-ds-n2vp7 1/1 Running 0 40s 192.168.85.2 flannel-999044
+ kube-system pod/coredns-66bc5c9577-vnq76 1/1 Running 0 40s 10.244.0.2 flannel-999044
+ kube-system pod/etcd-flannel-999044 1/1 Running 0 46s 192.168.85.2 flannel-999044
+ kube-system pod/kube-apiserver-flannel-999044 1/1 Running 0 46s 192.168.85.2 flannel-999044
+ kube-system pod/kube-controller-manager-flannel-999044 1/1 Running 0 46s 192.168.85.2 flannel-999044
+ kube-system pod/kube-proxy-kh6wp 1/1 Running 0 40s 192.168.85.2 flannel-999044
+ kube-system pod/kube-scheduler-flannel-999044 1/1 Running 0 46s 192.168.85.2 flannel-999044
+ kube-system pod/storage-provisioner 1/1 Running 0 39s 192.168.85.2 flannel-999044
+
+
+ >>> host: crictl pods:
+ POD ID CREATED STATE NAME NAMESPACE ATTEMPT RUNTIME
+ 62c7e68635685 15 seconds ago Ready netcat-cd4db9dbf-r5v44 default 0 (default)
+ 385deb28efb1b 25 seconds ago Ready coredns-66bc5c9577-vnq76 kube-system 2 (default)
+ 3b78cbe0cd110 26 seconds ago NotReady coredns-66bc5c9577-vnq76 kube-system 1 (default)
+ bbe397e932a52 27 seconds ago NotReady coredns-66bc5c9577-vnq76 kube-system 0 (default)
+ 532109cc7b0b3 27 seconds ago Ready storage-provisioner kube-system 0 (default)
+ c90f9da6ef9fa 41 seconds ago Ready kube-flannel-ds-n2vp7 kube-flannel 0 (default)
+ 851cd381ac54e 41 seconds ago Ready kube-proxy-kh6wp kube-system 0 (default)
+ 0df7c613c96d8 50 seconds ago Ready kube-scheduler-flannel-999044 kube-system 0 (default)
+ bd7ec8558f338 50 seconds ago Ready kube-controller-manager-flannel-999044 kube-system 0 (default)
+ 7571d93fc4dbc 50 seconds ago Ready kube-apiserver-flannel-999044 kube-system 0 (default)
+ f8883bb7ee127 50 seconds ago Ready etcd-flannel-999044 kube-system 0 (default)
+
+
+ >>> host: crictl containers:
+ CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE
+ eeacbb00fc912 registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 14 seconds ago Running dnsutils 0 62c7e68635685 netcat-cd4db9dbf-r5v44 default
+ 8af11b9d773aa 52546a367cc9e 25 seconds ago Running coredns 0 385deb28efb1b coredns-66bc5c9577-vnq76 kube-system
+ 2582bf8251109 6e38f40d628db 27 seconds ago Running storage-provisioner 0 532109cc7b0b3 storage-provisioner kube-system
+ 9dcc710d8c272 e83704a177312 27 seconds ago Running kube-flannel 0 c90f9da6ef9fa kube-flannel-ds-n2vp7 kube-flannel
+ 66c3e3470d59f ghcr.io/flannel-io/flannel@sha256:2ff3c5cb44d0e27b09f27816372084c98fa12486518ca95cb4a970f4a1a464c4 27 seconds ago Exited install-cni 0 c90f9da6ef9fa kube-flannel-ds-n2vp7 kube-flannel
+ 598e65e6440d7 ghcr.io/flannel-io/flannel-cni-plugin@sha256:25bd091c1867d0237432a4bcb5da720f39198b7d80edcae3bdf08262d242985c 29 seconds ago Exited install-cni-plugin 0 c90f9da6ef9fa kube-flannel-ds-n2vp7 kube-flannel
+ 3a059aa894af2 fc25172553d79 40 seconds ago Running kube-proxy 0 851cd381ac54e kube-proxy-kh6wp kube-system
+ 500cc8e5160bf 7dd6aaa1717ab 50 seconds ago Running kube-scheduler 0 0df7c613c96d8 kube-scheduler-flannel-999044 kube-system
+ 6a7f24751a4f3 c80c8dbafe7dd 50 seconds ago Running kube-controller-manager 0 bd7ec8558f338 kube-controller-manager-flannel-999044 kube-system
+ 18ea93dc408f9 c3994bc696102 50 seconds ago Running kube-apiserver 0 7571d93fc4dbc kube-apiserver-flannel-999044 kube-system
+
6d5f4a2db7053 5f1f5298c888d 50 seconds ago Running etcd 0 f8883bb7ee127 etcd-flannel-999044 kube-system + + + >>> k8s: describe netcat deployment: + Name: netcat + Namespace: default + CreationTimestamp: Sun, 02 Nov 2025 23:25:45 +0000 + Labels: app=netcat + Annotations: deployment.kubernetes.io/revision: 1 + Selector: app=netcat + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 25% max unavailable, 25% max surge + Pod Template: + Labels: app=netcat + Containers: + dnsutils: + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + Environment: + Mounts: + Volumes: + Node-Selectors: + Tolerations: + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: netcat-cd4db9dbf (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 16s deployment-controller Scaled up replica set netcat-cd4db9dbf from 0 to 1 + + + >>> k8s: describe netcat pod(s): + Name: netcat-cd4db9dbf-r5v44 + Namespace: default + Priority: 0 + Service Account: default + Node: flannel-999044/192.168.85.2 + Start Time: Sun, 02 Nov 2025 23:25:45 +0000 + Labels: app=netcat + pod-template-hash=cd4db9dbf + Annotations: + Status: Running + IP: 10.244.0.3 + IPs: + IP: 10.244.0.3 + Controlled By: ReplicaSet/netcat-cd4db9dbf + Containers: + dnsutils: + Container ID: docker://eeacbb00fc91228f9a9dcd83f28cd0e92deefb07a8ed9964f7b2bf7fe168bd51 + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Image ID: docker-pullable://registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + State: Running + Started: Sun, 02 Nov 2025 23:25:47 +0000 + Ready: True + Restart Count: 0 + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-7bq7d (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-api-access-7bq7d: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: + Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 16s default-scheduler Successfully assigned default/netcat-cd4db9dbf-r5v44 to flannel-999044 + Normal Pulling 15s kubelet Pulling image "registry.k8s.io/e2e-test-images/agnhost:2.40" + Normal Pulled 14s kubelet Successfully pulled image "registry.k8s.io/e2e-test-images/agnhost:2.40" in 923ms (923ms including waiting). Image size: 127004766 bytes. 
+ Normal Created 14s kubelet Created container: dnsutils + Normal Started 14s kubelet Started container dnsutils + + + >>> k8s: netcat logs: + + + >>> k8s: describe coredns deployment: + Name: coredns + Namespace: kube-system + CreationTimestamp: Sun, 02 Nov 2025 23:25:14 +0000 + Labels: k8s-app=kube-dns + Annotations: deployment.kubernetes.io/revision: 1 + Selector: k8s-app=kube-dns + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 1 max unavailable, 25% max surge + Pod Template: + Labels: k8s-app=kube-dns + Service Account: coredns + Containers: + coredns: + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Ports: 53/UDP (dns), 53/TCP (dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from config-volume (ro) + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + Priority Class Name: system-cluster-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: coredns-66bc5c9577 (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 41s deployment-controller Scaled up replica set coredns-66bc5c9577 from 0 to 2 + Normal ScalingReplicaSet 41s deployment-controller Scaled down replica set coredns-66bc5c9577 from 2 to 1 + + + >>> k8s: describe coredns pods: + Name: coredns-66bc5c9577-vnq76 + Namespace: kube-system + Priority: 2000000000 + Priority Class Name: system-cluster-critical + Service Account: coredns + Node: flannel-999044/192.168.85.2 + Start Time: Sun, 02 Nov 2025 23:25:34 +0000 + Labels: k8s-app=kube-dns + pod-template-hash=66bc5c9577 + Annotations: + Status: Running + IP: 10.244.0.2 + IPs: + IP: 10.244.0.2 + Controlled By: ReplicaSet/coredns-66bc5c9577 + Containers: + coredns: + Container ID: docker://8af11b9d773aab3f997eb6ea70a3bad915e25559d6ad78c2e6c92e95e2c202a0 + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Image ID: docker-pullable://registry.k8s.io/coredns/coredns@sha256:e8c262566636e6bc340ece6473b0eed193cad045384401529721ddbe6463d31c + Ports: 53/UDP (dns), 53/TCP (dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + State: Running + Started: Sun, 02 Nov 2025 23:25:36 +0000 + Ready: True + Restart Count: 0 + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from 
config-volume (ro) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-h8djq (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + kube-api-access-h8djq: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: Burstable + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Warning FailedScheduling 41s default-scheduler 0/1 nodes are available: 1 node(s) had untolerated taint {node.kubernetes.io/not-ready: }. no new claims to deallocate, preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling. + Normal Scheduled 27s default-scheduler Successfully assigned kube-system/coredns-66bc5c9577-vnq76 to flannel-999044 + Warning FailedCreatePodSandBox 27s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "bbe397e932a520c62237683e32254dce7e41e4ec905b51c4a7271d0361d705c4" network for pod "coredns-66bc5c9577-vnq76": networkPlugin cni failed to set up pod "coredns-66bc5c9577-vnq76_kube-system" network: plugin type="flannel" failed (add): failed to load flannel 'subnet.env' file: open /run/flannel/subnet.env: no such file or directory. Check the flannel pod log for this node. + Warning FailedCreatePodSandBox 26s kubelet Failed to create pod sandbox: rpc error: code = Unknown desc = failed to set up sandbox container "3b78cbe0cd110c5f32a9ffca2a74d6322fc7e180011bc791cfcfc48e86ebab1c" network for pod "coredns-66bc5c9577-vnq76": networkPlugin cni failed to set up pod "coredns-66bc5c9577-vnq76_kube-system" network: plugin type="flannel" failed (add): failed to load flannel 'subnet.env' file: open /run/flannel/subnet.env: no such file or directory. Check the flannel pod log for this node. + Normal SandboxChanged 25s (x2 over 26s) kubelet Pod sandbox changed, it will be killed and re-created. + Normal Pulled 25s kubelet Container image "registry.k8s.io/coredns/coredns:v1.12.1" already present on machine + Normal Created 25s kubelet Created container: coredns + Normal Started 25s kubelet Started container coredns + + + >>> k8s: coredns logs: + maxprocs: Leaving GOMAXPROCS=8: CPU quota undefined + .:53 + [INFO] plugin/reload: Running configuration SHA512 = fa9a0cdcdddcb4be74a0eaf7cfcb211c40e29ddf5507e03bbfc0065bade31f0f2641a2513136e246f32328dd126fc93236fb5c595246f0763926a524386705e8 + CoreDNS-1.12.1 + linux/amd64, go1.24.1, 707c7c1 + [INFO] 127.0.0.1:32853 - 31088 "HINFO IN 8427711337227271266.8753751939212179441. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.034618924s + [INFO] 10.244.0.3:48586 - 53150 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000202711s + [INFO] 10.244.0.3:49021 - 17242 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000070484s + [INFO] 10.244.0.3:59928 - 7585 "AAAA IN kubernetes.default.svc.cluster.local. 
udp 54 false 512" NOERROR qr,aa,rd 147 0.000150165s + [INFO] 10.244.0.3:51897 - 23310 "A IN netcat.default.svc.cluster.local. udp 50 false 512" NOERROR qr,aa,rd 98 0.000111068s + [INFO] 10.244.0.3:51897 - 23549 "AAAA IN netcat.default.svc.cluster.local. udp 50 false 512" NOERROR qr,aa,rd 143 0.000185778s + [INFO] 10.244.0.3:36965 - 27238 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000122013s + [INFO] 10.244.0.3:46450 - 45805 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000111576s + [INFO] 10.244.0.3:49896 - 13227 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000059388s + + + >>> k8s: describe api server pod(s): + Name: kube-apiserver-flannel-999044 + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Node: flannel-999044/192.168.85.2 + Start Time: Sun, 02 Nov 2025 23:25:14 +0000 + Labels: component=kube-apiserver + tier=control-plane + Annotations: kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 192.168.85.2:8443 + kubernetes.io/config.hash: 3690f33e832437d997f72405f698fc57 + kubernetes.io/config.mirror: 3690f33e832437d997f72405f698fc57 + kubernetes.io/config.seen: 2025-11-02T23:25:14.565546779Z + kubernetes.io/config.source: file + Status: Running + SeccompProfile: RuntimeDefault + IP: 192.168.85.2 + IPs: + IP: 192.168.85.2 + Controlled By: Node/flannel-999044 + Containers: + kube-apiserver: + Container ID: docker://18ea93dc408f91fe3548595f4045f8e889a269ed28dbc0f82de093f06f2c6010 + Image: registry.k8s.io/kube-apiserver:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-apiserver@sha256:b9d7c117f8ac52bed4b13aeed973dc5198f9d93a926e6fe9e0b384f155baa902 + Port: 8443/TCP (probe-port) + Host Port: 8443/TCP (probe-port) + Command: + kube-apiserver + --advertise-address=192.168.85.2 + --allow-privileged=true + --authorization-mode=Node,RBAC + --client-ca-file=/var/lib/minikube/certs/ca.crt + --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + --enable-bootstrap-token-auth=true + --etcd-cafile=/var/lib/minikube/certs/etcd/ca.crt + --etcd-certfile=/var/lib/minikube/certs/apiserver-etcd-client.crt + --etcd-keyfile=/var/lib/minikube/certs/apiserver-etcd-client.key + --etcd-servers=https://127.0.0.1:2379 + --kubelet-client-certificate=/var/lib/minikube/certs/apiserver-kubelet-client.crt + --kubelet-client-key=/var/lib/minikube/certs/apiserver-kubelet-client.key + --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + --proxy-client-cert-file=/var/lib/minikube/certs/front-proxy-client.crt + --proxy-client-key-file=/var/lib/minikube/certs/front-proxy-client.key + --requestheader-allowed-names=front-proxy-client + --requestheader-client-ca-file=/var/lib/minikube/certs/front-proxy-ca.crt + --requestheader-extra-headers-prefix=X-Remote-Extra- + --requestheader-group-headers=X-Remote-Group + --requestheader-username-headers=X-Remote-User + --secure-port=8443 + --service-account-issuer=https://kubernetes.default.svc.cluster.local + --service-account-key-file=/var/lib/minikube/certs/sa.pub + --service-account-signing-key-file=/var/lib/minikube/certs/sa.key + --service-cluster-ip-range=10.96.0.0/12 + --tls-cert-file=/var/lib/minikube/certs/apiserver.crt + --tls-private-key-file=/var/lib/minikube/certs/apiserver.key + State: Running + Started: Sun, 02 Nov 
2025 23:25:11 +0000 + Ready: True + Restart Count: 0 + Requests: + cpu: 250m + Liveness: http-get https://192.168.85.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=8 + Readiness: http-get https://192.168.85.2:probe-port/readyz delay=0s timeout=15s period=1s #success=1 #failure=3 + Startup: http-get https://192.168.85.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=24 + Environment: + Mounts: + /etc/ca-certificates from etc-ca-certificates (ro) + /etc/ssl/certs from ca-certs (ro) + /usr/local/share/ca-certificates from usr-local-share-ca-certificates (ro) + /usr/share/ca-certificates from usr-share-ca-certificates (ro) + /var/lib/minikube/certs from k8s-certs (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + ca-certs: + Type: HostPath (bare host directory volume) + Path: /etc/ssl/certs + HostPathType: DirectoryOrCreate + etc-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /etc/ca-certificates + HostPathType: DirectoryOrCreate + k8s-certs: + Type: HostPath (bare host directory volume) + Path: /var/lib/minikube/certs + HostPathType: DirectoryOrCreate + usr-local-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/local/share/ca-certificates + HostPathType: DirectoryOrCreate + usr-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/share/ca-certificates + HostPathType: DirectoryOrCreate + QoS Class: Burstable + Node-Selectors: + Tolerations: :NoExecute op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Pulled 50s kubelet Container image "registry.k8s.io/kube-apiserver:v1.34.1" already present on machine + Normal Created 50s kubelet Created container: kube-apiserver + Normal Started 50s kubelet Started container kube-apiserver + + + >>> k8s: api server logs: + I1102 23:25:11.492835 1 options.go:263] external host was not specified, using 192.168.85.2 + I1102 23:25:11.494816 1 server.go:150] Version: v1.34.1 + I1102 23:25:11.494833 1 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + W1102 23:25:11.948090 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=resource.k8s.io/v1alpha3 + W1102 23:25:11.948110 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=storagemigration.k8s.io/v1alpha1 + W1102 23:25:11.948116 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=admissionregistration.k8s.io/v1alpha1 + W1102 23:25:11.948119 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=authentication.k8s.io/v1alpha1 + W1102 23:25:11.948122 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=certificates.k8s.io/v1alpha1 + W1102 23:25:11.948126 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=imagepolicy.k8s.io/v1alpha1 + W1102 23:25:11.948130 1 api_enablement.go:112] 
alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=storage.k8s.io/v1alpha1 + W1102 23:25:11.948133 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=coordination.k8s.io/v1alpha2 + W1102 23:25:11.948137 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=rbac.authorization.k8s.io/v1alpha1 + W1102 23:25:11.948139 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=scheduling.k8s.io/v1alpha1 + W1102 23:25:11.948143 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=internal.apiserver.k8s.io/v1alpha1 + W1102 23:25:11.948145 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=node.k8s.io/v1alpha1 + W1102 23:25:11.957755 1 logging.go:55] [core] [Channel #2 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:11.957828 1 logging.go:55] [core] [Channel #1 SubChannel #4]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:11.958563 1 shared_informer.go:349] "Waiting for caches to sync" controller="node_authorizer" + I1102 23:25:11.963379 1 shared_informer.go:349] "Waiting for caches to sync" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:25:11.966792 1 plugins.go:157] Loaded 14 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,RuntimeClass,DefaultIngressClass,PodTopologyLabels,MutatingAdmissionPolicy,MutatingAdmissionWebhook. + I1102 23:25:11.966802 1 plugins.go:160] Loaded 13 validating admission controller(s) successfully in the following order: LimitRanger,ServiceAccount,PodSecurity,Priority,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,ClusterTrustBundleAttest,CertificateSubjectRestriction,ValidatingAdmissionPolicy,ValidatingAdmissionWebhook,ResourceQuota. + I1102 23:25:11.966984 1 instance.go:239] Using reconciler: lease + W1102 23:25:11.967448 1 logging.go:55] [core] [Channel #9 SubChannel #10]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:11.971007 1 logging.go:55] [core] [Channel #13 SubChannel #14]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:11.977835 1 logging.go:55] [core] [Channel #21 SubChannel #22]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:11.983607 1 handler.go:285] Adding GroupVersion apiextensions.k8s.io v1 to ResourceManager + W1102 23:25:11.983621 1 genericapiserver.go:784] Skipping API apiextensions.k8s.io/v1beta1 because it has no resources. + I1102 23:25:11.985829 1 cidrallocator.go:197] starting ServiceCIDR Allocator Controller + W1102 23:25:11.986322 1 logging.go:55] [core] [Channel #27 SubChannel #28]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:11.990523 1 logging.go:55] [core] [Channel #31 SubChannel #32]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:11.993769 1 logging.go:55] [core] [Channel #35 SubChannel #36]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:11.998037 1 logging.go:55] [core] [Channel #39 SubChannel #40]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.002956 1 logging.go:55] [core] [Channel #43 SubChannel #44]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.007113 1 logging.go:55] [core] [Channel #47 SubChannel #48]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.011064 1 logging.go:55] [core] [Channel #51 SubChannel #52]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.015438 1 logging.go:55] [core] [Channel #55 SubChannel #56]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.019067 1 logging.go:55] [core] [Channel #59 SubChannel #60]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.023169 1 logging.go:55] [core] [Channel #63 SubChannel #64]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.027652 1 logging.go:55] [core] [Channel #67 SubChannel #68]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.032557 1 logging.go:55] [core] [Channel #71 SubChannel #72]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.036513 1 logging.go:55] [core] [Channel #75 SubChannel #76]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.042769 1 logging.go:55] [core] [Channel #79 SubChannel #80]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.046424 1 logging.go:55] [core] [Channel #83 SubChannel #84]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.050847 1 logging.go:55] [core] [Channel #87 SubChannel #88]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.054239 1 logging.go:55] [core] [Channel #91 SubChannel #92]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:12.071787 1 handler.go:285] Adding GroupVersion v1 to ResourceManager + I1102 23:25:12.071970 1 apis.go:112] API group "internal.apiserver.k8s.io" is not enabled, skipping. + W1102 23:25:12.072656 1 logging.go:55] [core] [Channel #95 SubChannel #96]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.075825 1 logging.go:55] [core] [Channel #99 SubChannel #100]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.079406 1 logging.go:55] [core] [Channel #103 SubChannel #104]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.083120 1 logging.go:55] [core] [Channel #107 SubChannel #108]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.086985 1 logging.go:55] [core] [Channel #111 SubChannel #112]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.090788 1 logging.go:55] [core] [Channel #115 SubChannel #116]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.095572 1 logging.go:55] [core] [Channel #119 SubChannel #120]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.099588 1 logging.go:55] [core] [Channel #123 SubChannel #124]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.103973 1 logging.go:55] [core] [Channel #127 SubChannel #128]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.107861 1 logging.go:55] [core] [Channel #131 SubChannel #132]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.111637 1 logging.go:55] [core] [Channel #135 SubChannel #136]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.115237 1 logging.go:55] [core] [Channel #139 SubChannel #140]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.119370 1 logging.go:55] [core] [Channel #143 SubChannel #144]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.122869 1 logging.go:55] [core] [Channel #147 SubChannel #148]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.126694 1 logging.go:55] [core] [Channel #151 SubChannel #152]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.130069 1 logging.go:55] [core] [Channel #155 SubChannel #156]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.133662 1 logging.go:55] [core] [Channel #159 SubChannel #160]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.138274 1 logging.go:55] [core] [Channel #163 SubChannel #164]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.142127 1 logging.go:55] [core] [Channel #167 SubChannel #168]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.146380 1 logging.go:55] [core] [Channel #171 SubChannel #172]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.153436 1 logging.go:55] [core] [Channel #175 SubChannel #176]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.157875 1 logging.go:55] [core] [Channel #179 SubChannel #180]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.162382 1 logging.go:55] [core] [Channel #183 SubChannel #184]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.165768 1 logging.go:55] [core] [Channel #187 SubChannel #188]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:12.169214 1 apis.go:112] API group "storagemigration.k8s.io" is not enabled, skipping. + W1102 23:25:12.169813 1 logging.go:55] [core] [Channel #191 SubChannel #192]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.174426 1 logging.go:55] [core] [Channel #195 SubChannel #196]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.180545 1 logging.go:55] [core] [Channel #199 SubChannel #200]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.183716 1 logging.go:55] [core] [Channel #203 SubChannel #204]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.187120 1 logging.go:55] [core] [Channel #207 SubChannel #208]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.195069 1 logging.go:55] [core] [Channel #211 SubChannel #212]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.199937 1 logging.go:55] [core] [Channel #215 SubChannel #216]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.203931 1 logging.go:55] [core] [Channel #219 SubChannel #220]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.207571 1 logging.go:55] [core] [Channel #223 SubChannel #224]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.211015 1 logging.go:55] [core] [Channel #227 SubChannel #228]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.214186 1 logging.go:55] [core] [Channel #231 SubChannel #232]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.217707 1 logging.go:55] [core] [Channel #235 SubChannel #236]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.220986 1 logging.go:55] [core] [Channel #239 SubChannel #240]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.229667 1 logging.go:55] [core] [Channel #243 SubChannel #244]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.233422 1 logging.go:55] [core] [Channel #247 SubChannel #248]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.237800 1 logging.go:55] [core] [Channel #251 SubChannel #252]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + I1102 23:25:12.248482 1 handler.go:285] Adding GroupVersion authentication.k8s.io v1 to ResourceManager + W1102 23:25:12.248497 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1beta1 because it has no resources. 
+ W1102 23:25:12.248501 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:12.248782 1 handler.go:285] Adding GroupVersion authorization.k8s.io v1 to ResourceManager + W1102 23:25:12.248787 1 genericapiserver.go:784] Skipping API authorization.k8s.io/v1beta1 because it has no resources. + I1102 23:25:12.249393 1 handler.go:285] Adding GroupVersion autoscaling v2 to ResourceManager + I1102 23:25:12.249879 1 handler.go:285] Adding GroupVersion autoscaling v1 to ResourceManager + W1102 23:25:12.249886 1 genericapiserver.go:784] Skipping API autoscaling/v2beta1 because it has no resources. + W1102 23:25:12.249889 1 genericapiserver.go:784] Skipping API autoscaling/v2beta2 because it has no resources. + I1102 23:25:12.250814 1 handler.go:285] Adding GroupVersion batch v1 to ResourceManager + W1102 23:25:12.250828 1 genericapiserver.go:784] Skipping API batch/v1beta1 because it has no resources. + I1102 23:25:12.251476 1 handler.go:285] Adding GroupVersion certificates.k8s.io v1 to ResourceManager + W1102 23:25:12.251484 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.251487 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:12.251878 1 handler.go:285] Adding GroupVersion coordination.k8s.io v1 to ResourceManager + W1102 23:25:12.251884 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.251887 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1alpha2 because it has no resources. + I1102 23:25:12.252308 1 handler.go:285] Adding GroupVersion discovery.k8s.io v1 to ResourceManager + W1102 23:25:12.252317 1 genericapiserver.go:784] Skipping API discovery.k8s.io/v1beta1 because it has no resources. + I1102 23:25:12.253963 1 handler.go:285] Adding GroupVersion networking.k8s.io v1 to ResourceManager + W1102 23:25:12.253974 1 genericapiserver.go:784] Skipping API networking.k8s.io/v1beta1 because it has no resources. + I1102 23:25:12.254294 1 handler.go:285] Adding GroupVersion node.k8s.io v1 to ResourceManager + W1102 23:25:12.254299 1 genericapiserver.go:784] Skipping API node.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.254305 1 genericapiserver.go:784] Skipping API node.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:12.254806 1 handler.go:285] Adding GroupVersion policy v1 to ResourceManager + W1102 23:25:12.254811 1 genericapiserver.go:784] Skipping API policy/v1beta1 because it has no resources. + I1102 23:25:12.255969 1 handler.go:285] Adding GroupVersion rbac.authorization.k8s.io v1 to ResourceManager + W1102 23:25:12.255979 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.255983 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:12.256301 1 handler.go:285] Adding GroupVersion scheduling.k8s.io v1 to ResourceManager + W1102 23:25:12.256307 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.256310 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:12.257955 1 handler.go:285] Adding GroupVersion storage.k8s.io v1 to ResourceManager + W1102 23:25:12.257970 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1beta1 because it has no resources. 
+ W1102 23:25:12.257974 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:12.258753 1 handler.go:285] Adding GroupVersion flowcontrol.apiserver.k8s.io v1 to ResourceManager + W1102 23:25:12.258761 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta3 because it has no resources. + W1102 23:25:12.258764 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta2 because it has no resources. + W1102 23:25:12.258766 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta1 because it has no resources. + I1102 23:25:12.261299 1 handler.go:285] Adding GroupVersion apps v1 to ResourceManager + W1102 23:25:12.261311 1 genericapiserver.go:784] Skipping API apps/v1beta2 because it has no resources. + W1102 23:25:12.261314 1 genericapiserver.go:784] Skipping API apps/v1beta1 because it has no resources. + I1102 23:25:12.262589 1 handler.go:285] Adding GroupVersion admissionregistration.k8s.io v1 to ResourceManager + W1102 23:25:12.262599 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.262602 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:12.262995 1 handler.go:285] Adding GroupVersion events.k8s.io v1 to ResourceManager + W1102 23:25:12.263002 1 genericapiserver.go:784] Skipping API events.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.263035 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta2 because it has no resources. + I1102 23:25:12.264796 1 handler.go:285] Adding GroupVersion resource.k8s.io v1 to ResourceManager + W1102 23:25:12.264813 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.264817 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1alpha3 because it has no resources. + W1102 23:25:12.267358 1 logging.go:55] [core] [Channel #255 SubChannel #256]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + I1102 23:25:12.278779 1 handler.go:285] Adding GroupVersion apiregistration.k8s.io v1 to ResourceManager + W1102 23:25:12.278790 1 genericapiserver.go:784] Skipping API apiregistration.k8s.io/v1beta1 because it has no resources. 
+ I1102 23:25:12.499513 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:25:12.499515 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:25:12.499768 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/lib/minikube/certs/apiserver.crt::/var/lib/minikube/certs/apiserver.key" + I1102 23:25:12.500031 1 secure_serving.go:211] Serving securely on [::]:8443 + I1102 23:25:12.500075 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" + I1102 23:25:12.500259 1 cluster_authentication_trust_controller.go:459] Starting cluster_authentication_trust_controller controller + I1102 23:25:12.500274 1 shared_informer.go:349] "Waiting for caches to sync" controller="cluster_authentication_trust_controller" + I1102 23:25:12.500378 1 apf_controller.go:377] Starting API Priority and Fairness config controller + I1102 23:25:12.500455 1 dynamic_serving_content.go:135] "Starting controller" name="aggregator-proxy-cert::/var/lib/minikube/certs/front-proxy-client.crt::/var/lib/minikube/certs/front-proxy-client.key" + I1102 23:25:12.500517 1 apiservice_controller.go:100] Starting APIServiceRegistrationController + I1102 23:25:12.500518 1 local_available_controller.go:156] Starting LocalAvailability controller + I1102 23:25:12.500522 1 cache.go:32] Waiting for caches to sync for APIServiceRegistrationController controller + I1102 23:25:12.500524 1 cache.go:32] Waiting for caches to sync for LocalAvailability controller + I1102 23:25:12.500542 1 aggregator.go:169] waiting for initial CRD sync... + I1102 23:25:12.500552 1 controller.go:78] Starting OpenAPI AggregationController + I1102 23:25:12.500643 1 customresource_discovery_controller.go:294] Starting DiscoveryController + I1102 23:25:12.500664 1 system_namespaces_controller.go:66] Starting system namespaces controller + I1102 23:25:12.500685 1 gc_controller.go:78] Starting apiserver lease garbage collector + I1102 23:25:12.500803 1 controller.go:119] Starting legacy_token_tracking_controller + I1102 23:25:12.500808 1 shared_informer.go:349] "Waiting for caches to sync" controller="configmaps" + I1102 23:25:12.500842 1 default_servicecidr_controller.go:111] Starting kubernetes-service-cidr-controller + I1102 23:25:12.500846 1 shared_informer.go:349] "Waiting for caches to sync" controller="kubernetes-service-cidr-controller" + I1102 23:25:12.500869 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:25:12.500963 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:25:12.501099 1 remote_available_controller.go:425] Starting RemoteAvailability controller + I1102 23:25:12.501107 1 cache.go:32] Waiting for caches to sync for RemoteAvailability controller + I1102 23:25:12.501119 1 controller.go:80] Starting OpenAPI V3 AggregationController + I1102 23:25:12.501199 1 crdregistration_controller.go:114] Starting crd-autoregister controller + I1102 23:25:12.501206 1 shared_informer.go:349] "Waiting for caches to sync" controller="crd-autoregister" + I1102 23:25:12.501235 1 controller.go:142] Starting OpenAPI controller + I1102 23:25:12.501250 1 controller.go:90] Starting OpenAPI V3 controller + I1102 23:25:12.501259 1 naming_controller.go:299] Starting NamingConditionController + I1102 23:25:12.501271 1 establishing_controller.go:81] 
Starting EstablishingController + I1102 23:25:12.501281 1 nonstructuralschema_controller.go:195] Starting NonStructuralSchemaConditionController + I1102 23:25:12.501287 1 apiapproval_controller.go:189] Starting KubernetesAPIApprovalPolicyConformantConditionController + I1102 23:25:12.501295 1 crd_finalizer.go:269] Starting CRDFinalizer + I1102 23:25:12.503586 1 repairip.go:210] Starting ipallocator-repair-controller + I1102 23:25:12.503630 1 shared_informer.go:349] "Waiting for caches to sync" controller="ipallocator-repair-controller" + I1102 23:25:12.559266 1 shared_informer.go:356] "Caches are synced" controller="node_authorizer" + I1102 23:25:12.564421 1 shared_informer.go:356] "Caches are synced" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:25:12.564430 1 policy_source.go:240] refreshing policies + E1102 23:25:12.580671 1 controller.go:145] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms" + I1102 23:25:12.600786 1 shared_informer.go:356] "Caches are synced" controller="cluster_authentication_trust_controller" + I1102 23:25:12.600792 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller + I1102 23:25:12.600810 1 apf_controller.go:382] Running API Priority and Fairness config worker + I1102 23:25:12.600818 1 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process + I1102 23:25:12.600820 1 shared_informer.go:356] "Caches are synced" controller="configmaps" + I1102 23:25:12.600871 1 handler_discovery.go:451] Starting ResourceDiscoveryManager + I1102 23:25:12.600889 1 cache.go:39] Caches are synced for LocalAvailability controller + I1102 23:25:12.600894 1 shared_informer.go:356] "Caches are synced" controller="kubernetes-service-cidr-controller" + I1102 23:25:12.600906 1 default_servicecidr_controller.go:166] Creating default ServiceCIDR with CIDRs: [10.96.0.0/12] + I1102 23:25:12.601129 1 cache.go:39] Caches are synced for RemoteAvailability controller + I1102 23:25:12.601278 1 shared_informer.go:356] "Caches are synced" controller="crd-autoregister" + I1102 23:25:12.601304 1 aggregator.go:171] initial CRD sync complete... 
+ I1102 23:25:12.601312 1 autoregister_controller.go:144] Starting autoregister controller + I1102 23:25:12.601315 1 cache.go:32] Waiting for caches to sync for autoregister controller + I1102 23:25:12.601319 1 cache.go:39] Caches are synced for autoregister controller + I1102 23:25:12.601433 1 controller.go:667] quota admission added evaluator for: namespaces + I1102 23:25:12.603010 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:25:12.603034 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True + I1102 23:25:12.603762 1 shared_informer.go:356] "Caches are synced" controller="ipallocator-repair-controller" + I1102 23:25:12.605975 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller + I1102 23:25:12.606019 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:25:12.782556 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io + I1102 23:25:13.503480 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000 + I1102 23:25:13.505825 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000 + I1102 23:25:13.505838 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist. + I1102 23:25:13.766593 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io + I1102 23:25:13.784945 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io + I1102 23:25:13.804655 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"} + W1102 23:25:13.807564 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.85.2] + I1102 23:25:13.808144 1 controller.go:667] quota admission added evaluator for: endpoints + I1102 23:25:13.810543 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io + I1102 23:25:14.523731 1 controller.go:667] quota admission added evaluator for: serviceaccounts + I1102 23:25:14.821383 1 controller.go:667] quota admission added evaluator for: deployments.apps + I1102 23:25:14.825692 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"} + I1102 23:25:14.830340 1 controller.go:667] quota admission added evaluator for: daemonsets.apps + I1102 23:25:20.330459 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:25:20.332661 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:25:20.424555 1 controller.go:667] quota admission added evaluator for: replicasets.apps + I1102 23:25:20.624973 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps + I1102 23:25:45.715892 1 alloc.go:328] "allocated clusterIPs" service="default/netcat" clusterIPs={"IPv4":"10.98.0.45"} + E1102 23:25:54.815083 1 conn.go:339] Error on socket receive: read tcp 192.168.85.2:8443->192.168.85.1:59440: use of closed network connection + E1102 23:25:54.886631 1 conn.go:339] Error on socket receive: read tcp 192.168.85.2:8443->192.168.85.1:59454: use of closed network connection + E1102 23:25:54.954795 1 conn.go:339] Error on socket receive: read tcp 192.168.85.2:8443->192.168.85.1:59476: use of closed network connection + E1102 23:25:55.042337 1 conn.go:339] Error on socket receive: read tcp 
192.168.85.2:8443->192.168.85.1:59494: use of closed network connection + E1102 23:26:00.105621 1 conn.go:339] Error on socket receive: read tcp 192.168.85.2:8443->192.168.85.1:59510: use of closed network connection + E1102 23:26:00.174125 1 conn.go:339] Error on socket receive: read tcp 192.168.85.2:8443->192.168.85.1:42558: use of closed network connection + E1102 23:26:00.236473 1 conn.go:339] Error on socket receive: read tcp 192.168.85.2:8443->192.168.85.1:42580: use of closed network connection + E1102 23:26:00.300266 1 conn.go:339] Error on socket receive: read tcp 192.168.85.2:8443->192.168.85.1:42588: use of closed network connection + E1102 23:26:00.365041 1 conn.go:339] Error on socket receive: read tcp 192.168.85.2:8443->192.168.85.1:42614: use of closed network connection + + + >>> host: /etc/cni: + /etc/cni/net.d/cni.lock + /etc/cni/net.d/87-podman-bridge.conflist.mk_disabled + { + "cniVersion": "0.4.0", + "name": "podman", + "plugins": [ + { + "type": "bridge", + "bridge": "cni-podman0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [{ "dst": "0.0.0.0/0" }], + "ranges": [ + [ + { + "subnet": "10.88.0.0/16", + "gateway": "10.88.0.1" + } + ] + ] + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type": "firewall" + }, + { + "type": "tuning" + } + ] + } + /etc/cni/net.d/10-flannel.conflist + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + /etc/cni/net.d/10-crio-bridge.conflist.disabled.mk_disabled + { + "cniVersion": "1.0.0", + "name": "crio", + "plugins": [ + { + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [ + { "dst": "0.0.0.0/0" }, + { "dst": "::/0" } + ], + "ranges": [ + [{ "subnet": "10.85.0.0/16" }], + [{ "subnet": "1100:200::/24" }] + ] + } + } + ] + } + + + >>> host: ip a s: + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: tunl0@NONE: mtu 1480 qdisc noop state DOWN group default qlen 1000 + link/ipip 0.0.0.0 brd 0.0.0.0 + 3: eth0@if362: mtu 1500 qdisc noqueue state UP group default + link/ether 1a:58:82:63:f4:98 brd ff:ff:ff:ff:ff:ff link-netnsid 0 + inet 192.168.85.2/24 brd 192.168.85.255 scope global eth0 + valid_lft forever preferred_lft forever + 4: docker0: mtu 1500 qdisc noqueue state DOWN group default + link/ether 4a:a0:96:03:df:d1 brd ff:ff:ff:ff:ff:ff + inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0 + valid_lft forever preferred_lft forever + 5: flannel.1: mtu 1450 qdisc noqueue state UNKNOWN group default + link/ether de:09:1e:f3:cd:17 brd ff:ff:ff:ff:ff:ff + inet 10.244.0.0/32 scope global flannel.1 + valid_lft forever preferred_lft forever + inet6 fe80::dc09:1eff:fef3:cd17/64 scope link + valid_lft forever preferred_lft forever + 6: cni0: mtu 1450 qdisc noqueue state UP group default qlen 1000 + link/ether 1a:b1:f2:02:32:05 brd ff:ff:ff:ff:ff:ff + inet 10.244.0.1/24 brd 10.244.0.255 scope global cni0 + valid_lft forever preferred_lft forever + inet6 fe80::18b1:f2ff:fe02:3205/64 scope link + valid_lft forever preferred_lft 
forever + 7: vetha855301a@if3: mtu 1450 qdisc noqueue master cni0 state UP group default + link/ether 26:ea:a2:3b:25:e0 brd ff:ff:ff:ff:ff:ff link-netnsid 1 + inet6 fe80::24ea:a2ff:fe3b:25e0/64 scope link + valid_lft forever preferred_lft forever + 8: vetha313a099@if3: mtu 1450 qdisc noqueue master cni0 state UP group default + link/ether 3a:f7:36:e2:05:b7 brd ff:ff:ff:ff:ff:ff link-netnsid 2 + inet6 fe80::38f7:36ff:fee2:5b7/64 scope link + valid_lft forever preferred_lft forever + + + >>> host: ip r s: + default via 192.168.85.1 dev eth0 + 10.244.0.0/24 dev cni0 proto kernel scope link src 10.244.0.1 + 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown + 192.168.85.0/24 dev eth0 proto kernel scope link src 192.168.85.2 + + + >>> host: iptables-save: + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:26:02 2025 + *mangle + :PREROUTING ACCEPT [28149:95813496] + :INPUT ACCEPT [28114:95810698] + :FORWARD ACCEPT [35:2798] + :OUTPUT ACCEPT [20892:5655511] + :POSTROUTING ACCEPT [20927:5658309] + :KUBE-IPTABLES-HINT - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-PROXY-CANARY - [0:0] + COMMIT + # Completed on Sun Nov 2 23:26:02 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:26:02 2025 + *filter + :INPUT ACCEPT [4556:1133124] + :FORWARD ACCEPT [0:0] + :OUTPUT ACCEPT [4515:1455212] + :DOCKER - [0:0] + :DOCKER-BRIDGE - [0:0] + :DOCKER-CT - [0:0] + :DOCKER-FORWARD - [0:0] + :DOCKER-ISOLATION-STAGE-1 - [0:0] + :DOCKER-ISOLATION-STAGE-2 - [0:0] + :DOCKER-USER - [0:0] + :FLANNEL-FWD - [0:0] + :KUBE-EXTERNAL-SERVICES - [0:0] + :KUBE-FIREWALL - [0:0] + :KUBE-FORWARD - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-PROXY-FIREWALL - [0:0] + :KUBE-SERVICES - [0:0] + -A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A INPUT -m comment --comment "kubernetes health check service ports" -j KUBE-NODEPORTS + -A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A INPUT -j KUBE-FIREWALL + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A FORWARD -j DOCKER-USER + -A FORWARD -j DOCKER-FORWARD + -A FORWARD -m comment --comment "flanneld forward" -j FLANNEL-FWD + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -j KUBE-FIREWALL + -A DOCKER ! -i docker0 -o docker0 -j DROP + -A DOCKER-BRIDGE -o docker0 -j DOCKER + -A DOCKER-CT -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A DOCKER-FORWARD -j DOCKER-CT + -A DOCKER-FORWARD -j DOCKER-ISOLATION-STAGE-1 + -A DOCKER-FORWARD -j DOCKER-BRIDGE + -A DOCKER-FORWARD -i docker0 -j ACCEPT + -A DOCKER-ISOLATION-STAGE-1 -i docker0 ! 
-o docker0 -j DOCKER-ISOLATION-STAGE-2 + -A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP + -A FLANNEL-FWD -s 10.244.0.0/16 -m comment --comment "flanneld forward" -j ACCEPT + -A FLANNEL-FWD -d 10.244.0.0/16 -m comment --comment "flanneld forward" -j ACCEPT + -A KUBE-FIREWALL ! -s 127.0.0.0/8 -d 127.0.0.0/8 -m comment --comment "block incoming localnet connections" -m conntrack ! --ctstate RELATED,ESTABLISHED,DNAT -j DROP + -A KUBE-FORWARD -m conntrack --ctstate INVALID -m nfacct --nfacct-name ct_state_invalid_dropped_pkts -j DROP + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + COMMIT + # Completed on Sun Nov 2 23:26:02 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:26:02 2025 + *nat + :PREROUTING ACCEPT [36:2160] + :INPUT ACCEPT [36:2160] + :OUTPUT ACCEPT [61:3660] + :POSTROUTING ACCEPT [70:4335] + :DOCKER - [0:0] + :DOCKER_OUTPUT - [0:0] + :DOCKER_POSTROUTING - [0:0] + :FLANNEL-POSTRTG - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-MARK-MASQ - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-POSTROUTING - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-SEP-BL4BN7UU5M2OIW4P - [0:0] + :KUBE-SEP-IT2ZTR26TO4XFPTO - [0:0] + :KUBE-SEP-N4G2XR5TDX7PQE7P - [0:0] + :KUBE-SEP-UTWFOSUDHOCXYA2F - [0:0] + :KUBE-SEP-YIL6JZP7A3QYXJU2 - [0:0] + :KUBE-SERVICES - [0:0] + :KUBE-SVC-ERIFXISQEP7F7OF4 - [0:0] + :KUBE-SVC-JD5MR3NA4I4DYORP - [0:0] + :KUBE-SVC-NPX46M4PTMTKRN6Y - [0:0] + :KUBE-SVC-TCOU7JCQXEZGVUNU - [0:0] + :KUBE-SVC-WDP22YZC5S6MZWYX - [0:0] + -A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A PREROUTING -d 192.168.85.1/32 -j DOCKER_OUTPUT + -A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER + -A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -d 192.168.85.1/32 -j DOCKER_OUTPUT + -A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER + -A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING + -A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE + -A POSTROUTING -d 192.168.85.1/32 -j DOCKER_POSTROUTING + -A POSTROUTING -m comment --comment "flanneld masq" -j FLANNEL-POSTRTG + -A DOCKER -i docker0 -j RETURN + -A DOCKER_OUTPUT -d 192.168.85.1/32 -p tcp -m tcp --dport 53 -j DNAT --to-destination 127.0.0.11:39083 + -A DOCKER_OUTPUT -d 192.168.85.1/32 -p udp -m udp --dport 53 -j DNAT --to-destination 127.0.0.11:59742 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p tcp -m tcp --sport 39083 -j SNAT --to-source 192.168.85.1:53 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p udp -m udp --sport 59742 -j SNAT --to-source 192.168.85.1:53 + -A FLANNEL-POSTRTG -m mark --mark 0x4000/0x4000 -m comment --comment "flanneld masq" -j RETURN + -A FLANNEL-POSTRTG -s 10.244.0.0/24 -d 10.244.0.0/16 -m comment --comment "flanneld masq" -j RETURN + -A FLANNEL-POSTRTG -s 10.244.0.0/16 -d 10.244.0.0/24 -m comment --comment "flanneld masq" -j RETURN + -A FLANNEL-POSTRTG ! -s 10.244.0.0/16 -d 10.244.0.0/24 -m comment --comment "flanneld masq" -j RETURN + -A FLANNEL-POSTRTG -s 10.244.0.0/16 ! -d 224.0.0.0/4 -m comment --comment "flanneld masq" -j MASQUERADE --random-fully + -A FLANNEL-POSTRTG ! -s 10.244.0.0/16 -d 10.244.0.0/16 -m comment --comment "flanneld masq" -j MASQUERADE --random-fully + -A KUBE-MARK-MASQ -j MARK --set-xmark 0x4000/0x4000 + -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN + -A KUBE-POSTROUTING -j MARK --set-xmark 0x4000/0x0 + -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE --random-fully + -A KUBE-SEP-BL4BN7UU5M2OIW4P -s 192.168.85.2/32 -m comment --comment "default/kubernetes:https" -j KUBE-MARK-MASQ + -A KUBE-SEP-BL4BN7UU5M2OIW4P -p tcp -m comment --comment "default/kubernetes:https" -m tcp -j DNAT --to-destination 192.168.85.2:8443 + -A KUBE-SEP-IT2ZTR26TO4XFPTO -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-MARK-MASQ + -A KUBE-SEP-IT2ZTR26TO4XFPTO -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m tcp -j DNAT --to-destination 10.244.0.2:53 + -A KUBE-SEP-N4G2XR5TDX7PQE7P -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:metrics" -j KUBE-MARK-MASQ + -A KUBE-SEP-N4G2XR5TDX7PQE7P -p tcp -m comment --comment "kube-system/kube-dns:metrics" -m tcp -j DNAT --to-destination 10.244.0.2:9153 + -A KUBE-SEP-UTWFOSUDHOCXYA2F -s 10.244.0.3/32 -m comment --comment "default/netcat" -j KUBE-MARK-MASQ + -A KUBE-SEP-UTWFOSUDHOCXYA2F -p tcp -m comment --comment "default/netcat" -m tcp -j DNAT --to-destination 10.244.0.3:8080 + -A KUBE-SEP-YIL6JZP7A3QYXJU2 -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-MARK-MASQ + -A KUBE-SEP-YIL6JZP7A3QYXJU2 -p udp -m comment --comment "kube-system/kube-dns:dns" -m udp -j DNAT --to-destination 10.244.0.2:53 + -A KUBE-SERVICES -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-SVC-TCOU7JCQXEZGVUNU + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-SVC-ERIFXISQEP7F7OF4 + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-SVC-JD5MR3NA4I4DYORP + -A KUBE-SERVICES -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-NPX46M4PTMTKRN6Y + -A KUBE-SERVICES -d 10.98.0.45/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-SVC-WDP22YZC5S6MZWYX + -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS + -A KUBE-SVC-ERIFXISQEP7F7OF4 ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-ERIFXISQEP7F7OF4 -m comment --comment "kube-system/kube-dns:dns-tcp -> 10.244.0.2:53" -j KUBE-SEP-IT2ZTR26TO4XFPTO + -A KUBE-SVC-JD5MR3NA4I4DYORP ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-MARK-MASQ + -A KUBE-SVC-JD5MR3NA4I4DYORP -m comment --comment "kube-system/kube-dns:metrics -> 10.244.0.2:9153" -j KUBE-SEP-N4G2XR5TDX7PQE7P + -A KUBE-SVC-NPX46M4PTMTKRN6Y ! -s 10.244.0.0/16 -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-MARK-MASQ + -A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https -> 192.168.85.2:8443" -j KUBE-SEP-BL4BN7UU5M2OIW4P + -A KUBE-SVC-TCOU7JCQXEZGVUNU ! 
-s 10.244.0.0/16 -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-TCOU7JCQXEZGVUNU -m comment --comment "kube-system/kube-dns:dns -> 10.244.0.2:53" -j KUBE-SEP-YIL6JZP7A3QYXJU2 + -A KUBE-SVC-WDP22YZC5S6MZWYX ! -s 10.244.0.0/16 -d 10.98.0.45/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-MARK-MASQ + -A KUBE-SVC-WDP22YZC5S6MZWYX -m comment --comment "default/netcat -> 10.244.0.3:8080" -j KUBE-SEP-UTWFOSUDHOCXYA2F + COMMIT + # Completed on Sun Nov 2 23:26:02 2025 + + + >>> host: iptables table nat: + Chain PREROUTING (policy ACCEPT 37 packets, 2220 bytes) + pkts bytes target prot opt in out source destination + 55 3460 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 1 85 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.85.1 + 45 2700 DOCKER 0 -- * * 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type LOCAL + + Chain INPUT (policy ACCEPT 37 packets, 2220 bytes) + pkts bytes target prot opt in out source destination + + Chain OUTPUT (policy ACCEPT 62 packets, 3720 bytes) + pkts bytes target prot opt in out source destination + 871 75207 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 730 68485 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.85.1 + 93 5580 DOCKER 0 -- * * 0.0.0.0/0 !127.0.0.0/8 ADDRTYPE match dst-type LOCAL + + Chain POSTROUTING (policy ACCEPT 71 packets, 4395 bytes) + pkts bytes target prot opt in out source destination + 881 75942 KUBE-POSTROUTING 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */ + 0 0 MASQUERADE 0 -- * !docker0 172.17.0.0/16 0.0.0.0/0 + 0 0 DOCKER_POSTROUTING 0 -- * * 0.0.0.0/0 192.168.85.1 + 478 40806 FLANNEL-POSTRTG 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* flanneld masq */ + + Chain DOCKER (2 references) + pkts bytes target prot opt in out source destination + 0 0 RETURN 0 -- docker0 * 0.0.0.0/0 0.0.0.0/0 + + Chain DOCKER_OUTPUT (2 references) + pkts bytes target prot opt in out source destination + 0 0 DNAT 6 -- * * 0.0.0.0/0 192.168.85.1 tcp dpt:53 to:127.0.0.11:39083 + 731 68570 DNAT 17 -- * * 0.0.0.0/0 192.168.85.1 udp dpt:53 to:127.0.0.11:59742 + + Chain DOCKER_POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 0 0 SNAT 6 -- * * 127.0.0.11 0.0.0.0/0 tcp spt:39083 to:192.168.85.1:53 + 0 0 SNAT 17 -- * * 127.0.0.11 0.0.0.0/0 udp spt:59742 to:192.168.85.1:53 + + Chain FLANNEL-POSTRTG (1 references) + pkts bytes target prot opt in out source destination + 0 0 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0 mark match 0x4000/0x4000 /* flanneld masq */ + 12 855 RETURN 0 -- * * 10.244.0.0/24 10.244.0.0/16 /* flanneld masq */ + 0 0 RETURN 0 -- * * 10.244.0.0/16 10.244.0.0/24 /* flanneld masq */ + 0 0 RETURN 0 -- * * !10.244.0.0/16 10.244.0.0/24 /* flanneld masq */ + 0 0 MASQUERADE 0 -- * * 10.244.0.0/16 !224.0.0.0/4 /* flanneld masq */ random-fully + 0 0 MASQUERADE 0 -- * * !10.244.0.0/16 10.244.0.0/16 /* flanneld masq */ random-fully + + Chain KUBE-KUBELET-CANARY (0 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-MARK-MASQ (10 references) + pkts bytes target prot opt in out source destination + 1 60 MARK 0 -- * * 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000 + + Chain KUBE-NODEPORTS (1 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 71 4395 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0 mark match ! 
0x4000/0x4000 + 1 60 MARK 0 -- * * 0.0.0.0/0 0.0.0.0/0 MARK xor 0x4000 + 1 60 MASQUERADE 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ random-fully + + Chain KUBE-PROXY-CANARY (0 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-SEP-BL4BN7UU5M2OIW4P (1 references) + pkts bytes target prot opt in out source destination + 2 120 KUBE-MARK-MASQ 0 -- * * 192.168.85.2 0.0.0.0/0 /* default/kubernetes:https */ + 5 300 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https */ tcp to:192.168.85.2:8443 + + Chain KUBE-SEP-IT2ZTR26TO4XFPTO (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ tcp to:10.244.0.2:53 + + Chain KUBE-SEP-N4G2XR5TDX7PQE7P (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:metrics */ + 0 0 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:metrics */ tcp to:10.244.0.2:9153 + + Chain KUBE-SEP-UTWFOSUDHOCXYA2F (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 0 -- * * 10.244.0.3 0.0.0.0/0 /* default/netcat */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat */ tcp to:10.244.0.3:8080 + + Chain KUBE-SEP-YIL6JZP7A3QYXJU2 (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:dns */ + 8 615 DNAT 17 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns */ udp to:10.244.0.2:53 + + Chain KUBE-SERVICES (2 references) + pkts bytes target prot opt in out source destination + 8 615 KUBE-SVC-TCOU7JCQXEZGVUNU 17 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 1 60 KUBE-SVC-ERIFXISQEP7F7OF4 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 0 0 KUBE-SVC-JD5MR3NA4I4DYORP 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 0 0 KUBE-SVC-NPX46M4PTMTKRN6Y 6 -- * * 0.0.0.0/0 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 1 60 KUBE-SVC-WDP22YZC5S6MZWYX 6 -- * * 0.0.0.0/0 10.98.0.45 /* default/netcat cluster IP */ tcp dpt:8080 + 98 5880 KUBE-NODEPORTS 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL + + Chain KUBE-SVC-ERIFXISQEP7F7OF4 (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 1 60 KUBE-SEP-IT2ZTR26TO4XFPTO 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp -> 10.244.0.2:53 */ + + Chain KUBE-SVC-JD5MR3NA4I4DYORP (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 0 0 KUBE-SEP-N4G2XR5TDX7PQE7P 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:metrics -> 10.244.0.2:9153 */ + + Chain KUBE-SVC-NPX46M4PTMTKRN6Y (1 references) + pkts bytes target prot opt in out source destination + 2 120 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 5 300 KUBE-SEP-BL4BN7UU5M2OIW4P 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https -> 192.168.85.2:8443 */ + + Chain 
KUBE-SVC-TCOU7JCQXEZGVUNU (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 17 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 8 615 KUBE-SEP-YIL6JZP7A3QYXJU2 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns -> 10.244.0.2:53 */ + + Chain KUBE-SVC-WDP22YZC5S6MZWYX (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.98.0.45 /* default/netcat cluster IP */ tcp dpt:8080 + 1 60 KUBE-SEP-UTWFOSUDHOCXYA2F 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat -> 10.244.0.3:8080 */ + + + >>> k8s: describe flannel daemon set: + Name: kube-flannel-ds + Namespace: kube-flannel + Selector: app=flannel + Node-Selector: + Labels: app=flannel + k8s-app=flannel + tier=node + Annotations: deprecated.daemonset.template.generation: 1 + Desired Number of Nodes Scheduled: 1 + Current Number of Nodes Scheduled: 1 + Number of Nodes Scheduled with Up-to-date Pods: 1 + Number of Nodes Scheduled with Available Pods: 1 + Number of Nodes Misscheduled: 0 + Pods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed + Pod Template: + Labels: app=flannel + tier=node + Service Account: flannel + Init Containers: + install-cni-plugin: + Image: ghcr.io/flannel-io/flannel-cni-plugin:v1.8.0-flannel1 + Port: + Host Port: + Command: + cp + Args: + -f + /flannel + /opt/cni/bin/flannel + Environment: + Mounts: + /opt/cni/bin from cni-plugin (rw) + install-cni: + Image: ghcr.io/flannel-io/flannel:v0.27.4 + Port: + Host Port: + Command: + cp + Args: + -f + /etc/kube-flannel/cni-conf.json + /etc/cni/net.d/10-flannel.conflist + Environment: + Mounts: + /etc/cni/net.d from cni (rw) + /etc/kube-flannel/ from flannel-cfg (rw) + Containers: + kube-flannel: + Image: ghcr.io/flannel-io/flannel:v0.27.4 + Port: + Host Port: + Command: + /opt/bin/flanneld + Args: + --ip-masq + --kube-subnet-mgr + Requests: + cpu: 100m + memory: 50Mi + Environment: + POD_NAME: (v1:metadata.name) + POD_NAMESPACE: (v1:metadata.namespace) + EVENT_QUEUE_DEPTH: 5000 + CONT_WHEN_CACHE_NOT_READY: false + Mounts: + /etc/kube-flannel/ from flannel-cfg (rw) + /run/flannel from run (rw) + /run/xtables.lock from xtables-lock (rw) + Volumes: + run: + Type: HostPath (bare host directory volume) + Path: /run/flannel + HostPathType: + cni-plugin: + Type: HostPath (bare host directory volume) + Path: /opt/cni/bin + HostPathType: + cni: + Type: HostPath (bare host directory volume) + Path: /etc/cni/net.d + HostPathType: + flannel-cfg: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-flannel-cfg + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + Priority Class Name: system-node-critical + Node-Selectors: + Tolerations: :NoSchedule op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 42s daemonset-controller Created pod: kube-flannel-ds-n2vp7 + + + >>> k8s: describe flannel pod(s): + Name: kube-flannel-ds-n2vp7 + Namespace: kube-flannel + Priority: 2000001000 + Priority Class Name: system-node-critical + Service Account: flannel + Node: flannel-999044/192.168.85.2 + Start Time: Sun, 02 Nov 2025 23:25:20 +0000 + Labels: app=flannel + controller-revision-hash=6dbf467c8 + pod-template-generation=1 + tier=node + Annotations: + Status: Running + IP: 192.168.85.2 + IPs: + IP: 192.168.85.2 + Controlled By: DaemonSet/kube-flannel-ds + Init Containers: + install-cni-plugin: + Container 
ID: docker://598e65e6440d7dab0717ffd9479baad46841beb559ae47666b4faa246fb31622 + Image: ghcr.io/flannel-io/flannel-cni-plugin:v1.8.0-flannel1 + Image ID: docker-pullable://ghcr.io/flannel-io/flannel-cni-plugin@sha256:25bd091c1867d0237432a4bcb5da720f39198b7d80edcae3bdf08262d242985c + Port: + Host Port: + Command: + cp + Args: + -f + /flannel + /opt/cni/bin/flannel + State: Terminated + Reason: Completed + Exit Code: 0 + Started: Sun, 02 Nov 2025 23:25:32 +0000 + Finished: Sun, 02 Nov 2025 23:25:32 +0000 + Ready: True + Restart Count: 0 + Environment: + Mounts: + /opt/cni/bin from cni-plugin (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-chrzs (ro) + install-cni: + Container ID: docker://66c3e3470d59f875fc92049b8ab0495dec2c3d15e242f56d7a7a2e9794d14c96 + Image: ghcr.io/flannel-io/flannel:v0.27.4 + Image ID: docker-pullable://ghcr.io/flannel-io/flannel@sha256:2ff3c5cb44d0e27b09f27816372084c98fa12486518ca95cb4a970f4a1a464c4 + Port: + Host Port: + Command: + cp + Args: + -f + /etc/kube-flannel/cni-conf.json + /etc/cni/net.d/10-flannel.conflist + State: Terminated + Reason: Completed + Exit Code: 0 + Started: Sun, 02 Nov 2025 23:25:34 +0000 + Finished: Sun, 02 Nov 2025 23:25:34 +0000 + Ready: True + Restart Count: 0 + Environment: + Mounts: + /etc/cni/net.d from cni (rw) + /etc/kube-flannel/ from flannel-cfg (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-chrzs (ro) + Containers: + kube-flannel: + Container ID: docker://9dcc710d8c2724b09955d8093dc905f01fb546a404d39b06f19787fcda1ae7cb + Image: ghcr.io/flannel-io/flannel:v0.27.4 + Image ID: docker-pullable://ghcr.io/flannel-io/flannel@sha256:2ff3c5cb44d0e27b09f27816372084c98fa12486518ca95cb4a970f4a1a464c4 + Port: + Host Port: + Command: + /opt/bin/flanneld + Args: + --ip-masq + --kube-subnet-mgr + State: Running + Started: Sun, 02 Nov 2025 23:25:34 +0000 + Ready: True + Restart Count: 0 + Requests: + cpu: 100m + memory: 50Mi + Environment: + POD_NAME: kube-flannel-ds-n2vp7 (v1:metadata.name) + POD_NAMESPACE: kube-flannel (v1:metadata.namespace) + EVENT_QUEUE_DEPTH: 5000 + CONT_WHEN_CACHE_NOT_READY: false + Mounts: + /etc/kube-flannel/ from flannel-cfg (rw) + /run/flannel from run (rw) + /run/xtables.lock from xtables-lock (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-chrzs (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + run: + Type: HostPath (bare host directory volume) + Path: /run/flannel + HostPathType: + cni-plugin: + Type: HostPath (bare host directory volume) + Path: /opt/cni/bin + HostPathType: + cni: + Type: HostPath (bare host directory volume) + Path: /etc/cni/net.d + HostPathType: + flannel-cfg: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-flannel-cfg + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + kube-api-access-chrzs: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: Burstable + Node-Selectors: + Tolerations: :NoSchedule op=Exists + node.kubernetes.io/disk-pressure:NoSchedule op=Exists + node.kubernetes.io/memory-pressure:NoSchedule op=Exists + node.kubernetes.io/network-unavailable:NoSchedule op=Exists + node.kubernetes.io/not-ready:NoExecute op=Exists + node.kubernetes.io/pid-pressure:NoSchedule 
op=Exists + node.kubernetes.io/unreachable:NoExecute op=Exists + node.kubernetes.io/unschedulable:NoSchedule op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 42s default-scheduler Successfully assigned kube-flannel/kube-flannel-ds-n2vp7 to flannel-999044 + Normal Pulling 41s kubelet Pulling image "ghcr.io/flannel-io/flannel-cni-plugin:v1.8.0-flannel1" + Normal Pulled 30s kubelet Successfully pulled image "ghcr.io/flannel-io/flannel-cni-plugin:v1.8.0-flannel1" in 11.148s (11.148s including waiting). Image size: 10842604 bytes. + Normal Created 30s kubelet Created container: install-cni-plugin + Normal Started 30s kubelet Started container install-cni-plugin + Normal Pulling 30s kubelet Pulling image "ghcr.io/flannel-io/flannel:v0.27.4" + Normal Pulled 28s kubelet Successfully pulled image "ghcr.io/flannel-io/flannel:v0.27.4" in 1.394s (1.394s including waiting). Image size: 91386371 bytes. + Normal Created 28s kubelet Created container: install-cni + Normal Started 28s kubelet Started container install-cni + Normal Pulled 28s kubelet Container image "ghcr.io/flannel-io/flannel:v0.27.4" already present on machine + Normal Created 28s kubelet Created container: kube-flannel + Normal Started 28s kubelet Started container kube-flannel + + + >>> k8s: flannel container(s) logs (current): + [pod/kube-flannel-ds-n2vp7/kube-flannel] I1102 23:25:35.784307 1 iptables.go:101] Current network or subnet (10.244.0.0/16, 10.244.0.0/24) is not equal to previous one (0.0.0.0/0, 0.0.0.0/0), trying to recycle old iptables rules + [pod/kube-flannel-ds-n2vp7/kube-flannel] I1102 23:25:35.794637 1 iptables.go:111] Setting up masking rules + [pod/kube-flannel-ds-n2vp7/kube-flannel] I1102 23:25:35.796112 1 iptables.go:212] Changing default FORWARD chain policy to ACCEPT + [pod/kube-flannel-ds-n2vp7/kube-flannel] I1102 23:25:35.797240 1 main.go:467] Wrote subnet file to /run/flannel/subnet.env + [pod/kube-flannel-ds-n2vp7/kube-flannel] I1102 23:25:35.797251 1 main.go:471] Running backend. 
+ [pod/kube-flannel-ds-n2vp7/kube-flannel] I1102 23:25:35.797341 1 vxlan_network.go:68] watching for new subnet leases + [pod/kube-flannel-ds-n2vp7/kube-flannel] I1102 23:25:35.797365 1 vxlan_network.go:115] starting vxlan device watcher + [pod/kube-flannel-ds-n2vp7/kube-flannel] I1102 23:25:35.801348 1 main.go:492] Waiting for all goroutines to exit + [pod/kube-flannel-ds-n2vp7/kube-flannel] I1102 23:25:35.802889 1 iptables.go:358] bootstrap done + [pod/kube-flannel-ds-n2vp7/kube-flannel] I1102 23:25:35.807232 1 iptables.go:358] bootstrap done + + + >>> k8s: flannel container(s) logs (previous): + error: previous terminated container "install-cni-plugin" in pod "kube-flannel-ds-n2vp7" not found + error: previous terminated container "install-cni" in pod "kube-flannel-ds-n2vp7" not found + error: previous terminated container "kube-flannel" in pod "kube-flannel-ds-n2vp7" not found + + + >>> host: /run/flannel/subnet.env: + FLANNEL_NETWORK=10.244.0.0/16 + FLANNEL_SUBNET=10.244.0.1/24 + FLANNEL_MTU=1450 + FLANNEL_IPMASQ=true + + + >>> host: /etc/kube-flannel/cni-conf.json: + cat: /etc/kube-flannel/cni-conf.json: No such file or directory + ssh: Process exited with status 1 + + + >>> k8s: describe kube-proxy daemon set: + Name: kube-proxy + Namespace: kube-system + Selector: k8s-app=kube-proxy + Node-Selector: kubernetes.io/os=linux + Labels: k8s-app=kube-proxy + Annotations: deprecated.daemonset.template.generation: 1 + Desired Number of Nodes Scheduled: 1 + Current Number of Nodes Scheduled: 1 + Number of Nodes Scheduled with Up-to-date Pods: 1 + Number of Nodes Scheduled with Available Pods: 1 + Number of Nodes Misscheduled: 0 + Pods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed + Pod Template: + Labels: k8s-app=kube-proxy + Service Account: kube-proxy + Containers: + kube-proxy: + Image: registry.k8s.io/kube-proxy:v1.34.1 + Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + --hostname-override=$(NODE_NAME) + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + Priority Class Name: system-node-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 43s daemonset-controller Created pod: kube-proxy-kh6wp + + + >>> k8s: describe kube-proxy pod(s): + Name: kube-proxy-kh6wp + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Service Account: kube-proxy + Node: flannel-999044/192.168.85.2 + Start Time: Sun, 02 Nov 2025 23:25:20 +0000 + Labels: controller-revision-hash=66486579fc + k8s-app=kube-proxy + pod-template-generation=1 + Annotations: + Status: Running + IP: 192.168.85.2 + IPs: + IP: 192.168.85.2 + Controlled By: DaemonSet/kube-proxy + Containers: + kube-proxy: + Container ID: docker://3a059aa894af237c81f08f3907314916b26a0bf75edd30b7129b82a04fc7cb1b + Image: registry.k8s.io/kube-proxy:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-proxy@sha256:913cc83ca0b5588a81d86ce8eedeb3ed1e9c1326e81852a1ea4f622b74ff749a + 
Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + --hostname-override=$(NODE_NAME) + State: Running + Started: Sun, 02 Nov 2025 23:25:21 +0000 + Ready: True + Restart Count: 0 + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-c24ms (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + kube-api-access-c24ms: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + node.kubernetes.io/disk-pressure:NoSchedule op=Exists + node.kubernetes.io/memory-pressure:NoSchedule op=Exists + node.kubernetes.io/network-unavailable:NoSchedule op=Exists + node.kubernetes.io/not-ready:NoExecute op=Exists + node.kubernetes.io/pid-pressure:NoSchedule op=Exists + node.kubernetes.io/unreachable:NoExecute op=Exists + node.kubernetes.io/unschedulable:NoSchedule op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 43s default-scheduler Successfully assigned kube-system/kube-proxy-kh6wp to flannel-999044 + Normal Pulled 42s kubelet Container image "registry.k8s.io/kube-proxy:v1.34.1" already present on machine + Normal Created 42s kubelet Created container: kube-proxy + Normal Started 42s kubelet Started container kube-proxy + + + >>> k8s: kube-proxy logs: + I1102 23:25:21.276726 1 server_linux.go:53] "Using iptables proxy" + I1102 23:25:21.312850 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache" + I1102 23:25:21.412966 1 shared_informer.go:356] "Caches are synced" controller="node informer cache" + I1102 23:25:21.412991 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.85.2"] + E1102 23:25:21.413039 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. 
Consider using `--nodeport-addresses primary`" + I1102 23:25:21.445485 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4" + I1102 23:25:21.445513 1 server_linux.go:132] "Using iptables Proxier" + I1102 23:25:21.450443 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4" + I1102 23:25:21.451066 1 server.go:527] "Version info" version="v1.34.1" + I1102 23:25:21.451154 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + I1102 23:25:21.452831 1 config.go:200] "Starting service config controller" + I1102 23:25:21.452847 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config" + I1102 23:25:21.452860 1 config.go:106] "Starting endpoint slice config controller" + I1102 23:25:21.452862 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config" + I1102 23:25:21.452870 1 config.go:403] "Starting serviceCIDR config controller" + I1102 23:25:21.452874 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config" + I1102 23:25:21.453426 1 config.go:309] "Starting node config controller" + I1102 23:25:21.453436 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config" + I1102 23:25:21.453440 1 shared_informer.go:356] "Caches are synced" controller="node config" + I1102 23:25:21.553728 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config" + I1102 23:25:21.553740 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config" + I1102 23:25:21.553749 1 shared_informer.go:356] "Caches are synced" controller="service config" + + + >>> host: kubelet daemon status: + ● kubelet.service - kubelet: The Kubernetes Node Agent + Loaded: loaded (]8;;file://flannel-999044/lib/systemd/system/kubelet.service/lib/systemd/system/kubelet.service]8;;; disabled; preset: enabled) + Drop-In: /etc/systemd/system/kubelet.service.d + └─]8;;file://flannel-999044/etc/systemd/system/kubelet.service.d/10-kubeadm.conf10-kubeadm.conf]8;; + Active: active (running) since Sun 2025-11-02 23:25:14 UTC; 49s ago + Docs: ]8;;http://kubernetes.io/docs/http://kubernetes.io/docs/]8;; + Main PID: 2234 (kubelet) + Tasks: 16 (limit: 629145) + Memory: 34.4M + CPU: 1.069s + CGroup: /system.slice/kubelet.service + └─2234 /var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=flannel-999044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2 + + Nov 02 23:25:35 flannel-999044 kubelet[2234]: I1102 23:25:35.689085 2234 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=14.689072132 podStartE2EDuration="14.689072132s" podCreationTimestamp="2025-11-02 23:25:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:35.688889298 +0000 UTC m=+21.171178848" watchObservedRunningTime="2025-11-02 23:25:35.689072132 +0000 UTC m=+21.171361680" + Nov 02 23:25:35 flannel-999044 kubelet[2234]: E1102 23:25:35.786206 2234 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to set up sandbox container 
\"3b78cbe0cd110c5f32a9ffca2a74d6322fc7e180011bc791cfcfc48e86ebab1c\" network for pod \"coredns-66bc5c9577-vnq76\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-vnq76_kube-system\" network: plugin type=\"flannel\" failed (add): failed to load flannel 'subnet.env' file: open /run/flannel/subnet.env: no such file or directory. Check the flannel pod log for this node." + Nov 02 23:25:35 flannel-999044 kubelet[2234]: E1102 23:25:35.786245 2234 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to set up sandbox container \"3b78cbe0cd110c5f32a9ffca2a74d6322fc7e180011bc791cfcfc48e86ebab1c\" network for pod \"coredns-66bc5c9577-vnq76\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-vnq76_kube-system\" network: plugin type=\"flannel\" failed (add): failed to load flannel 'subnet.env' file: open /run/flannel/subnet.env: no such file or directory. Check the flannel pod log for this node." pod="kube-system/coredns-66bc5c9577-vnq76" + Nov 02 23:25:35 flannel-999044 kubelet[2234]: E1102 23:25:35.786260 2234 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"3b78cbe0cd110c5f32a9ffca2a74d6322fc7e180011bc791cfcfc48e86ebab1c\" network for pod \"coredns-66bc5c9577-vnq76\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-vnq76_kube-system\" network: plugin type=\"flannel\" failed (add): failed to load flannel 'subnet.env' file: open /run/flannel/subnet.env: no such file or directory. Check the flannel pod log for this node." pod="kube-system/coredns-66bc5c9577-vnq76" + Nov 02 23:25:35 flannel-999044 kubelet[2234]: E1102 23:25:35.786291 2234 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-66bc5c9577-vnq76_kube-system(73b22dda-4769-4201-bf3a-3fc795b3da42)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-66bc5c9577-vnq76_kube-system(73b22dda-4769-4201-bf3a-3fc795b3da42)\\\": rpc error: code = Unknown desc = failed to set up sandbox container \\\"3b78cbe0cd110c5f32a9ffca2a74d6322fc7e180011bc791cfcfc48e86ebab1c\\\" network for pod \\\"coredns-66bc5c9577-vnq76\\\": networkPlugin cni failed to set up pod \\\"coredns-66bc5c9577-vnq76_kube-system\\\" network: plugin type=\\\"flannel\\\" failed (add): failed to load flannel 'subnet.env' file: open /run/flannel/subnet.env: no such file or directory. 
Check the flannel pod log for this node.\"" pod="kube-system/coredns-66bc5c9577-vnq76" podUID="73b22dda-4769-4201-bf3a-3fc795b3da42" + Nov 02 23:25:36 flannel-999044 kubelet[2234]: I1102 23:25:36.689433 2234 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b78cbe0cd110c5f32a9ffca2a74d6322fc7e180011bc791cfcfc48e86ebab1c" + Nov 02 23:25:37 flannel-999044 kubelet[2234]: I1102 23:25:37.714015 2234 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-vnq76" podStartSLOduration=17.714001071 podStartE2EDuration="17.714001071s" podCreationTimestamp="2025-11-02 23:25:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:37.70828616 +0000 UTC m=+23.190575711" watchObservedRunningTime="2025-11-02 23:25:37.714001071 +0000 UTC m=+23.196290616" + Nov 02 23:25:45 flannel-999044 kubelet[2234]: I1102 23:25:45.753699 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bq7d\" (UniqueName: \"kubernetes.io/projected/58acc91e-9380-4dfb-ab88-7c9ca7303409-kube-api-access-7bq7d\") pod \"netcat-cd4db9dbf-r5v44\" (UID: \"58acc91e-9380-4dfb-ab88-7c9ca7303409\") " pod="default/netcat-cd4db9dbf-r5v44" + Nov 02 23:25:47 flannel-999044 kubelet[2234]: I1102 23:25:47.751440 2234 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/netcat-cd4db9dbf-r5v44" podStartSLOduration=1.8276032070000001 podStartE2EDuration="2.7514255s" podCreationTimestamp="2025-11-02 23:25:45 +0000 UTC" firstStartedPulling="2025-11-02 23:25:46.088241711 +0000 UTC m=+31.570531241" lastFinishedPulling="2025-11-02 23:25:47.012064004 +0000 UTC m=+32.494353534" observedRunningTime="2025-11-02 23:25:47.751303752 +0000 UTC m=+33.233593302" watchObservedRunningTime="2025-11-02 23:25:47.7514255 +0000 UTC m=+33.233715045" + Nov 02 23:26:00 flannel-999044 kubelet[2234]: E1102 23:26:00.174094 2234 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp [::1]:49540->[::1]:43675: write tcp [::1]:49540->[::1]:43675: write: broken pipe + + + >>> host: kubelet daemon config: + # ]8;;file://flannel-999044/lib/systemd/system/kubelet.service/lib/systemd/system/kubelet.service]8;; + [Unit] + Description=kubelet: The Kubernetes Node Agent + Documentation=http://kubernetes.io/docs/ + StartLimitIntervalSec=0 + + [Service] + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet + Restart=always + # Tuned for local dev: faster than upstream default (10s), but slower than systemd default (100ms) + RestartSec=600ms + + [Install] + WantedBy=multi-user.target + + # ]8;;file://flannel-999044/etc/systemd/system/kubelet.service.d/10-kubeadm.conf/etc/systemd/system/kubelet.service.d/10-kubeadm.conf]8;; + [Unit] + Wants=docker.socket + + [Service] + ExecStart= + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=flannel-999044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.85.2 + + [Install] + + + >>> k8s: kubelet logs: + Nov 02 23:25:04 flannel-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. 
+ ░░  + ░░ The job identifier is 404. + Nov 02 23:25:04 flannel-999044 kubelet[1530]: E1102 23:25:04.802829 1530 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:04 flannel-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:04 flannel-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:05 flannel-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 1. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:05 flannel-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 457 and the job result is done. + Nov 02 23:25:05 flannel-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 457. + Nov 02 23:25:05 flannel-999044 kubelet[1540]: E1102 23:25:05.530840 1540 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:05 flannel-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:05 flannel-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:06 flannel-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 2. 
+ ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:06 flannel-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 510 and the job result is done. + Nov 02 23:25:06 flannel-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 510. + Nov 02 23:25:06 flannel-999044 kubelet[1698]: E1102 23:25:06.283305 1698 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:06 flannel-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:06 flannel-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:06 flannel-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 3. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:06 flannel-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 563 and the job result is done. + Nov 02 23:25:07 flannel-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 563. 
+ Nov 02 23:25:07 flannel-999044 kubelet[1707]: E1102 23:25:07.027582 1707 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:07 flannel-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:07 flannel-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:07 flannel-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 4. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:07 flannel-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 616 and the job result is done. + Nov 02 23:25:07 flannel-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 616. + Nov 02 23:25:07 flannel-999044 kubelet[1720]: E1102 23:25:07.784099 1720 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:07 flannel-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:07 flannel-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:08 flannel-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 5. 
+ ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:08 flannel-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 669 and the job result is done. + Nov 02 23:25:08 flannel-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 669. + Nov 02 23:25:08 flannel-999044 kubelet[1729]: E1102 23:25:08.523793 1729 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:08 flannel-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:08 flannel-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:09 flannel-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 6. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:09 flannel-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 722 and the job result is done. + Nov 02 23:25:09 flannel-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 722. 
+ Nov 02 23:25:09 flannel-999044 kubelet[1741]: E1102 23:25:09.270357 1741 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:09 flannel-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:09 flannel-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:09 flannel-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 775 and the job result is done. + Nov 02 23:25:09 flannel-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 776. + Nov 02 23:25:09 flannel-999044 kubelet[1773]: I1102 23:25:09.622179 1773 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:25:09 flannel-999044 kubelet[1773]: I1102 23:25:09.622226 1773 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:25:09 flannel-999044 kubelet[1773]: I1102 23:25:09.622247 1773 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:25:09 flannel-999044 kubelet[1773]: I1102 23:25:09.622251 1773 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." + Nov 02 23:25:09 flannel-999044 kubelet[1773]: I1102 23:25:09.622365 1773 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:25:09 flannel-999044 kubelet[1773]: E1102 23:25:09.919860 1773 certificate_manager.go:596] "Failed while requesting a signed certificate from the control plane" err="cannot create certificate signing request: Post \"https://192.168.85.2:8443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 192.168.85.2:8443: connect: connection refused" logger="kubernetes.io/kube-apiserver-client-kubelet.UnhandledError" + Nov 02 23:25:09 flannel-999044 kubelet[1773]: I1102 23:25:09.920268 1773 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:25:09 flannel-999044 kubelet[1773]: I1102 23:25:09.922889 1773 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:25:09 flannel-999044 kubelet[1773]: I1102 23:25:09.926785 1773 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. 
Defaulting to /" + Nov 02 23:25:09 flannel-999044 kubelet[1773]: I1102 23:25:09.926802 1773 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:25:09 flannel-999044 kubelet[1773]: I1102 23:25:09.926985 1773 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:25:09 flannel-999044 kubelet[1773]: I1102 23:25:09.926998 1773 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"flannel-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:25:09 flannel-999044 kubelet[1773]: I1102 23:25:09.927081 1773 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:25:09 flannel-999044 kubelet[1773]: I1102 23:25:09.927086 1773 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:25:09 flannel-999044 kubelet[1773]: I1102 23:25:09.927135 1773 container_manager_linux.go:315] "Creating Dynamic Resource Allocation (DRA) manager" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.109958 1773 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.110111 1773 kubelet.go:475] "Attempting to sync node with API server" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.110122 1773 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.110143 1773 kubelet.go:387] "Adding apiserver pod source" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.110155 1773 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.110567 1773 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: Get \"https://192.168.85.2:8443/api/v1/nodes?fieldSelector=metadata.name%3Dflannel-999044&limit=500&resourceVersion=0\": dial tcp 192.168.85.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.110607 1773 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: Get \"https://192.168.85.2:8443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 192.168.85.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.110644 1773 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 
02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.110979 1773 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.111000 1773 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: W1102 23:25:10.111036 1773 probe.go:272] Flexvolume plugin directory at /usr/libexec/kubernetes/kubelet-plugins/volume/exec/ does not exist. Recreating. + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.111635 1773 server.go:1262] "Started kubelet" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.112287 1773 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.112347 1773 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.112559 1773 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.112600 1773 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.113366 1773 server.go:310] "Adding debug handlers to kubelet server" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.113380 1773 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.113570 1773 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://192.168.85.2:8443/api/v1/namespaces/default/events\": dial tcp 192.168.85.2:8443: connect: connection refused" event="&Event{ObjectMeta:{flannel-999044.1874542dd9b6d87c default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:flannel-999044,UID:flannel-999044,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:flannel-999044,},FirstTimestamp:2025-11-02 23:25:10.111606908 +0000 UTC m=+0.601475867,LastTimestamp:2025-11-02 23:25:10.111606908 +0000 UTC m=+0.601475867,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:flannel-999044,}" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.116314 1773 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.116266 1773 volume_manager.go:313] "Starting Kubelet Volume Manager" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.116655 1773 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"flannel-999044\" not found" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.117301 1773 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.117337 1773 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: Get \"https://192.168.85.2:8443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial 
tcp 192.168.85.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.117358 1773 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.117398 1773 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.85.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/flannel-999044?timeout=10s\": dial tcp 192.168.85.2:8443: connect: connection refused" interval="200ms" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.118868 1773 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.120797 1773 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.120811 1773 factory.go:223] Registration of the systemd container factory successfully + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.128696 1773 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.128786 1773 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.128804 1773 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.133431 1773 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.134182 1773 kubelet_network_linux.go:54] "Initialized iptables rules." 
protocol="IPv6" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.134197 1773 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.134210 1773 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.134245 1773 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.134496 1773 reflector.go:205] "Failed to watch" err="failed to list *v1.RuntimeClass: Get \"https://192.168.85.2:8443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 192.168.85.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.217526 1773 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"flannel-999044\" not found" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.234670 1773 kubelet.go:2451] "Skipping pod synchronization" err="container runtime status check may not have completed yet" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.317861 1773 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"flannel-999044\" not found" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.318044 1773 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.85.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/flannel-999044?timeout=10s\": dial tcp 192.168.85.2:8443: connect: connection refused" interval="400ms" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.418225 1773 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"flannel-999044\" not found" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.435518 1773 kubelet.go:2451] "Skipping pod synchronization" err="container runtime status check may not have completed yet" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.518270 1773 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"flannel-999044\" not found" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.550987 1773 policy_none.go:49] "None policy: Start" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.551002 1773 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.551012 1773 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.571373 1773 policy_none.go:47] "Start" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.602027 1773 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.602198 1773 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.602209 1773 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.602612 1773 plugin_manager.go:118] 
"Starting Kubelet Plugin Manager" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.605599 1773 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="no imagefs label for configured runtime" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.605683 1773 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"flannel-999044\" not found" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.706115 1773 kubelet_node_status.go:75] "Attempting to register node" node="flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.706373 1773 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.85.2:8443/api/v1/nodes\": dial tcp 192.168.85.2:8443: connect: connection refused" node="flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.718908 1773 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.85.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/flannel-999044?timeout=10s\": dial tcp 192.168.85.2:8443: connect: connection refused" interval="800ms" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.850459 1773 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"flannel-999044\" not found" node="flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.852967 1773 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"flannel-999044\" not found" node="flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.862816 1773 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"flannel-999044\" not found" node="flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.865728 1773 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"flannel-999044\" not found" node="flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.907132 1773 kubelet_node_status.go:75] "Attempting to register node" node="flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: E1102 23:25:10.907367 1773 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.85.2:8443/api/v1/nodes\": dial tcp 192.168.85.2:8443: connect: connection refused" node="flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.919645 1773 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/3690f33e832437d997f72405f698fc57-etc-ca-certificates\") pod \"kube-apiserver-flannel-999044\" (UID: \"3690f33e832437d997f72405f698fc57\") " pod="kube-system/kube-apiserver-flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.919680 1773 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/b1803ec0312bde40f996876eaea36730-usr-local-share-ca-certificates\") pod \"kube-controller-manager-flannel-999044\" (UID: \"b1803ec0312bde40f996876eaea36730\") " pod="kube-system/kube-controller-manager-flannel-999044" + Nov 02 23:25:10 flannel-999044 
kubelet[1773]: I1102 23:25:10.919715 1773 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/a4d3425174465677e7dfa360ac494100-kubeconfig\") pod \"kube-scheduler-flannel-999044\" (UID: \"a4d3425174465677e7dfa360ac494100\") " pod="kube-system/kube-scheduler-flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.919731 1773 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/a62682ee440762df0f57297f78696397-etcd-data\") pod \"etcd-flannel-999044\" (UID: \"a62682ee440762df0f57297f78696397\") " pod="kube-system/etcd-flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.919748 1773 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/3690f33e832437d997f72405f698fc57-ca-certs\") pod \"kube-apiserver-flannel-999044\" (UID: \"3690f33e832437d997f72405f698fc57\") " pod="kube-system/kube-apiserver-flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.919757 1773 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/3690f33e832437d997f72405f698fc57-usr-local-share-ca-certificates\") pod \"kube-apiserver-flannel-999044\" (UID: \"3690f33e832437d997f72405f698fc57\") " pod="kube-system/kube-apiserver-flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.919765 1773 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/b1803ec0312bde40f996876eaea36730-etc-ca-certificates\") pod \"kube-controller-manager-flannel-999044\" (UID: \"b1803ec0312bde40f996876eaea36730\") " pod="kube-system/kube-controller-manager-flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.919787 1773 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/b1803ec0312bde40f996876eaea36730-k8s-certs\") pod \"kube-controller-manager-flannel-999044\" (UID: \"b1803ec0312bde40f996876eaea36730\") " pod="kube-system/kube-controller-manager-flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.919818 1773 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/a62682ee440762df0f57297f78696397-etcd-certs\") pod \"etcd-flannel-999044\" (UID: \"a62682ee440762df0f57297f78696397\") " pod="kube-system/etcd-flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.919832 1773 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/3690f33e832437d997f72405f698fc57-k8s-certs\") pod \"kube-apiserver-flannel-999044\" (UID: \"3690f33e832437d997f72405f698fc57\") " pod="kube-system/kube-apiserver-flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.919845 1773 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/b1803ec0312bde40f996876eaea36730-flexvolume-dir\") pod 
\"kube-controller-manager-flannel-999044\" (UID: \"b1803ec0312bde40f996876eaea36730\") " pod="kube-system/kube-controller-manager-flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.919856 1773 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/b1803ec0312bde40f996876eaea36730-usr-share-ca-certificates\") pod \"kube-controller-manager-flannel-999044\" (UID: \"b1803ec0312bde40f996876eaea36730\") " pod="kube-system/kube-controller-manager-flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.919868 1773 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/3690f33e832437d997f72405f698fc57-usr-share-ca-certificates\") pod \"kube-apiserver-flannel-999044\" (UID: \"3690f33e832437d997f72405f698fc57\") " pod="kube-system/kube-apiserver-flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.919876 1773 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/b1803ec0312bde40f996876eaea36730-ca-certs\") pod \"kube-controller-manager-flannel-999044\" (UID: \"b1803ec0312bde40f996876eaea36730\") " pod="kube-system/kube-controller-manager-flannel-999044" + Nov 02 23:25:10 flannel-999044 kubelet[1773]: I1102 23:25:10.919885 1773 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/b1803ec0312bde40f996876eaea36730-kubeconfig\") pod \"kube-controller-manager-flannel-999044\" (UID: \"b1803ec0312bde40f996876eaea36730\") " pod="kube-system/kube-controller-manager-flannel-999044" + Nov 02 23:25:11 flannel-999044 kubelet[1773]: E1102 23:25:11.041252 1773 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: Get \"https://192.168.85.2:8443/api/v1/nodes?fieldSelector=metadata.name%3Dflannel-999044&limit=500&resourceVersion=0\": dial tcp 192.168.85.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" + Nov 02 23:25:11 flannel-999044 kubelet[1773]: E1102 23:25:11.126551 1773 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: Get \"https://192.168.85.2:8443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 192.168.85.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" + Nov 02 23:25:11 flannel-999044 kubelet[1773]: E1102 23:25:11.214435 1773 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: Get \"https://192.168.85.2:8443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 192.168.85.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" + Nov 02 23:25:11 flannel-999044 kubelet[1773]: I1102 23:25:11.308712 1773 kubelet_node_status.go:75] "Attempting to register node" node="flannel-999044" + Nov 02 23:25:11 flannel-999044 kubelet[1773]: E1102 23:25:11.309029 1773 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.85.2:8443/api/v1/nodes\": dial tcp 192.168.85.2:8443: connect: connection refused" node="flannel-999044" + Nov 02 23:25:11 
flannel-999044 kubelet[1773]: E1102 23:25:11.451468 1773 reflector.go:205] "Failed to watch" err="failed to list *v1.RuntimeClass: Get \"https://192.168.85.2:8443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 192.168.85.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass" + Nov 02 23:25:12 flannel-999044 kubelet[1773]: I1102 23:25:12.110135 1773 kubelet_node_status.go:75] "Attempting to register node" node="flannel-999044" + Nov 02 23:25:12 flannel-999044 kubelet[1773]: E1102 23:25:12.146042 1773 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"flannel-999044\" not found" node="flannel-999044" + Nov 02 23:25:12 flannel-999044 kubelet[1773]: E1102 23:25:12.151139 1773 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"flannel-999044\" not found" node="flannel-999044" + Nov 02 23:25:12 flannel-999044 kubelet[1773]: E1102 23:25:12.157280 1773 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"flannel-999044\" not found" node="flannel-999044" + Nov 02 23:25:12 flannel-999044 kubelet[1773]: E1102 23:25:12.162207 1773 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"flannel-999044\" not found" node="flannel-999044" + Nov 02 23:25:12 flannel-999044 kubelet[1773]: E1102 23:25:12.514354 1773 nodelease.go:49] "Failed to get node when trying to set owner ref to the node lease" err="nodes \"flannel-999044\" not found" node="flannel-999044" + Nov 02 23:25:12 flannel-999044 kubelet[1773]: I1102 23:25:12.608739 1773 kubelet_node_status.go:78] "Successfully registered node" node="flannel-999044" + Nov 02 23:25:12 flannel-999044 kubelet[1773]: E1102 23:25:12.608768 1773 kubelet_node_status.go:486] "Error updating node status, will retry" err="error getting node \"flannel-999044\": node \"flannel-999044\" not found" + Nov 02 23:25:12 flannel-999044 kubelet[1773]: E1102 23:25:12.614984 1773 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"flannel-999044\" not found" + Nov 02 23:25:12 flannel-999044 kubelet[1773]: E1102 23:25:12.715328 1773 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"flannel-999044\" not found" + Nov 02 23:25:12 flannel-999044 kubelet[1773]: E1102 23:25:12.815843 1773 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"flannel-999044\" not found" + Nov 02 23:25:12 flannel-999044 kubelet[1773]: E1102 23:25:12.916780 1773 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"flannel-999044\" not found" + Nov 02 23:25:13 flannel-999044 kubelet[1773]: I1102 23:25:13.017646 1773 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-flannel-999044" + Nov 02 23:25:13 flannel-999044 kubelet[1773]: E1102 23:25:13.020321 1773 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-flannel-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/etcd-flannel-999044" + Nov 02 23:25:13 flannel-999044 kubelet[1773]: I1102 23:25:13.020335 1773 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-flannel-999044" + Nov 02 23:25:13 flannel-999044 kubelet[1773]: E1102 23:25:13.021232 1773 kubelet.go:3221] "Failed 
creating a mirror pod" err="pods \"kube-apiserver-flannel-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-apiserver-flannel-999044" + Nov 02 23:25:13 flannel-999044 kubelet[1773]: I1102 23:25:13.021241 1773 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-flannel-999044" + Nov 02 23:25:13 flannel-999044 kubelet[1773]: E1102 23:25:13.022116 1773 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-flannel-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-controller-manager-flannel-999044" + Nov 02 23:25:13 flannel-999044 kubelet[1773]: I1102 23:25:13.022125 1773 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-flannel-999044" + Nov 02 23:25:13 flannel-999044 kubelet[1773]: E1102 23:25:13.023027 1773 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-flannel-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-scheduler-flannel-999044" + Nov 02 23:25:13 flannel-999044 kubelet[1773]: I1102 23:25:13.111385 1773 apiserver.go:52] "Watching apiserver" + Nov 02 23:25:13 flannel-999044 kubelet[1773]: I1102 23:25:13.117649 1773 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:25:13 flannel-999044 kubelet[1773]: I1102 23:25:13.164152 1773 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-flannel-999044" + Nov 02 23:25:13 flannel-999044 kubelet[1773]: I1102 23:25:13.164191 1773 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-flannel-999044" + Nov 02 23:25:13 flannel-999044 kubelet[1773]: I1102 23:25:13.164245 1773 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-flannel-999044" + Nov 02 23:25:13 flannel-999044 kubelet[1773]: E1102 23:25:13.165110 1773 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-flannel-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/etcd-flannel-999044" + Nov 02 23:25:13 flannel-999044 kubelet[1773]: E1102 23:25:13.165111 1773 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-flannel-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-scheduler-flannel-999044" + Nov 02 23:25:13 flannel-999044 kubelet[1773]: E1102 23:25:13.165328 1773 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-flannel-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-apiserver-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[1773]: I1102 23:25:14.166955 1773 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-flannel-999044" + Nov 02 23:25:14 flannel-999044 systemd[1]: Stopping kubelet.service - kubelet: The Kubernetes Node Agent... + ░░ Subject: A stop job for unit kubelet.service has begun execution + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has begun execution. + ░░  + ░░ The job identifier is 908. 
+ Nov 02 23:25:14 flannel-999044 kubelet[1773]: I1102 23:25:14.493401 1773 dynamic_cafile_content.go:175] "Shutting down controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:25:14 flannel-999044 systemd[1]: kubelet.service: Deactivated successfully. + ░░ Subject: Unit succeeded + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has successfully entered the 'dead' state. + Nov 02 23:25:14 flannel-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 908 and the job result is done. + Nov 02 23:25:14 flannel-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 908. + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.546862 2234 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.546909 2234 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.546951 2234 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.546955 2234 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.547082 2234 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.547794 2234 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.549012 2234 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.551792 2234 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.555245 2234 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. 
Defaulting to /" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.555268 2234 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.555443 2234 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.555451 2234 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"flannel-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.555547 2234 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.555551 2234 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.555586 2234 container_manager_linux.go:315] "Creating Dynamic Resource Allocation (DRA) manager" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.556017 2234 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.556175 2234 kubelet.go:475] "Attempting to sync node with API server" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.556191 2234 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.556208 2234 kubelet.go:387] "Adding apiserver pod source" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.556229 2234 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.556766 2234 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.557165 2234 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.557186 2234 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.557821 2234 server.go:1262] "Started kubelet" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.557997 2234 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.558120 2234 ratelimit.go:56] "Setting rate limiting for 
endpoint" service="podresources" qps=100 burstTokens=10 + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.558201 2234 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.558438 2234 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.558587 2234 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.558790 2234 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.560368 2234 server.go:310] "Adding debug handlers to kubelet server" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.563296 2234 volume_manager.go:313] "Starting Kubelet Volume Manager" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: E1102 23:25:14.563721 2234 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"flannel-999044\" not found" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.564150 2234 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.565069 2234 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.563972 2234 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.569363 2234 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.569376 2234 factory.go:223] Registration of the systemd container factory successfully + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.575469 2234 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.576522 2234 kubelet_network_linux.go:54] "Initialized iptables rules." 
protocol="IPv6" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.576538 2234 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.576554 2234 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: E1102 23:25:14.576585 2234 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.586361 2234 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.586375 2234 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.586396 2234 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.586516 2234 state_mem.go:88] "Updated default CPUSet" cpuSet="" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.586524 2234 state_mem.go:96] "Updated CPUSet assignments" assignments={} + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.586538 2234 policy_none.go:49] "None policy: Start" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.586545 2234 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.586554 2234 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.586625 2234 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.586635 2234 policy_none.go:47] "Start" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: E1102 23:25:14.588999 2234 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.589105 2234 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.589113 2234 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.589262 2234 plugin_manager.go:118] "Starting Kubelet Plugin Manager" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: E1102 23:25:14.589565 2234 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." 
err="no imagefs label for configured runtime" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.677250 2234 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.677321 2234 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.677440 2234 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.677471 2234 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: E1102 23:25:14.679980 2234 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-flannel-999044\" already exists" pod="kube-system/kube-scheduler-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.689694 2234 kubelet_node_status.go:75] "Attempting to register node" node="flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.692776 2234 kubelet_node_status.go:124] "Node was previously registered" node="flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.692835 2234 kubelet_node_status.go:78] "Successfully registered node" node="flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.864994 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/b1803ec0312bde40f996876eaea36730-usr-local-share-ca-certificates\") pod \"kube-controller-manager-flannel-999044\" (UID: \"b1803ec0312bde40f996876eaea36730\") " pod="kube-system/kube-controller-manager-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.865014 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/a4d3425174465677e7dfa360ac494100-kubeconfig\") pod \"kube-scheduler-flannel-999044\" (UID: \"a4d3425174465677e7dfa360ac494100\") " pod="kube-system/kube-scheduler-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.865054 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/a62682ee440762df0f57297f78696397-etcd-certs\") pod \"etcd-flannel-999044\" (UID: \"a62682ee440762df0f57297f78696397\") " pod="kube-system/etcd-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.865062 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/3690f33e832437d997f72405f698fc57-ca-certs\") pod \"kube-apiserver-flannel-999044\" (UID: \"3690f33e832437d997f72405f698fc57\") " pod="kube-system/kube-apiserver-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.865072 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/3690f33e832437d997f72405f698fc57-etc-ca-certificates\") pod \"kube-apiserver-flannel-999044\" (UID: \"3690f33e832437d997f72405f698fc57\") " pod="kube-system/kube-apiserver-flannel-999044" + Nov 02 23:25:14 
flannel-999044 kubelet[2234]: I1102 23:25:14.865080 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/3690f33e832437d997f72405f698fc57-k8s-certs\") pod \"kube-apiserver-flannel-999044\" (UID: \"3690f33e832437d997f72405f698fc57\") " pod="kube-system/kube-apiserver-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.865088 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/3690f33e832437d997f72405f698fc57-usr-local-share-ca-certificates\") pod \"kube-apiserver-flannel-999044\" (UID: \"3690f33e832437d997f72405f698fc57\") " pod="kube-system/kube-apiserver-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.865096 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/b1803ec0312bde40f996876eaea36730-etc-ca-certificates\") pod \"kube-controller-manager-flannel-999044\" (UID: \"b1803ec0312bde40f996876eaea36730\") " pod="kube-system/kube-controller-manager-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.865138 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/b1803ec0312bde40f996876eaea36730-flexvolume-dir\") pod \"kube-controller-manager-flannel-999044\" (UID: \"b1803ec0312bde40f996876eaea36730\") " pod="kube-system/kube-controller-manager-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.865164 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/a62682ee440762df0f57297f78696397-etcd-data\") pod \"etcd-flannel-999044\" (UID: \"a62682ee440762df0f57297f78696397\") " pod="kube-system/etcd-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.865177 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/3690f33e832437d997f72405f698fc57-usr-share-ca-certificates\") pod \"kube-apiserver-flannel-999044\" (UID: \"3690f33e832437d997f72405f698fc57\") " pod="kube-system/kube-apiserver-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.865189 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/b1803ec0312bde40f996876eaea36730-kubeconfig\") pod \"kube-controller-manager-flannel-999044\" (UID: \"b1803ec0312bde40f996876eaea36730\") " pod="kube-system/kube-controller-manager-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.865202 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/b1803ec0312bde40f996876eaea36730-usr-share-ca-certificates\") pod \"kube-controller-manager-flannel-999044\" (UID: \"b1803ec0312bde40f996876eaea36730\") " pod="kube-system/kube-controller-manager-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.865212 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ca-certs\" (UniqueName: \"kubernetes.io/host-path/b1803ec0312bde40f996876eaea36730-ca-certs\") pod \"kube-controller-manager-flannel-999044\" (UID: \"b1803ec0312bde40f996876eaea36730\") " pod="kube-system/kube-controller-manager-flannel-999044" + Nov 02 23:25:14 flannel-999044 kubelet[2234]: I1102 23:25:14.865225 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/b1803ec0312bde40f996876eaea36730-k8s-certs\") pod \"kube-controller-manager-flannel-999044\" (UID: \"b1803ec0312bde40f996876eaea36730\") " pod="kube-system/kube-controller-manager-flannel-999044" + Nov 02 23:25:15 flannel-999044 kubelet[2234]: I1102 23:25:15.557427 2234 apiserver.go:52] "Watching apiserver" + Nov 02 23:25:15 flannel-999044 kubelet[2234]: I1102 23:25:15.565909 2234 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:25:15 flannel-999044 kubelet[2234]: I1102 23:25:15.602136 2234 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-flannel-999044" + Nov 02 23:25:15 flannel-999044 kubelet[2234]: I1102 23:25:15.602345 2234 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-flannel-999044" + Nov 02 23:25:15 flannel-999044 kubelet[2234]: E1102 23:25:15.606496 2234 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-flannel-999044\" already exists" pod="kube-system/kube-apiserver-flannel-999044" + Nov 02 23:25:15 flannel-999044 kubelet[2234]: E1102 23:25:15.607029 2234 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-flannel-999044\" already exists" pod="kube-system/etcd-flannel-999044" + Nov 02 23:25:15 flannel-999044 kubelet[2234]: I1102 23:25:15.621620 2234 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-flannel-999044" podStartSLOduration=1.621608017 podStartE2EDuration="1.621608017s" podCreationTimestamp="2025-11-02 23:25:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:15.617296953 +0000 UTC m=+1.099586500" watchObservedRunningTime="2025-11-02 23:25:15.621608017 +0000 UTC m=+1.103897569" + Nov 02 23:25:15 flannel-999044 kubelet[2234]: I1102 23:25:15.621717 2234 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-flannel-999044" podStartSLOduration=1.621712855 podStartE2EDuration="1.621712855s" podCreationTimestamp="2025-11-02 23:25:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:15.621537454 +0000 UTC m=+1.103827001" watchObservedRunningTime="2025-11-02 23:25:15.621712855 +0000 UTC m=+1.104002405" + Nov 02 23:25:15 flannel-999044 kubelet[2234]: I1102 23:25:15.629655 2234 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-scheduler-flannel-999044" podStartSLOduration=1.629644269 podStartE2EDuration="1.629644269s" podCreationTimestamp="2025-11-02 23:25:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:15.625313828 +0000 UTC m=+1.107603384" watchObservedRunningTime="2025-11-02 23:25:15.629644269 +0000 UTC m=+1.111933824" + Nov 02 23:25:15 flannel-999044 kubelet[2234]: I1102 23:25:15.634898 2234 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-apiserver-flannel-999044" podStartSLOduration=1.6348869449999999 podStartE2EDuration="1.634886945s" podCreationTimestamp="2025-11-02 23:25:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:15.629637553 +0000 UTC m=+1.111927111" watchObservedRunningTime="2025-11-02 23:25:15.634886945 +0000 UTC m=+1.117176490" + Nov 02 23:25:19 flannel-999044 kubelet[2234]: I1102 23:25:19.558941 2234 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24" + Nov 02 23:25:19 flannel-999044 kubelet[2234]: I1102 23:25:19.559303 2234 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24" + Nov 02 23:25:20 flannel-999044 kubelet[2234]: I1102 23:25:20.703614 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chrzs\" (UniqueName: \"kubernetes.io/projected/e086ec67-60c2-4c87-b47f-0aaeace7a929-kube-api-access-chrzs\") pod \"kube-flannel-ds-n2vp7\" (UID: \"e086ec67-60c2-4c87-b47f-0aaeace7a929\") " pod="kube-flannel/kube-flannel-ds-n2vp7" + Nov 02 23:25:20 flannel-999044 kubelet[2234]: I1102 23:25:20.703646 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/3c9fd6f5-5cf1-43d0-b5b0-e161716d85cc-lib-modules\") pod \"kube-proxy-kh6wp\" (UID: \"3c9fd6f5-5cf1-43d0-b5b0-e161716d85cc\") " pod="kube-system/kube-proxy-kh6wp" + Nov 02 23:25:20 flannel-999044 kubelet[2234]: I1102 23:25:20.703665 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/3c9fd6f5-5cf1-43d0-b5b0-e161716d85cc-xtables-lock\") pod \"kube-proxy-kh6wp\" (UID: \"3c9fd6f5-5cf1-43d0-b5b0-e161716d85cc\") " pod="kube-system/kube-proxy-kh6wp" + Nov 02 23:25:20 flannel-999044 kubelet[2234]: I1102 23:25:20.703680 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c24ms\" (UniqueName: \"kubernetes.io/projected/3c9fd6f5-5cf1-43d0-b5b0-e161716d85cc-kube-api-access-c24ms\") pod \"kube-proxy-kh6wp\" (UID: \"3c9fd6f5-5cf1-43d0-b5b0-e161716d85cc\") " pod="kube-system/kube-proxy-kh6wp" + Nov 02 23:25:20 flannel-999044 kubelet[2234]: I1102 23:25:20.703696 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-plugin\" (UniqueName: \"kubernetes.io/host-path/e086ec67-60c2-4c87-b47f-0aaeace7a929-cni-plugin\") pod \"kube-flannel-ds-n2vp7\" (UID: \"e086ec67-60c2-4c87-b47f-0aaeace7a929\") " pod="kube-flannel/kube-flannel-ds-n2vp7" + Nov 02 23:25:20 flannel-999044 kubelet[2234]: I1102 23:25:20.703712 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni\" (UniqueName: \"kubernetes.io/host-path/e086ec67-60c2-4c87-b47f-0aaeace7a929-cni\") pod \"kube-flannel-ds-n2vp7\" (UID: \"e086ec67-60c2-4c87-b47f-0aaeace7a929\") " pod="kube-flannel/kube-flannel-ds-n2vp7" + Nov 02 23:25:20 flannel-999044 kubelet[2234]: I1102 23:25:20.703749 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/3c9fd6f5-5cf1-43d0-b5b0-e161716d85cc-kube-proxy\") pod \"kube-proxy-kh6wp\" (UID: 
\"3c9fd6f5-5cf1-43d0-b5b0-e161716d85cc\") " pod="kube-system/kube-proxy-kh6wp" + Nov 02 23:25:20 flannel-999044 kubelet[2234]: I1102 23:25:20.703770 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/e086ec67-60c2-4c87-b47f-0aaeace7a929-run\") pod \"kube-flannel-ds-n2vp7\" (UID: \"e086ec67-60c2-4c87-b47f-0aaeace7a929\") " pod="kube-flannel/kube-flannel-ds-n2vp7" + Nov 02 23:25:20 flannel-999044 kubelet[2234]: I1102 23:25:20.703782 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flannel-cfg\" (UniqueName: \"kubernetes.io/configmap/e086ec67-60c2-4c87-b47f-0aaeace7a929-flannel-cfg\") pod \"kube-flannel-ds-n2vp7\" (UID: \"e086ec67-60c2-4c87-b47f-0aaeace7a929\") " pod="kube-flannel/kube-flannel-ds-n2vp7" + Nov 02 23:25:20 flannel-999044 kubelet[2234]: I1102 23:25:20.703795 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/e086ec67-60c2-4c87-b47f-0aaeace7a929-xtables-lock\") pod \"kube-flannel-ds-n2vp7\" (UID: \"e086ec67-60c2-4c87-b47f-0aaeace7a929\") " pod="kube-flannel/kube-flannel-ds-n2vp7" + Nov 02 23:25:21 flannel-999044 kubelet[2234]: I1102 23:25:21.629483 2234 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-kh6wp" podStartSLOduration=1.629468441 podStartE2EDuration="1.629468441s" podCreationTimestamp="2025-11-02 23:25:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:21.62932295 +0000 UTC m=+7.111612486" watchObservedRunningTime="2025-11-02 23:25:21.629468441 +0000 UTC m=+7.111757991" + Nov 02 23:25:34 flannel-999044 kubelet[2234]: I1102 23:25:34.531941 2234 kubelet_node_status.go:439] "Fast updating node status as it just became ready" + Nov 02 23:25:34 flannel-999044 kubelet[2234]: I1102 23:25:34.592348 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/73b22dda-4769-4201-bf3a-3fc795b3da42-config-volume\") pod \"coredns-66bc5c9577-vnq76\" (UID: \"73b22dda-4769-4201-bf3a-3fc795b3da42\") " pod="kube-system/coredns-66bc5c9577-vnq76" + Nov 02 23:25:34 flannel-999044 kubelet[2234]: I1102 23:25:34.592403 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/a15881a3-20e9-43dc-96e9-53aff40c42b7-tmp\") pod \"storage-provisioner\" (UID: \"a15881a3-20e9-43dc-96e9-53aff40c42b7\") " pod="kube-system/storage-provisioner" + Nov 02 23:25:34 flannel-999044 kubelet[2234]: I1102 23:25:34.592416 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8djq\" (UniqueName: \"kubernetes.io/projected/73b22dda-4769-4201-bf3a-3fc795b3da42-kube-api-access-h8djq\") pod \"coredns-66bc5c9577-vnq76\" (UID: \"73b22dda-4769-4201-bf3a-3fc795b3da42\") " pod="kube-system/coredns-66bc5c9577-vnq76" + Nov 02 23:25:34 flannel-999044 kubelet[2234]: I1102 23:25:34.592433 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7bgk\" (UniqueName: \"kubernetes.io/projected/a15881a3-20e9-43dc-96e9-53aff40c42b7-kube-api-access-k7bgk\") pod \"storage-provisioner\" (UID: \"a15881a3-20e9-43dc-96e9-53aff40c42b7\") 
" pod="kube-system/storage-provisioner" + Nov 02 23:25:34 flannel-999044 kubelet[2234]: E1102 23:25:34.973635 2234 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"bbe397e932a520c62237683e32254dce7e41e4ec905b51c4a7271d0361d705c4\" network for pod \"coredns-66bc5c9577-vnq76\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-vnq76_kube-system\" network: plugin type=\"flannel\" failed (add): failed to load flannel 'subnet.env' file: open /run/flannel/subnet.env: no such file or directory. Check the flannel pod log for this node." + Nov 02 23:25:34 flannel-999044 kubelet[2234]: E1102 23:25:34.973690 2234 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to set up sandbox container \"bbe397e932a520c62237683e32254dce7e41e4ec905b51c4a7271d0361d705c4\" network for pod \"coredns-66bc5c9577-vnq76\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-vnq76_kube-system\" network: plugin type=\"flannel\" failed (add): failed to load flannel 'subnet.env' file: open /run/flannel/subnet.env: no such file or directory. Check the flannel pod log for this node." pod="kube-system/coredns-66bc5c9577-vnq76" + Nov 02 23:25:34 flannel-999044 kubelet[2234]: E1102 23:25:34.973706 2234 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"bbe397e932a520c62237683e32254dce7e41e4ec905b51c4a7271d0361d705c4\" network for pod \"coredns-66bc5c9577-vnq76\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-vnq76_kube-system\" network: plugin type=\"flannel\" failed (add): failed to load flannel 'subnet.env' file: open /run/flannel/subnet.env: no such file or directory. Check the flannel pod log for this node." pod="kube-system/coredns-66bc5c9577-vnq76" + Nov 02 23:25:34 flannel-999044 kubelet[2234]: E1102 23:25:34.973739 2234 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-66bc5c9577-vnq76_kube-system(73b22dda-4769-4201-bf3a-3fc795b3da42)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-66bc5c9577-vnq76_kube-system(73b22dda-4769-4201-bf3a-3fc795b3da42)\\\": rpc error: code = Unknown desc = failed to set up sandbox container \\\"bbe397e932a520c62237683e32254dce7e41e4ec905b51c4a7271d0361d705c4\\\" network for pod \\\"coredns-66bc5c9577-vnq76\\\": networkPlugin cni failed to set up pod \\\"coredns-66bc5c9577-vnq76_kube-system\\\" network: plugin type=\\\"flannel\\\" failed (add): failed to load flannel 'subnet.env' file: open /run/flannel/subnet.env: no such file or directory. 
Check the flannel pod log for this node.\"" pod="kube-system/coredns-66bc5c9577-vnq76" podUID="73b22dda-4769-4201-bf3a-3fc795b3da42" + Nov 02 23:25:35 flannel-999044 kubelet[2234]: I1102 23:25:35.680240 2234 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bbe397e932a520c62237683e32254dce7e41e4ec905b51c4a7271d0361d705c4" + Nov 02 23:25:35 flannel-999044 kubelet[2234]: I1102 23:25:35.684671 2234 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-flannel/kube-flannel-ds-n2vp7" podStartSLOduration=2.716764206 podStartE2EDuration="15.684659742s" podCreationTimestamp="2025-11-02 23:25:20 +0000 UTC" firstStartedPulling="2025-11-02 23:25:21.086165085 +0000 UTC m=+6.568454625" lastFinishedPulling="2025-11-02 23:25:34.054060631 +0000 UTC m=+19.536350161" observedRunningTime="2025-11-02 23:25:35.684453888 +0000 UTC m=+21.166743439" watchObservedRunningTime="2025-11-02 23:25:35.684659742 +0000 UTC m=+21.166949295" + Nov 02 23:25:35 flannel-999044 kubelet[2234]: I1102 23:25:35.689085 2234 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=14.689072132 podStartE2EDuration="14.689072132s" podCreationTimestamp="2025-11-02 23:25:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:35.688889298 +0000 UTC m=+21.171178848" watchObservedRunningTime="2025-11-02 23:25:35.689072132 +0000 UTC m=+21.171361680" + Nov 02 23:25:35 flannel-999044 kubelet[2234]: E1102 23:25:35.786206 2234 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"3b78cbe0cd110c5f32a9ffca2a74d6322fc7e180011bc791cfcfc48e86ebab1c\" network for pod \"coredns-66bc5c9577-vnq76\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-vnq76_kube-system\" network: plugin type=\"flannel\" failed (add): failed to load flannel 'subnet.env' file: open /run/flannel/subnet.env: no such file or directory. Check the flannel pod log for this node." + Nov 02 23:25:35 flannel-999044 kubelet[2234]: E1102 23:25:35.786245 2234 kuberuntime_sandbox.go:71] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to set up sandbox container \"3b78cbe0cd110c5f32a9ffca2a74d6322fc7e180011bc791cfcfc48e86ebab1c\" network for pod \"coredns-66bc5c9577-vnq76\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-vnq76_kube-system\" network: plugin type=\"flannel\" failed (add): failed to load flannel 'subnet.env' file: open /run/flannel/subnet.env: no such file or directory. Check the flannel pod log for this node." pod="kube-system/coredns-66bc5c9577-vnq76" + Nov 02 23:25:35 flannel-999044 kubelet[2234]: E1102 23:25:35.786260 2234 kuberuntime_manager.go:1343] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to set up sandbox container \"3b78cbe0cd110c5f32a9ffca2a74d6322fc7e180011bc791cfcfc48e86ebab1c\" network for pod \"coredns-66bc5c9577-vnq76\": networkPlugin cni failed to set up pod \"coredns-66bc5c9577-vnq76_kube-system\" network: plugin type=\"flannel\" failed (add): failed to load flannel 'subnet.env' file: open /run/flannel/subnet.env: no such file or directory. Check the flannel pod log for this node." 
pod="kube-system/coredns-66bc5c9577-vnq76" + Nov 02 23:25:35 flannel-999044 kubelet[2234]: E1102 23:25:35.786291 2234 pod_workers.go:1324] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-66bc5c9577-vnq76_kube-system(73b22dda-4769-4201-bf3a-3fc795b3da42)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-66bc5c9577-vnq76_kube-system(73b22dda-4769-4201-bf3a-3fc795b3da42)\\\": rpc error: code = Unknown desc = failed to set up sandbox container \\\"3b78cbe0cd110c5f32a9ffca2a74d6322fc7e180011bc791cfcfc48e86ebab1c\\\" network for pod \\\"coredns-66bc5c9577-vnq76\\\": networkPlugin cni failed to set up pod \\\"coredns-66bc5c9577-vnq76_kube-system\\\" network: plugin type=\\\"flannel\\\" failed (add): failed to load flannel 'subnet.env' file: open /run/flannel/subnet.env: no such file or directory. Check the flannel pod log for this node.\"" pod="kube-system/coredns-66bc5c9577-vnq76" podUID="73b22dda-4769-4201-bf3a-3fc795b3da42" + Nov 02 23:25:36 flannel-999044 kubelet[2234]: I1102 23:25:36.689433 2234 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b78cbe0cd110c5f32a9ffca2a74d6322fc7e180011bc791cfcfc48e86ebab1c" + Nov 02 23:25:37 flannel-999044 kubelet[2234]: I1102 23:25:37.714015 2234 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-vnq76" podStartSLOduration=17.714001071 podStartE2EDuration="17.714001071s" podCreationTimestamp="2025-11-02 23:25:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:37.70828616 +0000 UTC m=+23.190575711" watchObservedRunningTime="2025-11-02 23:25:37.714001071 +0000 UTC m=+23.196290616" + Nov 02 23:25:45 flannel-999044 kubelet[2234]: I1102 23:25:45.753699 2234 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bq7d\" (UniqueName: \"kubernetes.io/projected/58acc91e-9380-4dfb-ab88-7c9ca7303409-kube-api-access-7bq7d\") pod \"netcat-cd4db9dbf-r5v44\" (UID: \"58acc91e-9380-4dfb-ab88-7c9ca7303409\") " pod="default/netcat-cd4db9dbf-r5v44" + Nov 02 23:25:47 flannel-999044 kubelet[2234]: I1102 23:25:47.751440 2234 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/netcat-cd4db9dbf-r5v44" podStartSLOduration=1.8276032070000001 podStartE2EDuration="2.7514255s" podCreationTimestamp="2025-11-02 23:25:45 +0000 UTC" firstStartedPulling="2025-11-02 23:25:46.088241711 +0000 UTC m=+31.570531241" lastFinishedPulling="2025-11-02 23:25:47.012064004 +0000 UTC m=+32.494353534" observedRunningTime="2025-11-02 23:25:47.751303752 +0000 UTC m=+33.233593302" watchObservedRunningTime="2025-11-02 23:25:47.7514255 +0000 UTC m=+33.233715045" + Nov 02 23:26:00 flannel-999044 kubelet[2234]: E1102 23:26:00.174094 2234 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp [::1]:49540->[::1]:43675: write tcp [::1]:49540->[::1]:43675: write: broken pipe + + + >>> host: /etc/kubernetes/kubelet.conf: + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://192.168.85.2:8443 + name: mk + contexts: + - context: + cluster: mk + user: system:node:flannel-999044 + name: system:node:flannel-999044@mk + current-context: system:node:flannel-999044@mk + kind: Config + users: + - name: system:node:flannel-999044 + user: + client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem + client-key: /var/lib/kubelet/pki/kubelet-client-current.pem + + + >>> host: /var/lib/kubelet/config.yaml: + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + >>> k8s: kubectl config: + apiVersion: v1 + clusters: + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:16 UTC + provider: 
minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.103.2:8443 + name: bridge-999044 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:31 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.76.2:8443 + name: enable-default-cni-999044 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:20 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.85.2:8443 + name: flannel-999044 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:20 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.94.2:8443 + name: kubenet-999044 + contexts: + - context: + cluster: bridge-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:16 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: bridge-999044 + name: bridge-999044 + - context: + cluster: enable-default-cni-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:31 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: enable-default-cni-999044 + name: enable-default-cni-999044 + - context: + cluster: flannel-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:20 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: flannel-999044 + name: flannel-999044 + - context: + cluster: kubenet-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:20 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: kubenet-999044 + name: kubenet-999044 + current-context: enable-default-cni-999044 + kind: Config + users: + - name: bridge-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/bridge-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/bridge-999044/client.key + - name: enable-default-cni-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/enable-default-cni-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/enable-default-cni-999044/client.key + - name: flannel-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/flannel-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/flannel-999044/client.key + - name: kubenet-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kubenet-999044/client.crt + client-key: 
/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kubenet-999044/client.key + + + >>> k8s: cms: + apiVersion: v1 + items: + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:25:20Z" + name: kube-root-ca.crt + namespace: default + resourceVersion: "340" + uid: dae08461-2c10-4451-8d5c-20e5eed1bee4 + - apiVersion: v1 + data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "10.244.0.0/16", + "EnableNFTables": false, + "Backend": { + "Type": "vxlan" + } + } + kind: ConfigMap + metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"v1","data":{"cni-conf.json":"{\n \"name\": \"cbr0\",\n \"cniVersion\": \"0.3.1\",\n \"plugins\": [\n {\n \"type\": \"flannel\",\n \"delegate\": {\n \"hairpinMode\": true,\n \"isDefaultGateway\": true\n }\n },\n {\n \"type\": \"portmap\",\n \"capabilities\": {\n \"portMappings\": true\n }\n }\n ]\n}\n","net-conf.json":"{\n \"Network\": \"10.244.0.0/16\",\n \"EnableNFTables\": false,\n \"Backend\": {\n \"Type\": \"vxlan\"\n }\n}\n"},"kind":"ConfigMap","metadata":{"annotations":{},"labels":{"app":"flannel","k8s-app":"flannel","tier":"node"},"name":"kube-flannel-cfg","namespace":"kube-flannel"}} + creationTimestamp: "2025-11-02T23:25:15Z" + labels: + app: flannel + k8s-app: flannel + tier: node + name: kube-flannel-cfg + namespace: kube-flannel + resourceVersion: "301" + uid: 1fb09866-5b58-4358-beb2-7211f783072f + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 
3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:25:20Z" + name: kube-root-ca.crt + namespace: kube-flannel + resourceVersion: "341" + uid: 74a5723e-8fcf-4908-a668-f0f92525b0af + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. 
+ creationTimestamp: "2025-11-02T23:25:20Z" + name: kube-root-ca.crt + namespace: kube-node-lease + resourceVersion: "342" + uid: 514c2ac1-18a5-4c32-bbc6-b0c70a45ec1f + - apiVersion: v1 + data: + jws-kubeconfig-kz3rmc: eyJhbGciOiJIUzI1NiIsImtpZCI6Imt6M3JtYyJ9..UXuG1LSrC_uyhTNZsoygSrDf6byqbdTOC7AzA9qG9PE + kubeconfig: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://control-plane.minikube.internal:8443 + name: "" + contexts: null + current-context: "" + kind: Config + users: null + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:14Z" + name: cluster-info + namespace: kube-public + resourceVersion: "345" + uid: b30bf0a3-37e0-4fcf-8927-dec22854dd4c + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal 
endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:25:20Z" + name: kube-root-ca.crt + namespace: kube-public + resourceVersion: "338" + uid: 8114e7a7-5a1f-4f16-a40e-f52ef6968ede + - apiVersion: v1 + data: + Corefile: | + .:53 { + log + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + hosts { + 192.168.85.1 host.minikube.internal + fallthrough + } + forward . /etc/resolv.conf { + max_concurrent 1000 + } + cache 30 { + disable success cluster.local + disable denial cluster.local + } + loop + reload + loadbalance + } + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:14Z" + name: coredns + namespace: kube-system + resourceVersion: "390" + uid: 8feb66b8-f656-4be7-9d68-73aea9a432c1 + - apiVersion: v1 + data: + client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + requestheader-allowed-names: '["front-proxy-client"]' + requestheader-client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDETCCAfmgAwIBAgIIC/B7twBJCqwwDQYJKoZIhvcNAQELBQAwGTEXMBUGA1UE + AxMOZnJvbnQtcHJveHktY2EwHhcNMjUxMTAyMjMyMDA1WhcNMzUxMDMxMjMyNTA1 + WjAZMRcwFQYDVQQDEw5mcm9udC1wcm94eS1jYTCCASIwDQYJKoZIhvcNAQEBBQAD + ggEPADCCAQoCggEBAK/SY5gsqMXn2yZ491ggO2N9+LUq+4FQoAApZ792Om+hN1or + ptiiCI+frPhQTGF8DqSljZGURhuo97BnW3gqw0kW4iMGXJmc8ijSbrKZdnPXlkyS + TQvWcaYWNm7OqL2/7UqDk7jQ6v5PCASMKgIRtw9VFsqwM3FtdWyas1w97AWiFQi1 + Ev18nDvnc4jboBvCWgwPqkIOFEZtFVosEw477sp5iMsdUfqcIrjHr7GqmLKpp3dP + LpWS0c2368khXE0LUmWAfMRaOtppjCZJ17GtP+vKEZuazadposub3sQKfgxNFG1Y + K6ipLiP2aTSgiK/wkGeM0AQrkLi6EymsngRiw1kCAwEAAaNdMFswDgYDVR0PAQH/ + BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFKEN7tjzBe/mfGmmuJO+ + tNVGbvWYMBkGA1UdEQQSMBCCDmZyb250LXByb3h5LWNhMA0GCSqGSIb3DQEBCwUA + A4IBAQAb1LDXoYAaTxhDfkb90as/f2t62nCNDYtSixrEsTzjkMtcxgkA7bdqUAU7 + aUBrCTvGaL5gMha1uVkeeh5UiDQpVdcojr25TWOtqWO2NjuqnoOX/QEB5OaTNM7P + NL+Rnh+h8XRna6HSczt/nqVFmQ+fKL1QLz/hFCCbi57aJaOOrL/lRnGpK35beqLt + xWtAs8O5iW8Ub3kCnPXfSUf6rFDOdY08MP+KnPHRwyRFn9Ydg5t6rN5AqGlf19nS + aoJ9D/3MQ4J3p1B4ipbUp2gjFOqR4EC7/m9zutQJc66XIRUL2hczmmtjwQApOILB + SO6oZJACzEpqMFUcwHiH3oQnl8JV + -----END CERTIFICATE----- + requestheader-extra-headers-prefix: '["X-Remote-Extra-"]' + requestheader-group-headers: '["X-Remote-Group"]' + 
requestheader-username-headers: '["X-Remote-User"]' + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:12Z" + name: extension-apiserver-authentication + namespace: kube-system + resourceVersion: "21" + uid: 2cac3818-c640-4736-b4e4-ebc840c6b6cc + - apiVersion: v1 + data: + since: "2025-11-02" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:12Z" + name: kube-apiserver-legacy-service-account-token-tracking + namespace: kube-system + resourceVersion: "64" + uid: 43344978-fd30-448d-94d4-4c5d6ca70760 + - apiVersion: v1 + data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: 0.0.0.0 + bindAddressHardFail: false + clientConnection: + acceptContentTypes: "" + burst: 0 + contentType: "" + kubeconfig: /var/lib/kube-proxy/kubeconfig.conf + qps: 0 + clusterCIDR: 10.244.0.0/16 + configSyncPeriod: 0s + conntrack: + maxPerCore: 0 + min: null + tcpBeLiberal: false + tcpCloseWaitTimeout: 0s + tcpEstablishedTimeout: 0s + udpStreamTimeout: 0s + udpTimeout: 0s + detectLocal: + bridgeInterface: "" + interfaceNamePrefix: "" + detectLocalMode: "" + enableProfiling: false + healthzBindAddress: "" + hostnameOverride: "" + iptables: + localhostNodePorts: null + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + ipvs: + excludeCIDRs: null + minSyncPeriod: 0s + scheduler: "" + strictARP: false + syncPeriod: 0s + tcpFinTimeout: 0s + tcpTimeout: 0s + udpTimeout: 0s + kind: KubeProxyConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + metricsBindAddress: 0.0.0.0:10249 + mode: "" + nftables: + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + nodePortAddresses: null + oomScoreAdj: null + portRange: "" + showHiddenMetricsForVersion: "" + winkernel: + enableDSR: false + forwardHealthCheckVip: false + networkName: "" + rootHnsEndpointName: "" + sourceVip: "" + kubeconfig.conf: |- + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://control-plane.minikube.internal:8443 + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:14Z" + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system + resourceVersion: "287" + uid: edd6d8b8-bcab-4162-bd8a-8f9fd8c2301b + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + 
C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:25:20Z" + name: kube-root-ca.crt + namespace: kube-system + resourceVersion: "339" + uid: ab078d64-4135-475e-82c7-19f8c482aa59 + - apiVersion: v1 + data: + ClusterConfiguration: | + apiServer: + certSANs: + - 127.0.0.1 + - localhost + - 192.168.85.2 + extraArgs: + - name: enable-admission-plugins + value: NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + apiVersion: kubeadm.k8s.io/v1beta4 + caCertificateValidityPeriod: 87600h0m0s + certificateValidityPeriod: 8760h0m0s + certificatesDir: /var/lib/minikube/certs + clusterName: mk + controlPlaneEndpoint: control-plane.minikube.internal:8443 + controllerManager: + extraArgs: + - name: allocate-node-cidrs + value: "true" + - name: leader-elect + value: "false" + dns: {} + encryptionAlgorithm: RSA-2048 + etcd: + local: + dataDir: /var/lib/minikube/etcd + imageRepository: registry.k8s.io + kind: ClusterConfiguration + kubernetesVersion: v1.34.1 + networking: + dnsDomain: cluster.local + podSubnet: 10.244.0.0/16 + serviceSubnet: 10.96.0.0/12 + proxy: {} + scheduler: + extraArgs: + - name: leader-elect + value: "false" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:14Z" + name: kubeadm-config + namespace: kube-system + resourceVersion: "243" + uid: e8c02e1f-e73f-41cd-8fea-414594f4b41f + - apiVersion: v1 + data: + kubelet: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:14Z" + name: kubelet-config + namespace: kube-system + resourceVersion: 
"246" + uid: f3147697-72b8-4fe9-b0e2-b48e6baafdbb + kind: List + metadata: + resourceVersion: "" + + + >>> host: docker daemon status: + ● docker.service - Docker Application Container Engine + Loaded: loaded (]8;;file://flannel-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;;; enabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:25:04 UTC; 1min 0s ago + TriggeredBy: ● docker.socket + Docs: ]8;;https://docs.docker.comhttps://docs.docker.com]8;; + Main PID: 1047 (dockerd) + Tasks: 14 + Memory: 277.0M + CPU: 4.107s + CGroup: /system.slice/docker.service + └─1047 /usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + + Nov 02 23:25:04 flannel-999044 dockerd[1047]: time="2025-11-02T23:25:04.170420152Z" level=info msg="Completed buildkit initialization" + Nov 02 23:25:04 flannel-999044 dockerd[1047]: time="2025-11-02T23:25:04.173801221Z" level=info msg="Daemon has completed initialization" + Nov 02 23:25:04 flannel-999044 dockerd[1047]: time="2025-11-02T23:25:04.173867317Z" level=info msg="API listen on /run/docker.sock" + Nov 02 23:25:04 flannel-999044 systemd[1]: Started docker.service - Docker Application Container Engine. + Nov 02 23:25:04 flannel-999044 dockerd[1047]: time="2025-11-02T23:25:04.173880600Z" level=info msg="API listen on [::]:2376" + Nov 02 23:25:04 flannel-999044 dockerd[1047]: time="2025-11-02T23:25:04.173894136Z" level=info msg="API listen on /var/run/docker.sock" + Nov 02 23:25:32 flannel-999044 dockerd[1047]: time="2025-11-02T23:25:32.301548492Z" level=info msg="ignoring event" container=598e65e6440d7dab0717ffd9479baad46841beb559ae47666b4faa246fb31622 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + Nov 02 23:25:34 flannel-999044 dockerd[1047]: time="2025-11-02T23:25:34.381667881Z" level=info msg="ignoring event" container=66c3e3470d59f875fc92049b8ab0495dec2c3d15e242f56d7a7a2e9794d14c96 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + Nov 02 23:25:34 flannel-999044 dockerd[1047]: time="2025-11-02T23:25:34.963346611Z" level=info msg="ignoring event" container=bbe397e932a520c62237683e32254dce7e41e4ec905b51c4a7271d0361d705c4 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + Nov 02 23:25:35 flannel-999044 dockerd[1047]: time="2025-11-02T23:25:35.775198530Z" level=info msg="ignoring event" container=3b78cbe0cd110c5f32a9ffca2a74d6322fc7e180011bc791cfcfc48e86ebab1c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + + + >>> host: docker daemon config: + # ]8;;file://flannel-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;; + [Unit] + Description=Docker Application Container Engine + Documentation=https://docs.docker.com + After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target + Wants=network-online.target containerd.service + Requires=docker.socket + StartLimitBurst=3 + StartLimitIntervalSec=60 + + [Service] + Type=notify + Restart=always + + + + # This file is a systemd drop-in unit that inherits from the base dockerd configuration. + # The base configuration already specifies an 'ExecStart=...' command. 
The first directive + # here is to clear out that command inherited from the base configuration. Without this, + # the command from the base configuration and the command specified here are treated as + # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd + # will catch this invalid input and refuse to start the service with an error like: + # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. + + # NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other + # container runtimes. If left unlimited, it may result in OOM issues with MySQL. + ExecStart= + ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + ExecReload=/bin/kill -s HUP $MAINPID + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Uncomment TasksMax if your systemd version supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + TimeoutStartSec=0 + + # set delegate yes so that systemd does not reset the cgroups of docker containers + Delegate=yes + + # kill only the docker process, not all processes in the cgroup + KillMode=process + OOMScoreAdjust=-500 + + [Install] + WantedBy=multi-user.target + + + >>> host: /etc/docker/daemon.json: + {"exec-opts":["native.cgroupdriver=systemd"],"log-driver":"json-file","log-opts":{"max-size":"100m"},"storage-driver":"overlay2"} + + >>> host: docker system info: + Client: Docker Engine - Community + Version: 28.5.1 + Context: default + Debug Mode: false + Plugins: + buildx: Docker Buildx (Docker Inc.) 
+ Version: v0.29.1 + Path: /usr/libexec/docker/cli-plugins/docker-buildx + + Server: + Containers: 22 + Running: 18 + Paused: 0 + Stopped: 4 + Images: 11 + Server Version: 28.5.1 + Storage Driver: overlay2 + Backing Filesystem: extfs + Supports d_type: true + Using metacopy: false + Native Overlay Diff: true + userxattr: false + Logging Driver: json-file + Cgroup Driver: systemd + Cgroup Version: 2 + Plugins: + Volume: local + Network: bridge host ipvlan macvlan null overlay + Log: awslogs fluentd gcplogs gelf journald json-file local splunk syslog + CDI spec directories: + /etc/cdi + /var/run/cdi + Swarm: inactive + Runtimes: io.containerd.runc.v2 runc + Default Runtime: runc + Init Binary: docker-init + containerd version: b98a3aace656320842a23f4a392a33f46af97866 + runc version: v1.3.0-0-g4ca628d1 + init version: de40ad0 + Security Options: + seccomp + Profile: builtin + cgroupns + Kernel Version: 6.6.97+ + Operating System: Debian GNU/Linux 12 (bookworm) + OSType: linux + Architecture: x86_64 + CPUs: 8 + Total Memory: 60.83GiB + Name: flannel-999044 + ID: 211f4485-dc80-4f93-88f6-2340b50f7cc6 + Docker Root Dir: /var/lib/docker + Debug Mode: false + No Proxy: control-plane.minikube.internal + Labels: + provider=docker + Experimental: false + Insecure Registries: + 10.96.0.0/12 + ::1/128 + 127.0.0.0/8 + Live Restore Enabled: false + + + + >>> host: cri-docker daemon status: + ● cri-docker.service - CRI Interface for Docker Application Container Engine + Loaded: loaded (]8;;file://flannel-999044/lib/systemd/system/cri-docker.service/lib/systemd/system/cri-docker.service]8;;; disabled; preset: enabled) + Drop-In: /etc/systemd/system/cri-docker.service.d + └─]8;;file://flannel-999044/etc/systemd/system/cri-docker.service.d/10-cni.conf10-cni.conf]8;; + Active: active (running) since Sun 2025-11-02 23:25:04 UTC; 1min 0s ago + TriggeredBy: ● cri-docker.socket + Docs: ]8;;https://docs.mirantis.comhttps://docs.mirantis.com]8;; + Main PID: 1355 (cri-dockerd) + Tasks: 14 + Memory: 17.8M + CPU: 701ms + CGroup: /system.slice/cri-docker.service + └─1355 /usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + Nov 02 23:25:35 flannel-999044 cri-dockerd[1355]: time="2025-11-02T23:25:35Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/3b78cbe0cd110c5f32a9ffca2a74d6322fc7e180011bc791cfcfc48e86ebab1c/resolv.conf as [nameserver 192.168.85.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:35 flannel-999044 cri-dockerd[1355]: time="2025-11-02T23:25:35Z" level=error msg="Error adding pod kube-system/coredns-66bc5c9577-vnq76 to network {docker 3b78cbe0cd110c5f32a9ffca2a74d6322fc7e180011bc791cfcfc48e86ebab1c}:/proc/3297/ns/net:flannel:cbr0: plugin type=\"flannel\" failed (add): failed to load flannel 'subnet.env' file: open /run/flannel/subnet.env: no such file or directory. Check the flannel pod log for this node." 
+ Nov 02 23:25:36 flannel-999044 cri-dockerd[1355]: time="2025-11-02T23:25:36Z" level=info msg="Failed to read pod IP from plugin/docker: networkPlugin cni failed on the status hook for pod \"coredns-66bc5c9577-vnq76_kube-system\": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container \"3b78cbe0cd110c5f32a9ffca2a74d6322fc7e180011bc791cfcfc48e86ebab1c\"" + Nov 02 23:25:36 flannel-999044 cri-dockerd[1355]: time="2025-11-02T23:25:36Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/385deb28efb1ba71d375d71fb40f9614945b4e8c91e31e24a5df693348380228/resolv.conf as [nameserver 192.168.85.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:36 flannel-999044 cri-dockerd[1355]: map[string]interface {}{"cniVersion":"0.3.1", "hairpinMode":true, "ipMasq":false, "ipam":map[string]interface {}{"ranges":[][]map[string]interface {}{[]map[string]interface {}{map[string]interface {}{"subnet":"10.244.0.0/24"}}}, "routes":[]types.Route{types.Route{Dst:net.IPNet{IP:net.IP{0xa, 0xf4, 0x0, 0x0}, Mask:net.IPMask{0xff, 0xff, 0x0, 0x0}}, GW:net.IP(nil), MTU:0, AdvMSS:0, Priority:0, Table:(*int)(nil), Scope:(*int)(nil)}}, "type":"host-local"}, "isDefaultGateway":true, "isGateway":true, "mtu":(*uint)(0xc000098700), "name":"cbr0", "type":"bridge"} + Nov 02 23:25:36 flannel-999044 cri-dockerd[1355]: delegateAdd: netconf sent to delegate plugin: + Nov 02 23:25:46 flannel-999044 cri-dockerd[1355]: {"cniVersion":"0.3.1","hairpinMode":true,"ipMasq":false,"ipam":{"ranges":[[{"subnet":"10.244.0.0/24"}]],"routes":[{"dst":"10.244.0.0/16"}],"type":"host-local"},"isDefaultGateway":true,"isGateway":true,"mtu":1450,"name":"cbr0","type":"bridge"}time="2025-11-02T23:25:46Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/62c7e686356854921531eda22433ab1872e507740887b1618d41531e91140f81/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local test-pods.svc.cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:46 flannel-999044 cri-dockerd[1355]: map[string]interface {}{"cniVersion":"0.3.1", "hairpinMode":true, "ipMasq":false, "ipam":map[string]interface {}{"ranges":[][]map[string]interface {}{[]map[string]interface {}{map[string]interface {}{"subnet":"10.244.0.0/24"}}}, "routes":[]types.Route{types.Route{Dst:net.IPNet{IP:net.IP{0xa, 0xf4, 0x0, 0x0}, Mask:net.IPMask{0xff, 0xff, 0x0, 0x0}}, GW:net.IP(nil), MTU:0, AdvMSS:0, Priority:0, Table:(*int)(nil), Scope:(*int)(nil)}}, "type":"host-local"}, "isDefaultGateway":true, "isGateway":true, "mtu":(*uint)(0xc000010770), "name":"cbr0", "type":"bridge"} + Nov 02 23:25:46 flannel-999044 cri-dockerd[1355]: delegateAdd: netconf sent to delegate plugin: + Nov 02 23:25:47 flannel-999044 cri-dockerd[1355]: {"cniVersion":"0.3.1","hairpinMode":true,"ipMasq":false,"ipam":{"ranges":[[{"subnet":"10.244.0.0/24"}]],"routes":[{"dst":"10.244.0.0/16"}],"type":"host-local"},"isDefaultGateway":true,"isGateway":true,"mtu":1450,"name":"cbr0","type":"bridge"}time="2025-11-02T23:25:47Z" level=info msg="Stop pulling image registry.k8s.io/e2e-test-images/agnhost:2.40: Status: Downloaded newer image for registry.k8s.io/e2e-test-images/agnhost:2.40" + + + >>> host: cri-docker daemon config: + # /lib/systemd/system/cri-docker.service + [Unit] + Description=CRI
Interface for Docker Application Container Engine + Documentation=https://docs.mirantis.com + After=network-online.target firewalld.service docker.service + Wants=network-online.target + Requires=cri-docker.socket + + [Service] + Type=notify + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// + ExecReload=/bin/kill -s HUP $MAINPID + TimeoutSec=0 + RestartSec=2 + Restart=always + + # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. + # Both the old, and new location are accepted by systemd 229 and up, so using the old location + # to make them work for either version of systemd. + StartLimitBurst=3 + + # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. + # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make + # this option work for either version of systemd. + StartLimitInterval=60s + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Comment TasksMax if your systemd version does not support it. + # Only systemd 226 and above support this option. + TasksMax=infinity + Delegate=yes + KillMode=process + + [Install] + WantedBy=multi-user.target + + # /etc/systemd/system/cri-docker.service.d/10-cni.conf + [Service] + ExecStart= + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + + >>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf: + [Service] + ExecStart= + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + >>> host: /usr/lib/systemd/system/cri-docker.service: + [Unit] + Description=CRI Interface for Docker Application Container Engine + Documentation=https://docs.mirantis.com + After=network-online.target firewalld.service docker.service + Wants=network-online.target + Requires=cri-docker.socket + + [Service] + Type=notify + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// + ExecReload=/bin/kill -s HUP $MAINPID + TimeoutSec=0 + RestartSec=2 + Restart=always + + # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. + # Both the old, and new location are accepted by systemd 229 and up, so using the old location + # to make them work for either version of systemd. + StartLimitBurst=3 + + # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. + # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make + # this option work for either version of systemd. + StartLimitInterval=60s + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Comment TasksMax if your systemd version does not support it. + # Only systemd 226 and above support this option.
+ TasksMax=infinity + Delegate=yes + KillMode=process + + [Install] + WantedBy=multi-user.target + + + >>> host: cri-dockerd version: + cri-dockerd dev (HEAD) + + + >>> host: containerd daemon status: + ● containerd.service - containerd container runtime + Loaded: loaded (/lib/systemd/system/containerd.service; disabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:25:03 UTC; 1min 2s ago + Docs: https://containerd.io + Main PID: 1032 (containerd) + Tasks: 212 + Memory: 103.0M + CPU: 1.279s + CGroup: /system.slice/containerd.service + ├─1032 /usr/bin/containerd + ├─1825 /usr/bin/containerd-shim-runc-v2 -namespace moby -id f8883bb7ee127f45ab9847c3e47858e1587e1b34cf5f920371421b063b360b0c -address /run/containerd/containerd.sock + ├─1827 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 7571d93fc4dbc0315765b5f3e6a3dffc65d7d1c0ef3adacefd979f3d852f3832 -address /run/containerd/containerd.sock + ├─1886 /usr/bin/containerd-shim-runc-v2 -namespace moby -id bd7ec8558f3389bc4bf617a4335882be29a17be12839b0ade0743801b4f43b82 -address /run/containerd/containerd.sock + ├─1900 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 0df7c613c96d8ca148dbee4d76a4ceb6c030eecf61ffabaa51076f3829d67243 -address /run/containerd/containerd.sock + ├─2007 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 6d5f4a2db70535e4f7a564a389fd494a61d11234cefaa2bf186d5e4d5a29aabf -address /run/containerd/containerd.sock + ├─2013 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 6a7f24751a4f36a0fab37bbaafded1eaa1ac6421ecc5538e3fa66d61110013b4 -address /run/containerd/containerd.sock + ├─2068 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 18ea93dc408f91fe3548595f4045f8e889a269ed28dbc0f82de093f06f2c6010 -address /run/containerd/containerd.sock + ├─2070 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 500cc8e5160bff5849ae1454a967249101e15b82a3ba0bee63299fecf7ae7cfd -address /run/containerd/containerd.sock + ├─2536 /usr/bin/containerd-shim-runc-v2 -namespace moby -id c90f9da6ef9fa38d40aaa88a7115f74e46e9878d172d3d8a840e29ec5e63eaae -address /run/containerd/containerd.sock + ├─2565 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 851cd381ac54e094fd2b21ba8002437c43bc89eee6cb9970cae1f87b167f99cb -address /run/containerd/containerd.sock + ├─2623 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 3a059aa894af237c81f08f3907314916b26a0bf75edd30b7129b82a04fc7cb1b -address /run/containerd/containerd.sock + ├─3005 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 9dcc710d8c2724b09955d8093dc905f01fb546a404d39b06f19787fcda1ae7cb -address /run/containerd/containerd.sock + ├─3060 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 532109cc7b0b380b2fb1391c5c4fcc0b26e6182ccafdbbe213555d3207b6ecdf -address /run/containerd/containerd.sock + ├─3153 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 2582bf82511090acfd0d9347f1bfe234102da93438e2a5bad2b45dd98f6c383d -address /run/containerd/containerd.sock + ├─3448 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 385deb28efb1ba71d375d71fb40f9614945b4e8c91e31e24a5df693348380228 -address /run/containerd/containerd.sock + ├─3526 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 8af11b9d773aab3f997eb6ea70a3bad915e25559d6ad78c2e6c92e95e2c202a0 -address /run/containerd/containerd.sock + ├─3666 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 62c7e686356854921531eda22433ab1872e507740887b1618d41531e91140f81 -address /run/containerd/containerd.sock + └─3791
/usr/bin/containerd-shim-runc-v2 -namespace moby -id eeacbb00fc91228f9a9dcd83f28cd0e92deefb07a8ed9964f7b2bf7fe168bd51 -address /run/containerd/containerd.sock + + Nov 02 23:25:32 flannel-999044 containerd[1032]: time="2025-11-02T23:25:32.301606290Z" level=info msg="cleaning up dead shim" namespace=moby + Nov 02 23:25:34 flannel-999044 containerd[1032]: time="2025-11-02T23:25:34.381647441Z" level=info msg="shim disconnected" id=66c3e3470d59f875fc92049b8ab0495dec2c3d15e242f56d7a7a2e9794d14c96 namespace=moby + Nov 02 23:25:34 flannel-999044 containerd[1032]: time="2025-11-02T23:25:34.381675792Z" level=warning msg="cleaning up after shim disconnected" id=66c3e3470d59f875fc92049b8ab0495dec2c3d15e242f56d7a7a2e9794d14c96 namespace=moby + Nov 02 23:25:34 flannel-999044 containerd[1032]: time="2025-11-02T23:25:34.381683528Z" level=info msg="cleaning up dead shim" namespace=moby + Nov 02 23:25:34 flannel-999044 containerd[1032]: time="2025-11-02T23:25:34.963308790Z" level=info msg="shim disconnected" id=bbe397e932a520c62237683e32254dce7e41e4ec905b51c4a7271d0361d705c4 namespace=moby + Nov 02 23:25:34 flannel-999044 containerd[1032]: time="2025-11-02T23:25:34.963341637Z" level=warning msg="cleaning up after shim disconnected" id=bbe397e932a520c62237683e32254dce7e41e4ec905b51c4a7271d0361d705c4 namespace=moby + Nov 02 23:25:34 flannel-999044 containerd[1032]: time="2025-11-02T23:25:34.963347237Z" level=info msg="cleaning up dead shim" namespace=moby + Nov 02 23:25:35 flannel-999044 containerd[1032]: time="2025-11-02T23:25:35.775186362Z" level=info msg="shim disconnected" id=3b78cbe0cd110c5f32a9ffca2a74d6322fc7e180011bc791cfcfc48e86ebab1c namespace=moby + Nov 02 23:25:35 flannel-999044 containerd[1032]: time="2025-11-02T23:25:35.775215831Z" level=warning msg="cleaning up after shim disconnected" id=3b78cbe0cd110c5f32a9ffca2a74d6322fc7e180011bc791cfcfc48e86ebab1c namespace=moby + Nov 02 23:25:35 flannel-999044 containerd[1032]: time="2025-11-02T23:25:35.775223751Z" level=info msg="cleaning up dead shim" namespace=moby + + + >>> host: containerd daemon config: + # /lib/systemd/system/containerd.service + # Copyright The containerd Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it.
+ # Only systemd 226 and above support this version. + TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /lib/systemd/system/containerd.service: + # Copyright The containerd Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /etc/containerd/config.toml: + version = 2 + root = "/var/lib/containerd" + state = "/run/containerd" + oom_score = 0 + # imports + + [grpc] + address = "/run/containerd/containerd.sock" + uid = 0 + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + + [debug] + address = "" + uid = 0 + gid = 0 + level = "" + + [metrics] + address = "" + grpc_histogram = false + + [cgroup] + path = "" + + [plugins] + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + [plugins."io.containerd.grpc.v1.cri"] + enable_unprivileged_ports = true + stream_server_address = "" + stream_server_port = "10010" + enable_selinux = false + sandbox_image = "registry.k8s.io/pause:3.10.1" + stats_collect_period = 10 + enable_tls_streaming = false + max_container_log_line_size = 16384 + restrict_oom_score_adj = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = true + snapshotter = "overlayfs" + default_runtime_name = "runc" + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + runtime_type = "" + runtime_engine = "" + runtime_root = "" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + [plugins."io.containerd.gc.v1.scheduler"] + pause_threshold = 0.02 + deletion_threshold = 0 + mutation_threshold = 100 + schedule_delay = "0s" + startup_delay = 
"100ms" + + + >>> host: containerd config dump: + disabled_plugins = [] + imports = ["/etc/containerd/config.toml"] + oom_score = 0 + plugin_dir = "" + required_plugins = [] + root = "/var/lib/containerd" + state = "/run/containerd" + temp = "" + version = 2 + + [cgroup] + path = "" + + [debug] + address = "" + format = "" + gid = 0 + level = "" + uid = 0 + + [grpc] + address = "/run/containerd/containerd.sock" + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + tcp_address = "" + tcp_tls_ca = "" + tcp_tls_cert = "" + tcp_tls_key = "" + uid = 0 + + [metrics] + address = "" + grpc_histogram = false + + [plugins] + + [plugins."io.containerd.gc.v1.scheduler"] + deletion_threshold = 0 + mutation_threshold = 100 + pause_threshold = 0.02 + schedule_delay = "0s" + startup_delay = "100ms" + + [plugins."io.containerd.grpc.v1.cri"] + cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"] + device_ownership_from_security_context = false + disable_apparmor = false + disable_cgroup = false + disable_hugetlb_controller = true + disable_proc_mount = false + disable_tcp_service = true + drain_exec_sync_io_timeout = "0s" + enable_cdi = false + enable_selinux = false + enable_tls_streaming = false + enable_unprivileged_icmp = false + enable_unprivileged_ports = true + ignore_deprecation_warnings = [] + ignore_image_defined_volumes = false + image_pull_progress_timeout = "5m0s" + image_pull_with_sync_fs = false + max_concurrent_downloads = 3 + max_container_log_line_size = 16384 + netns_mounts_under_state_dir = false + restrict_oom_score_adj = false + sandbox_image = "registry.k8s.io/pause:3.10.1" + selinux_category_range = 1024 + stats_collect_period = 10 + stream_idle_timeout = "4h0m0s" + stream_server_address = "" + stream_server_port = "10010" + systemd_cgroup = false + tolerate_missing_hugetlb_controller = true + unset_seccomp_profile = "" + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + ip_pref = "" + max_conf_num = 1 + setup_serially = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + default_runtime_name = "runc" + disable_snapshot_annotations = true + discard_unpacked_layers = true + ignore_blockio_not_enabled_errors = false + ignore_rdt_not_enabled_errors = false + no_pivot = false + snapshotter = "overlayfs" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "io.containerd.runc.v2" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + base_runtime_spec = "" + cni_conf_dir = 
"" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".image_decryption] + key_model = "node" + + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.grpc.v1.cri".registry.auths] + + [plugins."io.containerd.grpc.v1.cri".registry.configs] + + [plugins."io.containerd.grpc.v1.cri".registry.headers] + + [plugins."io.containerd.grpc.v1.cri".registry.mirrors] + + [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming] + tls_cert_file = "" + tls_key_file = "" + + [plugins."io.containerd.internal.v1.opt"] + path = "/opt/containerd" + + [plugins."io.containerd.internal.v1.restart"] + interval = "10s" + + [plugins."io.containerd.internal.v1.tracing"] + + [plugins."io.containerd.metadata.v1.bolt"] + content_sharing_policy = "shared" + + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + + [plugins."io.containerd.nri.v1.nri"] + disable = true + disable_connections = false + plugin_config_path = "/etc/nri/conf.d" + plugin_path = "/opt/nri/plugins" + plugin_registration_timeout = "5s" + plugin_request_timeout = "2s" + socket_path = "/var/run/nri/nri.sock" + + [plugins."io.containerd.runtime.v1.linux"] + no_shim = false + runtime = "runc" + runtime_root = "" + shim = "containerd-shim" + shim_debug = false + + [plugins."io.containerd.runtime.v2.task"] + platforms = ["linux/amd64"] + sched_core = false + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + sync_fs = false + + [plugins."io.containerd.service.v1.tasks-service"] + blockio_config_file = "" + rdt_config_file = "" + + [plugins."io.containerd.snapshotter.v1.aufs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.blockfile"] + fs_type = "" + mount_options = [] + root_path = "" + scratch_file = "" + + [plugins."io.containerd.snapshotter.v1.btrfs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.devmapper"] + async_remove = false + base_image_size = "" + discard_blocks = false + fs_options = "" + fs_type = "" + pool_name = "" + root_path = "" + + [plugins."io.containerd.snapshotter.v1.native"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.overlayfs"] + mount_options = [] + root_path = "" + sync_remove = false + upperdir_label = false + + [plugins."io.containerd.snapshotter.v1.zfs"] + root_path = "" + + [plugins."io.containerd.tracing.processor.v1.otlp"] + + [plugins."io.containerd.transfer.v1.local"] + config_path = "" + max_concurrent_downloads = 3 + max_concurrent_uploaded_layers = 3 + + [[plugins."io.containerd.transfer.v1.local".unpack_config]] + differ = "walking" + platform = "linux/amd64" + snapshotter = "overlayfs" + + [proxy_plugins] + + [stream_processors] + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"] + accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"] + args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = "application/vnd.oci.image.layer.v1.tar" + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"] + accepts = 
["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"] + args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = "application/vnd.oci.image.layer.v1.tar+gzip" + + [timeouts] + "io.containerd.timeout.bolt.open" = "0s" + "io.containerd.timeout.metrics.shimstats" = "2s" + "io.containerd.timeout.shim.cleanup" = "5s" + "io.containerd.timeout.shim.load" = "5s" + "io.containerd.timeout.shim.shutdown" = "3s" + "io.containerd.timeout.task.state" = "2s" + + [ttrpc] + address = "" + gid = 0 + uid = 0 + + + >>> host: crio daemon status: + ○ crio.service - Container Runtime Interface for OCI (CRI-O) + Loaded: loaded (/lib/systemd/system/crio.service; disabled; preset: enabled) + Active: inactive (dead) + Docs: https://github.com/cri-o/cri-o + ssh: Process exited with status 3 + + + >>> host: crio daemon config: + # /lib/systemd/system/crio.service + [Unit] + Description=Container Runtime Interface for OCI (CRI-O) + Documentation=https://github.com/cri-o/cri-o + Wants=network-online.target + Before=kubelet.service + After=network-online.target + + [Service] + Type=notify + EnvironmentFile=-/etc/default/crio + Environment=GOTRACEBACK=crash + ExecStart=/usr/bin/crio \ + $CRIO_CONFIG_OPTIONS \ + $CRIO_RUNTIME_OPTIONS \ + $CRIO_STORAGE_OPTIONS \ + $CRIO_NETWORK_OPTIONS \ + $CRIO_METRICS_OPTIONS + ExecReload=/bin/kill -s HUP $MAINPID + TasksMax=infinity + LimitNOFILE=1048576 + LimitNPROC=1048576 + LimitCORE=infinity + OOMScoreAdjust=-999 + TimeoutStartSec=0 + Restart=on-failure + RestartSec=10 + + [Install] + WantedBy=multi-user.target + Alias=cri-o.service + + + >>> host: /etc/crio: + /etc/crio/crio.conf.d/10-crio.conf + [crio.image] + signature_policy = "/etc/crio/policy.json" + + [crio.runtime] + default_runtime = "crun" + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_root = "/run/crun" + monitor_path = "/usr/libexec/crio/conmon" + allowed_annotations = [ + "io.containers.trace-syscall", + ] + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_root = "/run/runc" + monitor_path = "/usr/libexec/crio/conmon" + /etc/crio/crio.conf.d/02-crio.conf + [crio.image] + # pause_image = "" + + [crio.network] + # cni_default_network = "" + + [crio.runtime] + # cgroup_manager = "" + /etc/crio/policy.json + { "default": [{ "type": "insecureAcceptAnything" }] } + + + >>> host: crio config: + INFO[2025-11-02T23:26:08.263651945Z] Updating config from single file: /etc/crio/crio.conf + INFO[2025-11-02T23:26:08.263671195Z] Updating config from drop-in file: /etc/crio/crio.conf + INFO[2025-11-02T23:26:08.263697147Z] Skipping not-existing config file "/etc/crio/crio.conf" + INFO[2025-11-02T23:26:08.263710618Z] Updating config from path: /etc/crio/crio.conf.d + INFO[2025-11-02T23:26:08.263756093Z] Updating config from drop-in file: /etc/crio/crio.conf.d/02-crio.conf + INFO[2025-11-02T23:26:08.263822876Z] Updating config from drop-in file: /etc/crio/crio.conf.d/10-crio.conf + INFO Using default capabilities: CAP_CHOWN, CAP_DAC_OVERRIDE, CAP_FSETID, CAP_FOWNER, CAP_SETGID, CAP_SETUID, CAP_SETPCAP, CAP_NET_BIND_SERVICE, CAP_KILL + # The CRI-O configuration file specifies all of the available configuration + # options and command-line flags for the crio(8) OCI Kubernetes
Container Runtime + # daemon, but in a TOML format that can be more easily modified and versioned. + # + # Please refer to crio.conf(5) for details of all configuration options. + + # CRI-O supports partial configuration reload during runtime, which can be + # done by sending SIGHUP to the running process. Currently supported options + # are explicitly mentioned with: 'This option supports live configuration + # reload'. + + # CRI-O reads its storage defaults from the containers-storage.conf(5) file + # located at /etc/containers/storage.conf. Modify this storage configuration if + # you want to change the system's defaults. If you want to modify storage just + # for CRI-O, you can change the storage configuration options here. + [crio] + + # Path to the "root directory". CRI-O stores all of its data, including + # containers images, in this directory. + # root = "/var/lib/containers/storage" + + # Path to the "run directory". CRI-O stores all of its state in this directory. + # runroot = "/run/containers/storage" + + # Path to the "imagestore". If CRI-O stores all of its images in this directory differently than Root. + # imagestore = "" + + # Storage driver used to manage the storage of images and containers. Please + # refer to containers-storage.conf(5) to see all available storage drivers. + # storage_driver = "" + + # List to pass options to the storage driver. Please refer to + # containers-storage.conf(5) to see all available storage options. + # storage_option = [ + # ] + + # The default log directory where all logs will go unless directly specified by + # the kubelet. The log directory specified must be an absolute directory. + # log_dir = "/var/log/crio/pods" + + # Location for CRI-O to lay down the temporary version file. + # It is used to check if crio wipe should wipe containers, which should + # always happen on a node reboot + # version_file = "/var/run/crio/version" + + # Location for CRI-O to lay down the persistent version file. + # It is used to check if crio wipe should wipe images, which should + # only happen when CRI-O has been upgraded + # version_file_persist = "" + + # InternalWipe is whether CRI-O should wipe containers and images after a reboot when the server starts. + # If set to false, one must use the external command 'crio wipe' to wipe the containers and images in these situations. + # internal_wipe = true + + # InternalRepair is whether CRI-O should check if the container and image storage was corrupted after a sudden restart. + # If it was, CRI-O also attempts to repair the storage. + # internal_repair = true + + # Location for CRI-O to lay down the clean shutdown file. + # It is used to check whether crio had time to sync before shutting down. + # If not found, crio wipe will clear the storage directory. + # clean_shutdown_file = "/var/lib/crio/clean.shutdown" + + # The crio.api table contains settings for the kubelet/gRPC interface. + [crio.api] + + # Path to AF_LOCAL socket on which CRI-O will listen. + # listen = "/var/run/crio/crio.sock" + + # IP address on which the stream server will listen. + # stream_address = "127.0.0.1" + + # The port on which the stream server will listen. If the port is set to "0", then + # CRI-O will allocate a random free port number. + # stream_port = "0" + + # Enable encrypted TLS transport of the stream server. + # stream_enable_tls = false + + # Length of time until open streams terminate due to lack of activity + # stream_idle_timeout = "" + + # Path to the x509 certificate file used to serve the encrypted stream. 
This + # file can change, and CRI-O will automatically pick up the changes. + # stream_tls_cert = "" + + # Path to the key file used to serve the encrypted stream. This file can + # change and CRI-O will automatically pick up the changes. + # stream_tls_key = "" + + # Path to the x509 CA(s) file used to verify and authenticate client + # communication with the encrypted stream. This file can change and CRI-O will + # automatically pick up the changes. + # stream_tls_ca = "" + + # Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 80 * 1024 * 1024. + # grpc_max_send_msg_size = 83886080 + + # Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 80 * 1024 * 1024. + # grpc_max_recv_msg_size = 83886080 + + # The crio.runtime table contains settings pertaining to the OCI runtime used + # and options for how to set up and manage the OCI runtime. + [crio.runtime] + + # A list of ulimits to be set in containers by default, specified as + # "=:", for example: + # "nofile=1024:2048" + # If nothing is set here, settings will be inherited from the CRI-O daemon + # default_ulimits = [ + # ] + + # If true, the runtime will not use pivot_root, but instead use MS_MOVE. + # no_pivot = false + + # decryption_keys_path is the path where the keys required for + # image decryption are stored. This option supports live configuration reload. + # decryption_keys_path = "/etc/crio/keys/" + + # Path to the conmon binary, used for monitoring the OCI runtime. + # Will be searched for using $PATH if empty. + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon = "" + + # Cgroup setting for conmon + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorCgroup. + # conmon_cgroup = "" + + # Environment variable list for the conmon process, used for passing necessary + # environment variables to conmon or the runtime. + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon_env = [ + # ] + + # Additional environment variables to set for all the + # containers. These are overridden if set in the + # container image spec or in the container runtime configuration. + # default_env = [ + # ] + + # If true, SELinux will be used for pod separation on the host. + # This option is deprecated, and be interpreted from whether SELinux is enabled on the host in the future. + # selinux = false + + # Path to the seccomp.json profile which is used as the default seccomp profile + # for the runtime. If not specified or set to "", then the internal default seccomp profile will be used. + # This option supports live configuration reload. + # seccomp_profile = "" + + # Enable a seccomp profile for privileged containers from the local path. + # This option supports live configuration reload. + # privileged_seccomp_profile = "" + + # Used to change the name of the default AppArmor profile of CRI-O. The default + # profile name is "crio-default". This profile only takes effect if the user + # does not specify a profile via the Kubernetes Pod's metadata annotation. If + # the profile is set to "unconfined", then this equals to disabling AppArmor. + # This option supports live configuration reload. + # apparmor_profile = "crio-default" + + # Path to the blockio class configuration file for configuring + # the cgroup blockio controller. 
+ # blockio_config_file = "" + + # Reload blockio-config-file and rescan blockio devices in the system before applying + # blockio parameters. + # blockio_reload = false + + # Used to change irqbalance service config file path which is used for configuring + # irqbalance daemon. + # irqbalance_config_file = "/etc/sysconfig/irqbalance" + + # irqbalance_config_restore_file allows to set a cpu mask CRI-O should + # restore as irqbalance config at startup. Set to empty string to disable this flow entirely. + # By default, CRI-O manages the irqbalance configuration to enable dynamic IRQ pinning. + # irqbalance_config_restore_file = "/etc/sysconfig/orig_irq_banned_cpus" + + # Path to the RDT configuration file for configuring the resctrl pseudo-filesystem. + # This option supports live configuration reload. + # rdt_config_file = "" + + # Cgroup management implementation used for the runtime. + # cgroup_manager = "systemd" + + # Specify whether the image pull must be performed in a separate cgroup. + # separate_pull_cgroup = "" + + # List of default capabilities for containers. If it is empty or commented out, + # only the capabilities defined in the containers json file by the user/kube + # will be added. + # default_capabilities = [ + # "CHOWN", + # "DAC_OVERRIDE", + # "FSETID", + # "FOWNER", + # "SETGID", + # "SETUID", + # "SETPCAP", + # "NET_BIND_SERVICE", + # "KILL", + # ] + + # Add capabilities to the inheritable set, as well as the default group of permitted, bounding and effective. + # If capabilities are expected to work for non-root users, this option should be set. + # add_inheritable_capabilities = false + + # List of default sysctls. If it is empty or commented out, only the sysctls + # defined in the container json file by the user/kube will be added. + # default_sysctls = [ + # ] + + # List of devices on the host that a + # user can specify with the "io.kubernetes.cri-o.Devices" allowed annotation. + # allowed_devices = [ + # "/dev/fuse", + # "/dev/net/tun", + # ] + + # List of additional devices. specified as + # "::", for example: "--device=/dev/sdc:/dev/xvdc:rwm". + # If it is empty or commented out, only the devices + # defined in the container json file by the user/kube will be added. + # additional_devices = [ + # ] + + # List of directories to scan for CDI Spec files. + # cdi_spec_dirs = [ + # "/etc/cdi", + # "/var/run/cdi", + # ] + + # Change the default behavior of setting container devices uid/gid from CRI's + # SecurityContext (RunAsUser/RunAsGroup) instead of taking host's uid/gid. + # Defaults to false. + # device_ownership_from_security_context = false + + # Path to OCI hooks directories for automatically executed hooks. If one of the + # directories does not exist, then CRI-O will automatically skip them. + # hooks_dir = [ + # "/usr/share/containers/oci/hooks.d", + # ] + + # Path to the file specifying the defaults mounts for each container. The + # format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads + # its default mounts from the following two files: + # + # 1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the + # override file, where users can either add in their own default mounts, or + # override the default mounts shipped with the package. + # + # 2) /usr/share/containers/mounts.conf: This is the default file read for + # mounts. If you want CRI-O to read from a different, specific mounts file, + # you can change the default_mounts_file. Note, if this is done, CRI-O will + # only add mounts it finds in this file. 
+ # + # default_mounts_file = "" + + # Maximum number of processes allowed in a container. + # This option is deprecated. The Kubelet flag '--pod-pids-limit' should be used instead. + # pids_limit = -1 + + # Maximum sized allowed for the container log file. Negative numbers indicate + # that no size limit is imposed. If it is positive, it must be >= 8192 to + # match/exceed conmon's read buffer. The file is truncated and re-opened so the + # limit is never exceeded. This option is deprecated. The Kubelet flag '--container-log-max-size' should be used instead. + # log_size_max = -1 + + # Whether container output should be logged to journald in addition to the kubernetes log file + # log_to_journald = false + + # Path to directory in which container exit files are written to by conmon. + # container_exits_dir = "/var/run/crio/exits" + + # Path to directory for container attach sockets. + # container_attach_socket_dir = "/var/run/crio" + + # The prefix to use for the source of the bind mounts. + # bind_mount_prefix = "" + + # If set to true, all containers will run in read-only mode. + # read_only = false + + # Changes the verbosity of the logs based on the level it is set to. Options + # are fatal, panic, error, warn, info, debug and trace. This option supports + # live configuration reload. + # log_level = "info" + + # Filter the log messages by the provided regular expression. + # This option supports live configuration reload. + # log_filter = "" + + # The UID mappings for the user namespace of each container. A range is + # specified in the form containerUID:HostUID:Size. Multiple ranges must be + # separated by comma. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # uid_mappings = "" + + # The GID mappings for the user namespace of each container. A range is + # specified in the form containerGID:HostGID:Size. Multiple ranges must be + # separated by comma. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # gid_mappings = "" + + # If set, CRI-O will reject any attempt to map host UIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_uid = -1 + + # If set, CRI-O will reject any attempt to map host GIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_gid = -1 + + # The minimal amount of time in seconds to wait before issuing a timeout + # regarding the proper termination of the container. The lowest possible + # value is 30s, whereas lower values are not considered by CRI-O. + # ctr_stop_timeout = 30 + + # drop_infra_ctr determines whether CRI-O drops the infra container + # when a pod does not have a private PID namespace, and does not use + # a kernel separating runtime (like kata). + # It requires manage_ns_lifecycle to be true. + # drop_infra_ctr = true + + # infra_ctr_cpuset determines what CPUs will be used to run infra containers. + # You can use linux CPU list format to specify desired CPUs. 
+ # To get better isolation for guaranteed pods, set this parameter to be equal to kubelet reserved-cpus. + # infra_ctr_cpuset = "" + + # shared_cpuset determines the CPU set which is allowed to be shared between guaranteed containers, + # regardless of, and in addition to, the exclusiveness of their CPUs. + # This field is optional and would not be used if not specified. + # You can specify CPUs in the Linux CPU list format. + # shared_cpuset = "" + + # The directory where the state of the managed namespaces gets tracked. + # Only used when manage_ns_lifecycle is true. + # namespaces_dir = "/var/run" + + # pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle + # pinns_path = "" + + # Globally enable/disable CRIU support which is necessary to + # checkpoint and restore container or pods (even if CRIU is found in $PATH). + # enable_criu_support = true + + # Enable/disable the generation of the container, + # sandbox lifecycle events to be sent to the Kubelet to optimize the PLEG + # enable_pod_events = false + + # default_runtime is the _name_ of the OCI runtime to be used as the default. + # The name is matched against the runtimes map below. + # default_runtime = "crun" + + # A list of paths that, when absent from the host, + # will cause a container creation to fail (as opposed to the current behavior being created as a directory). + # This option is to protect from source locations whose existence as a directory could jeopardize the health of the node, and whose + # creation as a file is not desired either. + # An example is /etc/hostname, which will cause failures on reboot if it's created as a directory, but often doesn't exist because + # the hostname is being managed dynamically. + # absent_mount_sources_to_reject = [ + # ] + + # The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes. + # The runtime to use is picked based on the runtime handler provided by the CRI. + # If no runtime handler is provided, the "default_runtime" will be used. + # Each entry in the table should follow the format: + # + # [crio.runtime.runtimes.runtime-handler] + # runtime_path = "/path/to/the/executable" + # runtime_type = "oci" + # runtime_root = "/path/to/the/root" + # inherit_default_runtime = false + # monitor_path = "/path/to/container/monitor" + # monitor_cgroup = "/cgroup/path" + # monitor_exec_cgroup = "/cgroup/path" + # monitor_env = [] + # privileged_without_host_devices = false + # allowed_annotations = [] + # platform_runtime_paths = { "os/arch" = "/path/to/binary" } + # no_sync_log = false + # default_annotations = {} + # stream_websockets = false + # seccomp_profile = "" + # Where: + # - runtime-handler: Name used to identify the runtime. + # - runtime_path (optional, string): Absolute path to the runtime executable in + # the host filesystem. If omitted, the runtime-handler identifier should match + # the runtime executable name, and the runtime executable should be placed + # in $PATH. + # - runtime_type (optional, string): Type of runtime, one of: "oci", "vm". If + # omitted, an "oci" runtime is assumed. + # - runtime_root (optional, string): Root directory for storage of containers + # state. + # - runtime_config_path (optional, string): the path for the runtime configuration + # file. This can only be used with when using the VM runtime_type. 
+ # - inherit_default_runtime (optional, bool): when true the runtime_path, + # runtime_type, runtime_root and runtime_config_path will be replaced by + # the values from the default runtime on load time. + # - privileged_without_host_devices (optional, bool): an option for restricting + # host devices from being passed to privileged containers. + # - allowed_annotations (optional, array of strings): an option for specifying + # a list of experimental annotations that this runtime handler is allowed to process. + # The currently recognized values are: + # "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod. + # "io.kubernetes.cri-o.cgroup2-mount-hierarchy-rw" for mounting cgroups writably when set to "true". + # "io.kubernetes.cri-o.Devices" for configuring devices for the pod. + # "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm. + # "io.kubernetes.cri-o.UnifiedCgroup.$CTR_NAME" for configuring the cgroup v2 unified block for a container. + # "io.containers.trace-syscall" for tracing syscalls via the OCI seccomp BPF hook. + # "io.kubernetes.cri-o.seccompNotifierAction" for enabling the seccomp notifier feature. + # "io.kubernetes.cri-o.umask" for setting the umask for container init process. + # "io.kubernetes.cri.rdt-class" for setting the RDT class of a container + # "seccomp-profile.kubernetes.cri-o.io" for setting the seccomp profile for: + # - a specific container by using: "seccomp-profile.kubernetes.cri-o.io/" + # - a whole pod by using: "seccomp-profile.kubernetes.cri-o.io/POD" + # Note that the annotation works on containers as well as on images. + # For images, the plain annotation "seccomp-profile.kubernetes.cri-o.io" + # can be used without the required "/POD" suffix or a container name. + # "io.kubernetes.cri-o.DisableFIPS" for disabling FIPS mode in a Kubernetes pod within a FIPS-enabled cluster. + # - monitor_path (optional, string): The path of the monitor binary. Replaces + # deprecated option "conmon". + # - monitor_cgroup (optional, string): The cgroup the container monitor process will be put in. + # Replaces deprecated option "conmon_cgroup". + # - monitor_exec_cgroup (optional, string): If set to "container", indicates exec probes + # should be moved to the container's cgroup + # - monitor_env (optional, array of strings): Environment variables to pass to the monitor. + # Replaces deprecated option "conmon_env". + # When using the pod runtime and conmon-rs, then the monitor_env can be used to further configure + # conmon-rs by using: + # - LOG_DRIVER=[none,systemd,stdout] - Enable logging to the configured target, defaults to none. + # - HEAPTRACK_OUTPUT_PATH=/path/to/dir - Enable heaptrack profiling and save the files to the set directory. + # - HEAPTRACK_BINARY_PATH=/path/to/heaptrack - Enable heaptrack profiling and use set heaptrack binary. + # - platform_runtime_paths (optional, map): A mapping of platforms to the corresponding + # runtime executable paths for the runtime handler. + # - container_min_memory (optional, string): The minimum memory that must be set for a container. + # This value can be used to override the currently set global value for a specific runtime. If not set, + # a global default value of "12 MiB" will be used. + # - no_sync_log (optional, bool): If set to true, the runtime will not sync the log file on rotate or container exit. + # This option is only valid for the 'oci' runtime type. Setting this option to true can cause data loss, e.g. + # when a machine crash happens. 
+ # - default_annotations (optional, map): Default annotations if not overridden by the pod spec. + # - stream_websockets (optional, bool): Enable the WebSocket protocol for container exec, attach and port forward. + # - seccomp_profile (optional, string): The absolute path of the seccomp.json profile which is used as the default + # seccomp profile for the runtime. + # If not specified or set to "", the runtime seccomp_profile will be used. + # If that is also not specified or set to "", the internal default seccomp profile will be applied. + # + # Using the seccomp notifier feature: + # + # This feature can help you to debug seccomp related issues, for example if + # blocked syscalls (permission denied errors) have negative impact on the workload. + # + # To be able to use this feature, configure a runtime which has the annotation + # "io.kubernetes.cri-o.seccompNotifierAction" in the allowed_annotations array. + # + # It also requires at least runc 1.1.0 or crun 0.19 which support the notifier + # feature. + # + # If everything is setup, CRI-O will modify chosen seccomp profiles for + # containers if the annotation "io.kubernetes.cri-o.seccompNotifierAction" is + # set on the Pod sandbox. CRI-O will then get notified if a container is using + # a blocked syscall and then terminate the workload after a timeout of 5 + # seconds if the value of "io.kubernetes.cri-o.seccompNotifierAction=stop". + # + # This also means that multiple syscalls can be captured during that period, + # while the timeout will get reset once a new syscall has been discovered. + # + # This also means that the Pods "restartPolicy" has to be set to "Never", + # otherwise the kubelet will restart the container immediately. + # + # Please be aware that CRI-O is not able to get notified if a syscall gets + # blocked based on the seccomp defaultAction, which is a general runtime + # limitation. + + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_type = "" + runtime_root = "/run/crun" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + allowed_annotations = [ + "io.containers.trace-syscall", + ] + privileged_without_host_devices = false + + + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_type = "" + runtime_root = "/run/runc" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + + privileged_without_host_devices = false + + + + # The workloads table defines ways to customize containers with different resources + # that work based on annotations, rather than the CRI. + # Note, the behavior of this table is EXPERIMENTAL and may change at any time. + # Each workload, has a name, activation_annotation, annotation_prefix and set of resources it supports mutating. + # The currently supported resources are "cpuperiod" "cpuquota", "cpushares", "cpulimit" and "cpuset". The values for "cpuperiod" and "cpuquota" are denoted in microseconds. + # The value for "cpulimit" is denoted in millicores, this value is used to calculate the "cpuquota" with the supplied "cpuperiod" or the default "cpuperiod". + # Note that the "cpulimit" field overrides the "cpuquota" value supplied in this configuration. + # Each resource can have a default value specified, or be empty. 
+ # For a container to opt-into this workload, the pod should be configured with the annotation $activation_annotation (key only, value is ignored). + # To customize per-container, an annotation of the form $annotation_prefix.$resource/$ctrName = "value" can be specified + # signifying for that resource type to override the default value. + # If the annotation_prefix is not present, every container in the pod will be given the default values. + # Example: + # [crio.runtime.workloads.workload-type] + # activation_annotation = "io.crio/workload" + # annotation_prefix = "io.crio.workload-type" + # [crio.runtime.workloads.workload-type.resources] + # cpuset = "0-1" + # cpushares = "5" + # cpuquota = "1000" + # cpuperiod = "100000" + # cpulimit = "35" + # Where: + # The workload name is workload-type. + # To specify, the pod must have the "io.crio.workload" annotation (this is a precise string match). + # This workload supports setting cpuset and cpu resources. + # annotation_prefix is used to customize the different resources. + # To configure the cpu shares a container gets in the example above, the pod would have to have the following annotation: + # "io.crio.workload-type/$container_name = {"cpushares": "value"}" + + # hostnetwork_disable_selinux determines whether + # SELinux should be disabled within a pod when it is running in the host network namespace + # Default value is set to true + # hostnetwork_disable_selinux = true + + # disable_hostport_mapping determines whether to enable/disable + # the container hostport mapping in CRI-O. + # Default value is set to 'false' + # disable_hostport_mapping = false + + # timezone To set the timezone for a container in CRI-O. + # If an empty string is provided, CRI-O retains its default behavior. Use 'Local' to match the timezone of the host machine. + # timezone = "" + + # The crio.image table contains settings pertaining to the management of OCI images. + # + # CRI-O reads its configured registries defaults from the system wide + # containers-registries.conf(5) located in /etc/containers/registries.conf. + [crio.image] + + # Default transport for pulling images from a remote container storage. + # default_transport = "docker://" + + # The path to a file containing credentials necessary for pulling images from + # secure registries. The file is similar to that of /var/lib/kubelet/config.json + # global_auth_file = "" + + # The image used to instantiate infra containers. + # This option supports live configuration reload. + # pause_image = "registry.k8s.io/pause:3.10.1" + + # The path to a file containing credentials specific for pulling the pause_image from + # above. The file is similar to that of /var/lib/kubelet/config.json + # This option supports live configuration reload. + # pause_image_auth_file = "" + + # The command to run to have a container stay in the paused state. + # When explicitly set to "", it will fallback to the entrypoint and command + # specified in the pause image. When commented out, it will fallback to the + # default: "/pause". This option supports live configuration reload. + # pause_command = "/pause" + + # List of images to be excluded from the kubelet's garbage collection. + # It allows specifying image names using either exact, glob, or keyword + # patterns. Exact matches must match the entire name, glob matches can + # have a wildcard * at the end, and keyword matches can have wildcards + # on both ends. 
By default, this list includes the "pause" image if + # configured by the user, which is used as a placeholder in Kubernetes pods. + # pinned_images = [ + # ] + + # Path to the file which decides what sort of policy we use when deciding + # whether or not to trust an image that we've pulled. It is not recommended that + # this option be used, as the default behavior of using the system-wide default + # policy (i.e., /etc/containers/policy.json) is most often preferred. Please + # refer to containers-policy.json(5) for more details. + signature_policy = "/etc/crio/policy.json" + + # Root path for pod namespace-separated signature policies. + # The final policy to be used on image pull will be /.json. + # If no pod namespace is being provided on image pull (via the sandbox config), + # or the concatenated path is non existent, then the signature_policy or system + # wide policy will be used as fallback. Must be an absolute path. + # signature_policy_dir = "/etc/crio/policies" + + # List of registries to skip TLS verification for pulling images. Please + # consider configuring the registries via /etc/containers/registries.conf before + # changing them here. + # This option is deprecated. Use registries.conf file instead. + # insecure_registries = [ + # ] + + # Controls how image volumes are handled. The valid values are mkdir, bind and + # ignore; the latter will ignore volumes entirely. + # image_volumes = "mkdir" + + # Temporary directory to use for storing big files + # big_files_temporary_dir = "" + + # If true, CRI-O will automatically reload the mirror registry when + # there is an update to the 'registries.conf.d' directory. Default value is set to 'false'. + # auto_reload_registries = false + + # The timeout for an image pull to make progress until the pull operation + # gets canceled. This value will be also used for calculating the pull progress interval to pull_progress_timeout / 10. + # Can be set to 0 to disable the timeout as well as the progress output. + # pull_progress_timeout = "0s" + + # The mode of short name resolution. + # The valid values are "enforcing" and "disabled", and the default is "enforcing". + # If "enforcing", an image pull will fail if a short name is used, but the results are ambiguous. + # If "disabled", the first result will be chosen. + # short_name_mode = "enforcing" + + # OCIArtifactMountSupport is whether CRI-O should support OCI artifacts. + # If set to false, mounting OCI Artifacts will result in an error. + # oci_artifact_mount_support = true + # The crio.network table containers settings pertaining to the management of + # CNI plugins. + [crio.network] + + # The default CNI network name to be selected. If not set or "", then + # CRI-O will pick-up the first one found in network_dir. + # cni_default_network = "" + + # Path to the directory where CNI configuration files are located. + # network_dir = "/etc/cni/net.d/" + + # Paths to directories where CNI plugin binaries are located. + # plugin_dirs = [ + # "/opt/cni/bin/", + # ] + + # List of included pod metrics. + # included_pod_metrics = [ + # ] + + # A necessary configuration for Prometheus based metrics retrieval + [crio.metrics] + + # Globally enable or disable metrics support. + # enable_metrics = false + + # Specify enabled metrics collectors. + # Per default all metrics are enabled. + # It is possible, to prefix the metrics with "container_runtime_" and "crio_". 
+ # For example, the metrics collector "operations" would be treated in the same + # way as "crio_operations" and "container_runtime_crio_operations". + # metrics_collectors = [ + # "image_pulls_layer_size", + # "containers_events_dropped_total", + # "containers_oom_total", + # "processes_defunct", + # "operations_total", + # "operations_latency_seconds", + # "operations_latency_seconds_total", + # "operations_errors_total", + # "image_pulls_bytes_total", + # "image_pulls_skipped_bytes_total", + # "image_pulls_failure_total", + # "image_pulls_success_total", + # "image_layer_reuse_total", + # "containers_oom_count_total", + # "containers_seccomp_notifier_count_total", + # "resources_stalled_at_stage", + # "containers_stopped_monitor_count", + # ] + # The IP address or hostname on which the metrics server will listen. + # metrics_host = "127.0.0.1" + + # The port on which the metrics server will listen. + # metrics_port = 9090 + + # Local socket path to bind the metrics server to + # metrics_socket = "" + + # The certificate for the secure metrics server. + # If the certificate is not available on disk, then CRI-O will generate a + # self-signed one. CRI-O also watches for changes of this path and reloads the + # certificate on any modification event. + # metrics_cert = "" + + # The certificate key for the secure metrics server. + # Behaves in the same way as the metrics_cert. + # metrics_key = "" + + # A necessary configuration for OpenTelemetry trace data exporting + [crio.tracing] + + # Globally enable or disable exporting OpenTelemetry traces. + # enable_tracing = false + + # Address on which the gRPC trace collector listens. + # tracing_endpoint = "127.0.0.1:4317" + + # Number of samples to collect per million spans. Set to 1000000 to always sample. + # tracing_sampling_rate_per_million = 0 + + # CRI-O NRI configuration. + [crio.nri] + + # Globally enable or disable NRI. + # enable_nri = true + + # NRI socket to listen on. + # nri_listen = "/var/run/nri/nri.sock" + + # NRI plugin directory to use. + # nri_plugin_dir = "/opt/nri/plugins" + + # NRI plugin configuration directory to use. + # nri_plugin_config_dir = "/etc/nri/conf.d" + + # Disable connections from externally launched NRI plugins. + # nri_disable_connections = false + + # Timeout for a plugin to register itself with NRI. + # nri_plugin_registration_timeout = "5s" + + # Timeout for a plugin to handle an NRI request. + # nri_plugin_request_timeout = "2s" + + # NRI default validator configuration. + # If enabled, the builtin default validator can be used to reject a container if some + # NRI plugin requested a restricted adjustment. Currently the following adjustments + # can be restricted/rejected: + # - OCI hook injection + # - adjustment of runtime default seccomp profile + # - adjustment of unconfined seccomp profile + # - adjustment of a custom seccomp profile + # - adjustment of linux namespaces + # Additionally, the default validator can be used to reject container creation if any + # of a required set of plugins has not processed a container creation request, unless + # the container has been annotated to tolerate a missing plugin. 
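Ahead of the commented defaults that follow, a filled-in default-validator stanza might look roughly like the sketch below; the plugin name and annotation key are invented for illustration and do not come from this run, only the key names come from the settings listed underneath.

    # Hypothetical sketch (plugin name and annotation key are examples only):
    [crio.nri.default_validator]
    nri_enable_default_validator = true
    nri_validator_reject_oci_hook_adjustment = true
    nri_validator_required_plugins = [
      "resource-policy",
    ]
    nri_validator_tolerate_missing_plugins_annotation = "tolerate-missing-nri-plugins.example.io"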
+ # + # [crio.nri.default_validator] + # nri_enable_default_validator = false + # nri_validator_reject_oci_hook_adjustment = false + # nri_validator_reject_runtime_default_seccomp_adjustment = false + # nri_validator_reject_unconfined_seccomp_adjustment = false + # nri_validator_reject_custom_seccomp_adjustment = false + # nri_validator_reject_namespace_adjustment = false + # nri_validator_required_plugins = [ + # ] + # nri_validator_tolerate_missing_plugins_annotation = "" + + # Necessary information pertaining to container and pod stats reporting. + [crio.stats] + + # The number of seconds between collecting pod and container stats. + # If set to 0, the stats are collected on-demand instead. + # stats_collection_period = 0 + + # The number of seconds between collecting pod/container stats and pod + # sandbox metrics. If set to 0, the metrics/stats are collected on-demand instead. + # collection_period = 0 + + + ----------------------- debugLogs end: flannel-999044 [took: 13.333327783s] -------------------------------- + helpers_test.go:175: Cleaning up "flannel-999044" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p flannel-999044 + net_test.go:211: + ----------------------- debugLogs start: bridge-999044 [pass: true] -------------------------------- + >>> netcat: nslookup kubernetes.default: + Server: 10.96.0.10 + Address: 10.96.0.10#53 + + Name: kubernetes.default.svc.cluster.local + Address: 10.96.0.1 + + + + >>> netcat: nc 10.96.0.10 udp/53: + Connection to 10.96.0.10 53 port [udp/*] succeeded! + + + >>> netcat: nc 10.96.0.10 tcp/53: + Connection to 10.96.0.10 53 port [tcp/*] succeeded! + + + >>> netcat: /etc/nsswitch.conf: + cat: can't open '/etc/nsswitch.conf': No such file or directory + command terminated with exit code 1 + + + >>> netcat: /etc/hosts: + # Kubernetes-managed hosts file. + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + fe00::0 ip6-localnet + fe00::0 ip6-mcastprefix + fe00::1 ip6-allnodes + fe00::2 ip6-allrouters + 10.244.0.4 netcat-cd4db9dbf-fjnrs + + + >>> netcat: /etc/resolv.conf: + nameserver 10.96.0.10 + search default.svc.cluster.local svc.cluster.local cluster.local test-pods.svc.cluster.local c.k8s-infra-prow-build.internal google.internal + options ndots:5 + + + >>> host: /etc/nsswitch.conf: + # /etc/nsswitch.conf + # + # Example configuration of GNU Name Service Switch functionality. + # If you have the `glibc-doc-reference' and `info' packages installed, try: + # `info libc "Name Service Switch"' for information about this file. + + passwd: files + group: files + shadow: files + gshadow: files + + hosts: files dns + networks: files + + protocols: db files + services: db files + ethers: db files + rpc: db files + + netgroup: nis + + + >>> host: /etc/hosts: + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + fe00:: ip6-localnet + ff00:: ip6-mcastprefix + ff02::1 ip6-allnodes + ff02::2 ip6-allrouters + 192.168.103.2 bridge-999044 + 192.168.103.1 host.minikube.internal + 192.168.103.2 control-plane.minikube.internal + + + >>> host: /etc/resolv.conf: + # Generated by Docker Engine. + # This file can be edited; Docker Engine will not make further changes once it + # has been modified. 
+ + nameserver 192.168.103.1 + search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal + options ndots:5 + + # Based on host file: '/etc/resolv.conf' (internal resolver) + # ExtServers: [host(10.35.240.10)] + # Overrides: [] + # Option ndots from: host + + + >>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, : + Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice + NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME + node/bridge-999044 Ready control-plane 56s v1.34.1 192.168.103.2 Debian GNU/Linux 12 (bookworm) 6.6.97+ docker://28.5.1 + + NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR + default service/kubernetes ClusterIP 10.96.0.1 443/TCP 55s + default service/netcat ClusterIP 10.98.128.27 8080/TCP 15s app=netcat + kube-system service/kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP,9153/TCP 52s k8s-app=kube-dns + + NAMESPACE NAME ENDPOINTS AGE + default endpoints/kubernetes 192.168.103.2:8443 55s + default endpoints/netcat 10.244.0.4:8080 15s + kube-system endpoints/k8s.io-minikube-hostpath 46s + kube-system endpoints/kube-dns 10.244.0.2:53,10.244.0.2:53,10.244.0.2:9153 47s + + NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR + kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 52s kube-proxy registry.k8s.io/kube-proxy:v1.34.1 k8s-app=kube-proxy + + NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR + default deployment.apps/netcat 1/1 1 1 15s dnsutils registry.k8s.io/e2e-test-images/agnhost:2.40 app=netcat + kube-system deployment.apps/coredns 1/1 1 1 52s coredns registry.k8s.io/coredns/coredns:v1.12.1 k8s-app=kube-dns + + NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES + default pod/netcat-cd4db9dbf-fjnrs 1/1 Running 0 15s 10.244.0.4 bridge-999044 + kube-system pod/coredns-66bc5c9577-mfcg7 1/1 Running 0 47s 10.244.0.2 bridge-999044 + kube-system pod/etcd-bridge-999044 1/1 Running 0 53s 192.168.103.2 bridge-999044 + kube-system pod/kube-apiserver-bridge-999044 1/1 Running 0 53s 192.168.103.2 bridge-999044 + kube-system pod/kube-controller-manager-bridge-999044 1/1 Running 0 53s 192.168.103.2 bridge-999044 + kube-system pod/kube-proxy-xmzpf 1/1 Running 0 47s 192.168.103.2 bridge-999044 + kube-system pod/kube-scheduler-bridge-999044 1/1 Running 0 55s 192.168.103.2 bridge-999044 + kube-system pod/storage-provisioner 1/1 Running 1 (15s ago) 46s 192.168.103.2 bridge-999044 + + + >>> host: crictl pods: + POD ID CREATED STATE NAME NAMESPACE ATTEMPT RUNTIME + c3cf4d8e48f88 14 seconds ago Ready netcat-cd4db9dbf-fjnrs default 0 (default) + 9a331e1de6fdf 46 seconds ago Ready storage-provisioner kube-system 0 (default) + 60a3f80b01fa4 46 seconds ago Ready kube-proxy-xmzpf kube-system 0 (default) + bf5cc7d2cea19 46 seconds ago Ready coredns-66bc5c9577-mfcg7 kube-system 0 (default) + 61c7f93e01b8d 46 seconds ago NotReady coredns-66bc5c9577-9bxps kube-system 0 (default) + 22673fd439034 57 seconds ago Ready kube-apiserver-bridge-999044 kube-system 0 (default) + a83548c94c982 57 seconds ago Ready etcd-bridge-999044 kube-system 0 (default) + 6ffa37255b17a 57 seconds ago Ready kube-scheduler-bridge-999044 kube-system 0 (default) + 665c15d057dea 57 seconds ago Ready kube-controller-manager-bridge-999044 kube-system 0 (default) + + + >>> host: crictl containers: + CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID 
POD NAMESPACE + 2ff2944748437 registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 13 seconds ago Running dnsutils 0 c3cf4d8e48f88 netcat-cd4db9dbf-fjnrs default + 23f52ccdf60be 6e38f40d628db 15 seconds ago Running storage-provisioner 1 9a331e1de6fdf storage-provisioner kube-system + 60f616688ebec 6e38f40d628db 45 seconds ago Exited storage-provisioner 0 9a331e1de6fdf storage-provisioner kube-system + 2b4bb93d0766f fc25172553d79 46 seconds ago Running kube-proxy 0 60a3f80b01fa4 kube-proxy-xmzpf kube-system + 1c527a775cadc 52546a367cc9e 46 seconds ago Running coredns 0 bf5cc7d2cea19 coredns-66bc5c9577-mfcg7 kube-system + 7d6c53f8693ab 7dd6aaa1717ab 57 seconds ago Running kube-scheduler 0 6ffa37255b17a kube-scheduler-bridge-999044 kube-system + 519b7735bbebd c3994bc696102 57 seconds ago Running kube-apiserver 0 22673fd439034 kube-apiserver-bridge-999044 kube-system + ecc8763a3f7cb 5f1f5298c888d 57 seconds ago Running etcd 0 a83548c94c982 etcd-bridge-999044 kube-system + 81ef4c3f97d4a c80c8dbafe7dd 57 seconds ago Running kube-controller-manager 0 665c15d057dea kube-controller-manager-bridge-999044 kube-system + + + >>> k8s: describe netcat deployment: + Name: netcat + Namespace: default + CreationTimestamp: Sun, 02 Nov 2025 23:25:47 +0000 + Labels: app=netcat + Annotations: deployment.kubernetes.io/revision: 1 + Selector: app=netcat + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 25% max unavailable, 25% max surge + Pod Template: + Labels: app=netcat + Containers: + dnsutils: + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + Environment: + Mounts: + Volumes: + Node-Selectors: + Tolerations: + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: netcat-cd4db9dbf (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 15s deployment-controller Scaled up replica set netcat-cd4db9dbf from 0 to 1 + + + >>> k8s: describe netcat pod(s): + Name: netcat-cd4db9dbf-fjnrs + Namespace: default + Priority: 0 + Service Account: default + Node: bridge-999044/192.168.103.2 + Start Time: Sun, 02 Nov 2025 23:25:47 +0000 + Labels: app=netcat + pod-template-hash=cd4db9dbf + Annotations: + Status: Running + IP: 10.244.0.4 + IPs: + IP: 10.244.0.4 + Controlled By: ReplicaSet/netcat-cd4db9dbf + Containers: + dnsutils: + Container ID: docker://2ff2944748437137f6da3c5f66034a712a989e1a71cca81294d963ad02648b9a + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Image ID: docker-pullable://registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + State: Running + Started: Sun, 02 Nov 2025 23:25:49 +0000 + Ready: True + Restart Count: 0 + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-g6l6h (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-api-access-g6l6h: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + 
ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: + Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 15s default-scheduler Successfully assigned default/netcat-cd4db9dbf-fjnrs to bridge-999044 + Normal Pulling 14s kubelet Pulling image "registry.k8s.io/e2e-test-images/agnhost:2.40" + Normal Pulled 13s kubelet Successfully pulled image "registry.k8s.io/e2e-test-images/agnhost:2.40" in 894ms (895ms including waiting). Image size: 127004766 bytes. + Normal Created 13s kubelet Created container: dnsutils + Normal Started 13s kubelet Started container dnsutils + + + >>> k8s: netcat logs: + + + >>> k8s: describe coredns deployment: + Name: coredns + Namespace: kube-system + CreationTimestamp: Sun, 02 Nov 2025 23:25:10 +0000 + Labels: k8s-app=kube-dns + Annotations: deployment.kubernetes.io/revision: 1 + Selector: k8s-app=kube-dns + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 1 max unavailable, 25% max surge + Pod Template: + Labels: k8s-app=kube-dns + Service Account: coredns + Containers: + coredns: + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Ports: 53/UDP (dns), 53/TCP (dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from config-volume (ro) + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + Priority Class Name: system-cluster-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: coredns-66bc5c9577 (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 47s deployment-controller Scaled up replica set coredns-66bc5c9577 from 0 to 2 + Normal ScalingReplicaSet 46s deployment-controller Scaled down replica set coredns-66bc5c9577 from 2 to 1 + + + >>> k8s: describe coredns pods: + Name: coredns-66bc5c9577-mfcg7 + Namespace: kube-system + Priority: 2000000000 + Priority Class Name: system-cluster-critical + Service Account: coredns + Node: bridge-999044/192.168.103.2 + Start Time: Sun, 02 Nov 2025 23:25:15 +0000 + Labels: k8s-app=kube-dns + pod-template-hash=66bc5c9577 + Annotations: + Status: Running + IP: 10.244.0.2 + IPs: + IP: 10.244.0.2 + Controlled By: ReplicaSet/coredns-66bc5c9577 + Containers: + coredns: + Container ID: docker://1c527a775cadcecd2c003dd4ea918e73fc635e76d587d954c5d6feddafde1b4f + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Image ID: 
docker-pullable://registry.k8s.io/coredns/coredns@sha256:e8c262566636e6bc340ece6473b0eed193cad045384401529721ddbe6463d31c + Ports: 53/UDP (dns), 53/TCP (dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + State: Running + Started: Sun, 02 Nov 2025 23:25:16 +0000 + Ready: True + Restart Count: 0 + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from config-volume (ro) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-fx5p8 (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + kube-api-access-fx5p8: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: Burstable + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 47s default-scheduler Successfully assigned kube-system/coredns-66bc5c9577-mfcg7 to bridge-999044 + Normal Pulled 46s kubelet Container image "registry.k8s.io/coredns/coredns:v1.12.1" already present on machine + Normal Created 46s kubelet Created container: coredns + Normal Started 46s kubelet Started container coredns + Warning Unhealthy 27s (x3 over 37s) kubelet Readiness probe failed: HTTP probe failed with statuscode: 503 + + + >>> k8s: coredns logs: + maxprocs: Leaving GOMAXPROCS=8: CPU quota undefined + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API + .:53 + [INFO] plugin/reload: Running configuration SHA512 = 1b226df79860026c6a52e67daa10d7f0d57ec5b023288ec00c5e05f93523c894564e15b91770d3a07ae1cfbe861d15b37d4a0027e69c546ab112970993a3b03b + CoreDNS-1.12.1 + linux/amd64, go1.24.1, 707c7c1 + [INFO] plugin/ready: Still waiting on: "kubernetes" + [INFO] plugin/ready: Still waiting on: "kubernetes" + [INFO] plugin/ready: Still waiting on: "kubernetes" + [INFO] Reloading + [INFO] plugin/reload: Running configuration SHA512 = 
66f0a748f44f6317a6b122af3f457c9dd0ecaed8718ffbf95a69434523efd9ec4992e71f54c7edd5753646fe9af89ac2138b9c3ce14d4a0ba9d2372a55f120bb + [INFO] Reloading complete + [INFO] 127.0.0.1:41689 - 58969 "HINFO IN 8770958201303683883.8188649503347179348. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.030204599s + [INFO] 10.244.0.4:58643 - 43155 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000215775s + [INFO] 10.244.0.4:59547 - 55238 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000112626s + [INFO] 10.244.0.4:37914 - 29028 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000285908s + [INFO] 10.244.0.4:32954 - 58560 "AAAA IN netcat.default.svc.cluster.local. udp 50 false 512" NOERROR qr,aa,rd 143 0.000173565s + [INFO] 10.244.0.4:32954 - 58348 "A IN netcat.default.svc.cluster.local. udp 50 false 512" NOERROR qr,aa,rd 98 0.000199549s + [INFO] 10.244.0.4:59785 - 41066 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000085392s + [INFO] 10.244.0.4:53222 - 42460 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000084856s + [INFO] 10.244.0.4:57752 - 37148 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000068632s + + + >>> k8s: describe api server pod(s): + Name: kube-apiserver-bridge-999044 + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Node: bridge-999044/192.168.103.2 + Start Time: Sun, 02 Nov 2025 23:25:09 +0000 + Labels: component=kube-apiserver + tier=control-plane + Annotations: kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 192.168.103.2:8443 + kubernetes.io/config.hash: 6b289b71e10bae1125c4c8fddb291ba1 + kubernetes.io/config.mirror: 6b289b71e10bae1125c4c8fddb291ba1 + kubernetes.io/config.seen: 2025-11-02T23:25:09.211722848Z + kubernetes.io/config.source: file + Status: Running + SeccompProfile: RuntimeDefault + IP: 192.168.103.2 + IPs: + IP: 192.168.103.2 + Controlled By: Node/bridge-999044 + Containers: + kube-apiserver: + Container ID: docker://519b7735bbebd53327dcc056685f56923358bf7014dad94bd82c673e8577fd96 + Image: registry.k8s.io/kube-apiserver:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-apiserver@sha256:b9d7c117f8ac52bed4b13aeed973dc5198f9d93a926e6fe9e0b384f155baa902 + Port: 8443/TCP (probe-port) + Host Port: 8443/TCP (probe-port) + Command: + kube-apiserver + --advertise-address=192.168.103.2 + --allow-privileged=true + --authorization-mode=Node,RBAC + --client-ca-file=/var/lib/minikube/certs/ca.crt + --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + --enable-bootstrap-token-auth=true + --etcd-cafile=/var/lib/minikube/certs/etcd/ca.crt + --etcd-certfile=/var/lib/minikube/certs/apiserver-etcd-client.crt + --etcd-keyfile=/var/lib/minikube/certs/apiserver-etcd-client.key + --etcd-servers=https://127.0.0.1:2379 + --kubelet-client-certificate=/var/lib/minikube/certs/apiserver-kubelet-client.crt + --kubelet-client-key=/var/lib/minikube/certs/apiserver-kubelet-client.key + --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + --proxy-client-cert-file=/var/lib/minikube/certs/front-proxy-client.crt + --proxy-client-key-file=/var/lib/minikube/certs/front-proxy-client.key + --requestheader-allowed-names=front-proxy-client + 
--requestheader-client-ca-file=/var/lib/minikube/certs/front-proxy-ca.crt + --requestheader-extra-headers-prefix=X-Remote-Extra- + --requestheader-group-headers=X-Remote-Group + --requestheader-username-headers=X-Remote-User + --secure-port=8443 + --service-account-issuer=https://kubernetes.default.svc.cluster.local + --service-account-key-file=/var/lib/minikube/certs/sa.pub + --service-account-signing-key-file=/var/lib/minikube/certs/sa.key + --service-cluster-ip-range=10.96.0.0/12 + --tls-cert-file=/var/lib/minikube/certs/apiserver.crt + --tls-private-key-file=/var/lib/minikube/certs/apiserver.key + State: Running + Started: Sun, 02 Nov 2025 23:25:05 +0000 + Ready: True + Restart Count: 0 + Requests: + cpu: 250m + Liveness: http-get https://192.168.103.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=8 + Readiness: http-get https://192.168.103.2:probe-port/readyz delay=0s timeout=15s period=1s #success=1 #failure=3 + Startup: http-get https://192.168.103.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=24 + Environment: + Mounts: + /etc/ca-certificates from etc-ca-certificates (ro) + /etc/ssl/certs from ca-certs (ro) + /usr/local/share/ca-certificates from usr-local-share-ca-certificates (ro) + /usr/share/ca-certificates from usr-share-ca-certificates (ro) + /var/lib/minikube/certs from k8s-certs (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + ca-certs: + Type: HostPath (bare host directory volume) + Path: /etc/ssl/certs + HostPathType: DirectoryOrCreate + etc-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /etc/ca-certificates + HostPathType: DirectoryOrCreate + k8s-certs: + Type: HostPath (bare host directory volume) + Path: /var/lib/minikube/certs + HostPathType: DirectoryOrCreate + usr-local-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/local/share/ca-certificates + HostPathType: DirectoryOrCreate + usr-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/share/ca-certificates + HostPathType: DirectoryOrCreate + QoS Class: Burstable + Node-Selectors: + Tolerations: :NoExecute op=Exists + Events: + + + >>> k8s: api server logs: + I1102 23:25:05.339224 1 options.go:263] external host was not specified, using 192.168.103.2 + I1102 23:25:05.341104 1 server.go:150] Version: v1.34.1 + I1102 23:25:05.341122 1 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + W1102 23:25:05.651632 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=storage.k8s.io/v1alpha1 + W1102 23:25:05.651728 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=admissionregistration.k8s.io/v1alpha1 + W1102 23:25:05.651746 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=scheduling.k8s.io/v1alpha1 + W1102 23:25:05.651756 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=imagepolicy.k8s.io/v1alpha1 + W1102 23:25:05.651760 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this 
is unsupported, proceed at your own risk: api=internal.apiserver.k8s.io/v1alpha1 + W1102 23:25:05.651763 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=authentication.k8s.io/v1alpha1 + W1102 23:25:05.651765 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=certificates.k8s.io/v1alpha1 + W1102 23:25:05.651767 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=rbac.authorization.k8s.io/v1alpha1 + W1102 23:25:05.651770 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=coordination.k8s.io/v1alpha2 + W1102 23:25:05.651772 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=resource.k8s.io/v1alpha3 + W1102 23:25:05.651775 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=storagemigration.k8s.io/v1alpha1 + W1102 23:25:05.651778 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=node.k8s.io/v1alpha1 + W1102 23:25:05.662892 1 logging.go:55] [core] [Channel #1 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:05.664490 1 shared_informer.go:349] "Waiting for caches to sync" controller="node_authorizer" + I1102 23:25:05.672123 1 shared_informer.go:349] "Waiting for caches to sync" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:25:05.676079 1 plugins.go:157] Loaded 14 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,RuntimeClass,DefaultIngressClass,PodTopologyLabels,MutatingAdmissionPolicy,MutatingAdmissionWebhook. + I1102 23:25:05.676093 1 plugins.go:160] Loaded 13 validating admission controller(s) successfully in the following order: LimitRanger,ServiceAccount,PodSecurity,Priority,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,ClusterTrustBundleAttest,CertificateSubjectRestriction,ValidatingAdmissionPolicy,ValidatingAdmissionWebhook,ResourceQuota. + I1102 23:25:05.676241 1 instance.go:239] Using reconciler: lease + W1102 23:25:05.676954 1 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:05.913072 1 logging.go:55] [core] [Channel #11 SubChannel #12]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:05.918768 1 logging.go:55] [core] [Channel #21 SubChannel #22]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + I1102 23:25:05.925244 1 handler.go:285] Adding GroupVersion apiextensions.k8s.io v1 to ResourceManager + W1102 23:25:05.925255 1 genericapiserver.go:784] Skipping API apiextensions.k8s.io/v1beta1 because it has no resources. + I1102 23:25:05.927629 1 cidrallocator.go:197] starting ServiceCIDR Allocator Controller + W1102 23:25:05.927889 1 logging.go:55] [core] [Channel #27 SubChannel #28]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:05.931940 1 logging.go:55] [core] [Channel #31 SubChannel #32]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:05.936476 1 logging.go:55] [core] [Channel #35 SubChannel #36]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:05.946242 1 logging.go:55] [core] [Channel #39 SubChannel #40]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:05.948047 1 logging.go:55] [core] [Channel #43 SubChannel #44]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:05.953958 1 logging.go:55] [core] [Channel #47 SubChannel #48]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:05.958753 1 logging.go:55] [core] [Channel #51 SubChannel #52]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:05.962688 1 logging.go:55] [core] [Channel #55 SubChannel #56]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:05.966158 1 logging.go:55] [core] [Channel #59 SubChannel #60]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:05.969995 1 logging.go:55] [core] [Channel #63 SubChannel #64]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:05.973508 1 logging.go:55] [core] [Channel #67 SubChannel #68]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:05.977741 1 logging.go:55] [core] [Channel #71 SubChannel #72]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:05.981860 1 logging.go:55] [core] [Channel #75 SubChannel #76]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:05.986737 1 logging.go:55] [core] [Channel #79 SubChannel #80]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:05.990788 1 logging.go:55] [core] [Channel #83 SubChannel #84]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:05.996140 1 logging.go:55] [core] [Channel #87 SubChannel #88]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.000274 1 logging.go:55] [core] [Channel #91 SubChannel #92]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:06.018814 1 handler.go:285] Adding GroupVersion v1 to ResourceManager + I1102 23:25:06.019405 1 apis.go:112] API group "internal.apiserver.k8s.io" is not enabled, skipping. + W1102 23:25:06.022536 1 logging.go:55] [core] [Channel #95 SubChannel #96]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.028170 1 logging.go:55] [core] [Channel #99 SubChannel #100]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.033212 1 logging.go:55] [core] [Channel #103 SubChannel #104]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.041814 1 logging.go:55] [core] [Channel #107 SubChannel #108]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:06.045907 1 logging.go:55] [core] [Channel #111 SubChannel #112]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:06.049554 1 logging.go:55] [core] [Channel #115 SubChannel #116]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.053545 1 logging.go:55] [core] [Channel #119 SubChannel #120]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.058335 1 logging.go:55] [core] [Channel #123 SubChannel #124]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:06.063651 1 logging.go:55] [core] [Channel #127 SubChannel #128]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:06.068330 1 logging.go:55] [core] [Channel #131 SubChannel #132]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:06.073011 1 logging.go:55] [core] [Channel #135 SubChannel #136]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:06.076488 1 logging.go:55] [core] [Channel #139 SubChannel #140]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.081240 1 logging.go:55] [core] [Channel #143 SubChannel #144]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.090053 1 logging.go:55] [core] [Channel #147 SubChannel #148]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:06.093476 1 logging.go:55] [core] [Channel #151 SubChannel #152]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.097715 1 logging.go:55] [core] [Channel #155 SubChannel #156]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.102370 1 logging.go:55] [core] [Channel #159 SubChannel #160]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:06.106960 1 logging.go:55] [core] [Channel #163 SubChannel #164]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.110737 1 logging.go:55] [core] [Channel #167 SubChannel #168]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.114858 1 logging.go:55] [core] [Channel #171 SubChannel #172]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.119700 1 logging.go:55] [core] [Channel #175 SubChannel #176]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:06.124197 1 logging.go:55] [core] [Channel #179 SubChannel #180]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:06.128255 1 logging.go:55] [core] [Channel #183 SubChannel #184]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.132865 1 logging.go:55] [core] [Channel #187 SubChannel #188]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + I1102 23:25:06.136038 1 apis.go:112] API group "storagemigration.k8s.io" is not enabled, skipping. + W1102 23:25:06.136706 1 logging.go:55] [core] [Channel #191 SubChannel #192]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.141260 1 logging.go:55] [core] [Channel #195 SubChannel #196]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.147224 1 logging.go:55] [core] [Channel #199 SubChannel #200]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.150367 1 logging.go:55] [core] [Channel #203 SubChannel #204]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.154351 1 logging.go:55] [core] [Channel #207 SubChannel #208]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:06.157689 1 logging.go:55] [core] [Channel #211 SubChannel #212]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.162676 1 logging.go:55] [core] [Channel #215 SubChannel #216]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.167803 1 logging.go:55] [core] [Channel #219 SubChannel #220]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:06.172276 1 logging.go:55] [core] [Channel #223 SubChannel #224]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.176018 1 logging.go:55] [core] [Channel #227 SubChannel #228]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.179307 1 logging.go:55] [core] [Channel #231 SubChannel #232]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:06.183088 1 logging.go:55] [core] [Channel #235 SubChannel #236]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.186639 1 logging.go:55] [core] [Channel #239 SubChannel #240]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.196924 1 logging.go:55] [core] [Channel #243 SubChannel #244]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.200786 1 logging.go:55] [core] [Channel #247 SubChannel #248]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:06.204474 1 logging.go:55] [core] [Channel #251 SubChannel #252]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:06.219890 1 handler.go:285] Adding GroupVersion authentication.k8s.io v1 to ResourceManager + W1102 23:25:06.219903 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1beta1 because it has no resources. 
+ W1102 23:25:06.219907 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:06.220171 1 handler.go:285] Adding GroupVersion authorization.k8s.io v1 to ResourceManager + W1102 23:25:06.220176 1 genericapiserver.go:784] Skipping API authorization.k8s.io/v1beta1 because it has no resources. + I1102 23:25:06.220576 1 handler.go:285] Adding GroupVersion autoscaling v2 to ResourceManager + I1102 23:25:06.220906 1 handler.go:285] Adding GroupVersion autoscaling v1 to ResourceManager + W1102 23:25:06.220911 1 genericapiserver.go:784] Skipping API autoscaling/v2beta1 because it has no resources. + W1102 23:25:06.220927 1 genericapiserver.go:784] Skipping API autoscaling/v2beta2 because it has no resources. + I1102 23:25:06.221517 1 handler.go:285] Adding GroupVersion batch v1 to ResourceManager + W1102 23:25:06.221522 1 genericapiserver.go:784] Skipping API batch/v1beta1 because it has no resources. + I1102 23:25:06.221899 1 handler.go:285] Adding GroupVersion certificates.k8s.io v1 to ResourceManager + W1102 23:25:06.221905 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1beta1 because it has no resources. + W1102 23:25:06.221908 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:06.222214 1 handler.go:285] Adding GroupVersion coordination.k8s.io v1 to ResourceManager + W1102 23:25:06.222219 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1beta1 because it has no resources. + W1102 23:25:06.222222 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1alpha2 because it has no resources. + I1102 23:25:06.222468 1 handler.go:285] Adding GroupVersion discovery.k8s.io v1 to ResourceManager + W1102 23:25:06.222473 1 genericapiserver.go:784] Skipping API discovery.k8s.io/v1beta1 because it has no resources. + I1102 23:25:06.223479 1 handler.go:285] Adding GroupVersion networking.k8s.io v1 to ResourceManager + W1102 23:25:06.223486 1 genericapiserver.go:784] Skipping API networking.k8s.io/v1beta1 because it has no resources. + I1102 23:25:06.223688 1 handler.go:285] Adding GroupVersion node.k8s.io v1 to ResourceManager + W1102 23:25:06.223691 1 genericapiserver.go:784] Skipping API node.k8s.io/v1beta1 because it has no resources. + W1102 23:25:06.223694 1 genericapiserver.go:784] Skipping API node.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:06.224027 1 handler.go:285] Adding GroupVersion policy v1 to ResourceManager + W1102 23:25:06.224031 1 genericapiserver.go:784] Skipping API policy/v1beta1 because it has no resources. + I1102 23:25:06.224726 1 handler.go:285] Adding GroupVersion rbac.authorization.k8s.io v1 to ResourceManager + W1102 23:25:06.224732 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1beta1 because it has no resources. + W1102 23:25:06.224735 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:06.224983 1 handler.go:285] Adding GroupVersion scheduling.k8s.io v1 to ResourceManager + W1102 23:25:06.224987 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1beta1 because it has no resources. + W1102 23:25:06.224990 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:06.225950 1 handler.go:285] Adding GroupVersion storage.k8s.io v1 to ResourceManager + W1102 23:25:06.225959 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1beta1 because it has no resources. 
+ W1102 23:25:06.225963 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:06.226458 1 handler.go:285] Adding GroupVersion flowcontrol.apiserver.k8s.io v1 to ResourceManager + W1102 23:25:06.226462 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta3 because it has no resources. + W1102 23:25:06.226465 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta2 because it has no resources. + W1102 23:25:06.226467 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta1 because it has no resources. + I1102 23:25:06.228104 1 handler.go:285] Adding GroupVersion apps v1 to ResourceManager + W1102 23:25:06.228113 1 genericapiserver.go:784] Skipping API apps/v1beta2 because it has no resources. + W1102 23:25:06.228117 1 genericapiserver.go:784] Skipping API apps/v1beta1 because it has no resources. + I1102 23:25:06.229080 1 handler.go:285] Adding GroupVersion admissionregistration.k8s.io v1 to ResourceManager + W1102 23:25:06.229087 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1beta1 because it has no resources. + W1102 23:25:06.229090 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:06.229399 1 handler.go:285] Adding GroupVersion events.k8s.io v1 to ResourceManager + W1102 23:25:06.229404 1 genericapiserver.go:784] Skipping API events.k8s.io/v1beta1 because it has no resources. + W1102 23:25:06.229442 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta2 because it has no resources. + I1102 23:25:06.230390 1 handler.go:285] Adding GroupVersion resource.k8s.io v1 to ResourceManager + W1102 23:25:06.230396 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta1 because it has no resources. + W1102 23:25:06.230399 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1alpha3 because it has no resources. + W1102 23:25:06.231872 1 logging.go:55] [core] [Channel #255 SubChannel #256]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:06.235321 1 handler.go:285] Adding GroupVersion apiregistration.k8s.io v1 to ResourceManager + W1102 23:25:06.235333 1 genericapiserver.go:784] Skipping API apiregistration.k8s.io/v1beta1 because it has no resources. + I1102 23:25:06.466232 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:25:06.466240 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:25:06.466426 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/lib/minikube/certs/apiserver.crt::/var/lib/minikube/certs/apiserver.key" + I1102 23:25:06.466609 1 secure_serving.go:211] Serving securely on [::]:8443 + I1102 23:25:06.466623 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" + I1102 23:25:06.466647 1 apf_controller.go:377] Starting API Priority and Fairness config controller + I1102 23:25:06.466669 1 system_namespaces_controller.go:66] Starting system namespaces controller + I1102 23:25:06.466713 1 aggregator.go:169] waiting for initial CRD sync... 
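Note on the apiserver startup output above: the repeated grpc "addrConn.createTransport failed to connect to 127.0.0.1:2379 ... operation was canceled" warnings appear while the apiserver is bringing up its many etcd client connections and cancelling dial attempts it no longer needs; in this run they stop once "Serving securely on [::]:8443" is reached, so they read as startup noise rather than an etcd outage. Likewise, every "Skipping API <group>/<version> because it has no resources" line refers to a beta/alpha group-version that is compiled in but not enabled by default in this Kubernetes version. A hedged way to double-check both points from outside the node (the context name bridge-999044 is taken from the log; adjust for your profile):

    kubectl --context bridge-999044 api-versions
    kubectl --context bridge-999044 get --raw '/readyz?verbose' | grep etcd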
+ I1102 23:25:06.466739 1 crdregistration_controller.go:114] Starting crd-autoregister controller + I1102 23:25:06.466758 1 shared_informer.go:349] "Waiting for caches to sync" controller="crd-autoregister" + I1102 23:25:06.466780 1 remote_available_controller.go:425] Starting RemoteAvailability controller + I1102 23:25:06.466789 1 controller.go:78] Starting OpenAPI AggregationController + I1102 23:25:06.466795 1 cache.go:32] Waiting for caches to sync for RemoteAvailability controller + I1102 23:25:06.466703 1 local_available_controller.go:156] Starting LocalAvailability controller + I1102 23:25:06.466802 1 cache.go:32] Waiting for caches to sync for LocalAvailability controller + I1102 23:25:06.466818 1 controller.go:119] Starting legacy_token_tracking_controller + I1102 23:25:06.466824 1 shared_informer.go:349] "Waiting for caches to sync" controller="configmaps" + I1102 23:25:06.466829 1 cluster_authentication_trust_controller.go:459] Starting cluster_authentication_trust_controller controller + I1102 23:25:06.466835 1 shared_informer.go:349] "Waiting for caches to sync" controller="cluster_authentication_trust_controller" + I1102 23:25:06.466898 1 customresource_discovery_controller.go:294] Starting DiscoveryController + I1102 23:25:06.466692 1 dynamic_serving_content.go:135] "Starting controller" name="aggregator-proxy-cert::/var/lib/minikube/certs/front-proxy-client.crt::/var/lib/minikube/certs/front-proxy-client.key" + I1102 23:25:06.467080 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:25:06.467125 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:25:06.466999 1 apiservice_controller.go:100] Starting APIServiceRegistrationController + I1102 23:25:06.467349 1 cache.go:32] Waiting for caches to sync for APIServiceRegistrationController controller + I1102 23:25:06.467008 1 controller.go:80] Starting OpenAPI V3 AggregationController + I1102 23:25:06.467017 1 controller.go:142] Starting OpenAPI controller + I1102 23:25:06.467031 1 controller.go:90] Starting OpenAPI V3 controller + I1102 23:25:06.467038 1 naming_controller.go:299] Starting NamingConditionController + I1102 23:25:06.467044 1 establishing_controller.go:81] Starting EstablishingController + I1102 23:25:06.466683 1 gc_controller.go:78] Starting apiserver lease garbage collector + I1102 23:25:06.467053 1 nonstructuralschema_controller.go:195] Starting NonStructuralSchemaConditionController + I1102 23:25:06.467060 1 apiapproval_controller.go:189] Starting KubernetesAPIApprovalPolicyConformantConditionController + I1102 23:25:06.467082 1 repairip.go:210] Starting ipallocator-repair-controller + I1102 23:25:06.467821 1 shared_informer.go:349] "Waiting for caches to sync" controller="ipallocator-repair-controller" + I1102 23:25:06.467120 1 crd_finalizer.go:269] Starting CRDFinalizer + I1102 23:25:06.467329 1 default_servicecidr_controller.go:111] Starting kubernetes-service-cidr-controller + I1102 23:25:06.467856 1 shared_informer.go:349] "Waiting for caches to sync" controller="kubernetes-service-cidr-controller" + I1102 23:25:06.564965 1 shared_informer.go:356] "Caches are synced" controller="node_authorizer" + I1102 23:25:06.567107 1 shared_informer.go:356] "Caches are synced" controller="configmaps" + I1102 23:25:06.567137 1 cache.go:39] Caches are synced for LocalAvailability controller + I1102 23:25:06.567147 1 cache.go:39] Caches are synced for RemoteAvailability 
controller + I1102 23:25:06.567166 1 shared_informer.go:356] "Caches are synced" controller="crd-autoregister" + I1102 23:25:06.567194 1 aggregator.go:171] initial CRD sync complete... + I1102 23:25:06.567203 1 autoregister_controller.go:144] Starting autoregister controller + I1102 23:25:06.567206 1 cache.go:32] Waiting for caches to sync for autoregister controller + I1102 23:25:06.567209 1 cache.go:39] Caches are synced for autoregister controller + I1102 23:25:06.567225 1 apf_controller.go:382] Running API Priority and Fairness config worker + I1102 23:25:06.567236 1 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process + I1102 23:25:06.567549 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller + I1102 23:25:06.567679 1 shared_informer.go:356] "Caches are synced" controller="cluster_authentication_trust_controller" + I1102 23:25:06.567706 1 handler_discovery.go:451] Starting ResourceDiscoveryManager + I1102 23:25:06.568002 1 shared_informer.go:356] "Caches are synced" controller="kubernetes-service-cidr-controller" + I1102 23:25:06.568021 1 default_servicecidr_controller.go:166] Creating default ServiceCIDR with CIDRs: [10.96.0.0/12] + I1102 23:25:06.568006 1 shared_informer.go:356] "Caches are synced" controller="ipallocator-repair-controller" + I1102 23:25:06.572471 1 shared_informer.go:356] "Caches are synced" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:25:06.572490 1 policy_source.go:240] refreshing policies + E1102 23:25:06.619831 1 controller.go:148] "Unhandled Error" err="while syncing ConfigMap \"kube-system/kube-apiserver-legacy-service-account-token-tracking\", err: namespaces \"kube-system\" not found" logger="UnhandledError" + E1102 23:25:06.630729 1 controller.go:145] "Failed to ensure lease exists, will retry" err="namespaces \"kube-system\" not found" interval="200ms" + I1102 23:25:06.669134 1 controller.go:667] quota admission added evaluator for: namespaces + I1102 23:25:06.671394 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:25:06.671418 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True + I1102 23:25:06.677259 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:25:06.677477 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller + I1102 23:25:06.677514 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True + I1102 23:25:06.832227 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io + I1102 23:25:07.469844 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000 + I1102 23:25:07.473304 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000 + I1102 23:25:07.473314 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist. 
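Note on the two "namespaces \"kube-system\" not found" errors above: they are ordering noise from kubeadm bootstrap. The apiserver starts serving before the kube-system namespace exists, the affected controllers retry (the log even prints interval="200ms"), and the namespace is created moments later, as the subsequent "quota admission added evaluator for: namespaces" and leases.coordination.k8s.io lines show. If a run like this one were genuinely stuck at that point, a quick check from the host would be (context name taken from the log):

    kubectl --context bridge-999044 get ns kube-system
    kubectl --context bridge-999044 -n kube-system get leases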
+ I1102 23:25:07.735103 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io + I1102 23:25:07.759158 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io + I1102 23:25:07.873531 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"} + W1102 23:25:07.876635 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.103.2] + I1102 23:25:07.877205 1 controller.go:667] quota admission added evaluator for: endpoints + I1102 23:25:07.879524 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io + I1102 23:25:08.980655 1 controller.go:667] quota admission added evaluator for: serviceaccounts + I1102 23:25:10.590077 1 controller.go:667] quota admission added evaluator for: deployments.apps + I1102 23:25:10.594925 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"} + I1102 23:25:10.599223 1 controller.go:667] quota admission added evaluator for: daemonsets.apps + I1102 23:25:15.507881 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps + I1102 23:25:15.557967 1 controller.go:667] quota admission added evaluator for: replicasets.apps + I1102 23:25:15.610365 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:25:15.612770 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:25:47.829054 1 alloc.go:328] "allocated clusterIPs" service="default/netcat" clusterIPs={"IPv4":"10.98.128.27"} + E1102 23:25:55.935280 1 conn.go:339] Error on socket receive: read tcp 192.168.103.2:8443->192.168.103.1:49374: use of closed network connection + E1102 23:25:56.010019 1 conn.go:339] Error on socket receive: read tcp 192.168.103.2:8443->192.168.103.1:49390: use of closed network connection + E1102 23:25:56.081528 1 conn.go:339] Error on socket receive: read tcp 192.168.103.2:8443->192.168.103.1:49406: use of closed network connection + E1102 23:25:56.165797 1 conn.go:339] Error on socket receive: read tcp 192.168.103.2:8443->192.168.103.1:49430: use of closed network connection + E1102 23:26:01.236033 1 conn.go:339] Error on socket receive: read tcp 192.168.103.2:8443->192.168.103.1:49458: use of closed network connection + E1102 23:26:01.307798 1 conn.go:339] Error on socket receive: read tcp 192.168.103.2:8443->192.168.103.1:51102: use of closed network connection + E1102 23:26:01.373244 1 conn.go:339] Error on socket receive: read tcp 192.168.103.2:8443->192.168.103.1:51112: use of closed network connection + E1102 23:26:01.446315 1 conn.go:339] Error on socket receive: read tcp 192.168.103.2:8443->192.168.103.1:51132: use of closed network connection + E1102 23:26:01.515224 1 conn.go:339] Error on socket receive: read tcp 192.168.103.2:8443->192.168.103.1:51158: use of closed network connection + + + >>> host: /etc/cni: + /etc/cni/net.d/cni.lock + /etc/cni/net.d/87-podman-bridge.conflist.mk_disabled + { + "cniVersion": "0.4.0", + "name": "podman", + "plugins": [ + { + "type": "bridge", + "bridge": "cni-podman0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [{ "dst": "0.0.0.0/0" }], + "ranges": [ + [ + { + "subnet": "10.88.0.0/16", + "gateway": "10.88.0.1" + } + ] + ] + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type": "firewall" + }, + { + "type": "tuning" + } + ] 
+ } + /etc/cni/net.d/10-crio-bridge.conflist.disabled.mk_disabled + { + "cniVersion": "1.0.0", + "name": "crio", + "plugins": [ + { + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [ + { "dst": "0.0.0.0/0" }, + { "dst": "::/0" } + ], + "ranges": [ + [{ "subnet": "10.85.0.0/16" }], + [{ "subnet": "1100:200::/24" }] + ] + } + } + ] + } + /etc/cni/net.d/1-k8s.conflist + + { + "cniVersion": "0.4.0", + "name": "bridge", + "plugins": [ + { + "type": "bridge", + "bridge": "bridge", + "addIf": "true", + "isDefaultGateway": true, + "forceAddress": false, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "subnet": "10.244.0.0/16" + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type": "firewall" + } + ] + } + + + >>> host: ip a s: + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: tunl0@NONE: mtu 1480 qdisc noop state DOWN group default qlen 1000 + link/ipip 0.0.0.0 brd 0.0.0.0 + 3: eth0@if355: mtu 1500 qdisc noqueue state UP group default + link/ether aa:a7:8b:10:ec:73 brd ff:ff:ff:ff:ff:ff link-netnsid 0 + inet 192.168.103.2/24 brd 192.168.103.255 scope global eth0 + valid_lft forever preferred_lft forever + 4: docker0: mtu 1500 qdisc noqueue state DOWN group default + link/ether 02:e8:60:9d:1c:bc brd ff:ff:ff:ff:ff:ff + inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0 + valid_lft forever preferred_lft forever + 5: bridge: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether 52:98:69:27:78:dc brd ff:ff:ff:ff:ff:ff + inet 10.244.0.1/16 brd 10.244.255.255 scope global bridge + valid_lft forever preferred_lft forever + inet6 fe80::5098:69ff:fe27:78dc/64 scope link + valid_lft forever preferred_lft forever + 6: vethd832f7e5@if3: mtu 1500 qdisc noqueue master bridge state UP group default + link/ether 4a:cc:be:02:aa:7f brd ff:ff:ff:ff:ff:ff link-netnsid 1 + inet6 fe80::48cc:beff:fe02:aa7f/64 scope link + valid_lft forever preferred_lft forever + 8: vethad5472ef@if3: mtu 1500 qdisc noqueue master bridge state UP group default + link/ether d2:78:8d:6b:dd:07 brd ff:ff:ff:ff:ff:ff link-netnsid 2 + inet6 fe80::d078:8dff:fe6b:dd07/64 scope link + valid_lft forever preferred_lft forever + + + >>> host: ip r s: + default via 192.168.103.1 dev eth0 + 10.244.0.0/16 dev bridge proto kernel scope link src 10.244.0.1 + 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown + 192.168.103.0/24 dev eth0 proto kernel scope link src 192.168.103.2 + + + >>> host: iptables-save: + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:26:03 2025 + *mangle + :PREROUTING ACCEPT [24449:56339135] + :INPUT ACCEPT [24372:56333817] + :FORWARD ACCEPT [77:5318] + :OUTPUT ACCEPT [18825:5699300] + :POSTROUTING ACCEPT [18902:5704618] + :KUBE-IPTABLES-HINT - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-PROXY-CANARY - [0:0] + COMMIT + # Completed on Sun Nov 2 23:26:03 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:26:03 2025 + *filter + :INPUT ACCEPT [4385:1013895] + :FORWARD ACCEPT [0:0] + :OUTPUT ACCEPT [4340:1313573] + :CNI-ADMIN - [0:0] + :CNI-FORWARD - [0:0] + :DOCKER - [0:0] + :DOCKER-BRIDGE - [0:0] + :DOCKER-CT - [0:0] + :DOCKER-FORWARD - [0:0] + :DOCKER-ISOLATION-STAGE-1 - [0:0] + 
:DOCKER-ISOLATION-STAGE-2 - [0:0] + :DOCKER-USER - [0:0] + :KUBE-EXTERNAL-SERVICES - [0:0] + :KUBE-FIREWALL - [0:0] + :KUBE-FORWARD - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-PROXY-FIREWALL - [0:0] + :KUBE-SERVICES - [0:0] + -A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A INPUT -m comment --comment "kubernetes health check service ports" -j KUBE-NODEPORTS + -A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A INPUT -j KUBE-FIREWALL + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A FORWARD -m comment --comment "CNI firewall plugin rules" -j CNI-FORWARD + -A FORWARD -m comment --comment "CNI firewall plugin rules" -j CNI-FORWARD + -A FORWARD -j DOCKER-USER + -A FORWARD -j DOCKER-FORWARD + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -j KUBE-FIREWALL + -A CNI-FORWARD -m comment --comment "CNI firewall plugin admin overrides" -j CNI-ADMIN + -A CNI-FORWARD -d 10.244.0.2/32 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A CNI-FORWARD -s 10.244.0.2/32 -j ACCEPT + -A CNI-FORWARD -d 10.244.0.4/32 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A CNI-FORWARD -s 10.244.0.4/32 -j ACCEPT + -A DOCKER ! -i docker0 -o docker0 -j DROP + -A DOCKER-BRIDGE -o docker0 -j DOCKER + -A DOCKER-CT -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A DOCKER-FORWARD -j DOCKER-CT + -A DOCKER-FORWARD -j DOCKER-ISOLATION-STAGE-1 + -A DOCKER-FORWARD -j DOCKER-BRIDGE + -A DOCKER-FORWARD -i docker0 -j ACCEPT + -A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2 + -A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP + -A KUBE-FIREWALL ! -s 127.0.0.0/8 -d 127.0.0.0/8 -m comment --comment "block incoming localnet connections" -m conntrack ! 
--ctstate RELATED,ESTABLISHED,DNAT -j DROP + -A KUBE-FORWARD -m conntrack --ctstate INVALID -m nfacct --nfacct-name ct_state_invalid_dropped_pkts -j DROP + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + COMMIT + # Completed on Sun Nov 2 23:26:03 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:26:03 2025 + *nat + :PREROUTING ACCEPT [36:2160] + :INPUT ACCEPT [36:2160] + :OUTPUT ACCEPT [56:3360] + :POSTROUTING ACCEPT [56:3360] + :CNI-54526923ab07acc6312b581e - [0:0] + :CNI-7b69683a3fe9a0d65a0ab570 - [0:0] + :DOCKER - [0:0] + :DOCKER_OUTPUT - [0:0] + :DOCKER_POSTROUTING - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-MARK-MASQ - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-POSTROUTING - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-SEP-IT2ZTR26TO4XFPTO - [0:0] + :KUBE-SEP-N4G2XR5TDX7PQE7P - [0:0] + :KUBE-SEP-QPAQX3CSKXOU5VQU - [0:0] + :KUBE-SEP-XPTUC4FRAFEFDBGF - [0:0] + :KUBE-SEP-YIL6JZP7A3QYXJU2 - [0:0] + :KUBE-SERVICES - [0:0] + :KUBE-SVC-ERIFXISQEP7F7OF4 - [0:0] + :KUBE-SVC-JD5MR3NA4I4DYORP - [0:0] + :KUBE-SVC-NPX46M4PTMTKRN6Y - [0:0] + :KUBE-SVC-TCOU7JCQXEZGVUNU - [0:0] + :KUBE-SVC-WDP22YZC5S6MZWYX - [0:0] + -A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A PREROUTING -d 192.168.103.1/32 -j DOCKER_OUTPUT + -A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER + -A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -d 192.168.103.1/32 -j DOCKER_OUTPUT + -A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER + -A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING + -A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE + -A POSTROUTING -d 192.168.103.1/32 -j DOCKER_POSTROUTING + -A POSTROUTING -s 10.244.0.2/32 -m comment --comment "name: \"bridge\" id: \"bf5cc7d2cea194d8971604aceba1bffe67fda0a3fde9d6bb252255bb2933889b\"" -j CNI-7b69683a3fe9a0d65a0ab570 + -A POSTROUTING -s 10.244.0.4/32 -m comment --comment "name: \"bridge\" id: \"c3cf4d8e48f8829f4ed36d1bb56089d529e6dfeb0f0f59d7d67904fc49a0857f\"" -j CNI-54526923ab07acc6312b581e + -A CNI-54526923ab07acc6312b581e -d 10.244.0.0/16 -m comment --comment "name: \"bridge\" id: \"c3cf4d8e48f8829f4ed36d1bb56089d529e6dfeb0f0f59d7d67904fc49a0857f\"" -j ACCEPT + -A CNI-54526923ab07acc6312b581e ! -d 224.0.0.0/4 -m comment --comment "name: \"bridge\" id: \"c3cf4d8e48f8829f4ed36d1bb56089d529e6dfeb0f0f59d7d67904fc49a0857f\"" -j MASQUERADE + -A CNI-7b69683a3fe9a0d65a0ab570 -d 10.244.0.0/16 -m comment --comment "name: \"bridge\" id: \"bf5cc7d2cea194d8971604aceba1bffe67fda0a3fde9d6bb252255bb2933889b\"" -j ACCEPT + -A CNI-7b69683a3fe9a0d65a0ab570 ! -d 224.0.0.0/4 -m comment --comment "name: \"bridge\" id: \"bf5cc7d2cea194d8971604aceba1bffe67fda0a3fde9d6bb252255bb2933889b\"" -j MASQUERADE + -A DOCKER -i docker0 -j RETURN + -A DOCKER_OUTPUT -d 192.168.103.1/32 -p tcp -m tcp --dport 53 -j DNAT --to-destination 127.0.0.11:38867 + -A DOCKER_OUTPUT -d 192.168.103.1/32 -p udp -m udp --dport 53 -j DNAT --to-destination 127.0.0.11:48185 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p tcp -m tcp --sport 38867 -j SNAT --to-source 192.168.103.1:53 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p udp -m udp --sport 48185 -j SNAT --to-source 192.168.103.1:53 + -A KUBE-MARK-MASQ -j MARK --set-xmark 0x4000/0x4000 + -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN + -A KUBE-POSTROUTING -j MARK --set-xmark 0x4000/0x0 + -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE --random-fully + -A KUBE-SEP-IT2ZTR26TO4XFPTO -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-MARK-MASQ + -A KUBE-SEP-IT2ZTR26TO4XFPTO -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m tcp -j DNAT --to-destination 10.244.0.2:53 + -A KUBE-SEP-N4G2XR5TDX7PQE7P -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:metrics" -j KUBE-MARK-MASQ + -A KUBE-SEP-N4G2XR5TDX7PQE7P -p tcp -m comment --comment "kube-system/kube-dns:metrics" -m tcp -j DNAT --to-destination 10.244.0.2:9153 + -A KUBE-SEP-QPAQX3CSKXOU5VQU -s 192.168.103.2/32 -m comment --comment "default/kubernetes:https" -j KUBE-MARK-MASQ + -A KUBE-SEP-QPAQX3CSKXOU5VQU -p tcp -m comment --comment "default/kubernetes:https" -m tcp -j DNAT --to-destination 192.168.103.2:8443 + -A KUBE-SEP-XPTUC4FRAFEFDBGF -s 10.244.0.4/32 -m comment --comment "default/netcat" -j KUBE-MARK-MASQ + -A KUBE-SEP-XPTUC4FRAFEFDBGF -p tcp -m comment --comment "default/netcat" -m tcp -j DNAT --to-destination 10.244.0.4:8080 + -A KUBE-SEP-YIL6JZP7A3QYXJU2 -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-MARK-MASQ + -A KUBE-SEP-YIL6JZP7A3QYXJU2 -p udp -m comment --comment "kube-system/kube-dns:dns" -m udp -j DNAT --to-destination 10.244.0.2:53 + -A KUBE-SERVICES -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-NPX46M4PTMTKRN6Y + -A KUBE-SERVICES -d 10.98.128.27/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-SVC-WDP22YZC5S6MZWYX + -A KUBE-SERVICES -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-SVC-TCOU7JCQXEZGVUNU + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-SVC-ERIFXISQEP7F7OF4 + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-SVC-JD5MR3NA4I4DYORP + -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS + -A KUBE-SVC-ERIFXISQEP7F7OF4 ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-ERIFXISQEP7F7OF4 -m comment --comment "kube-system/kube-dns:dns-tcp -> 10.244.0.2:53" -j KUBE-SEP-IT2ZTR26TO4XFPTO + -A KUBE-SVC-JD5MR3NA4I4DYORP ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-MARK-MASQ + -A KUBE-SVC-JD5MR3NA4I4DYORP -m comment --comment "kube-system/kube-dns:metrics -> 10.244.0.2:9153" -j KUBE-SEP-N4G2XR5TDX7PQE7P + -A KUBE-SVC-NPX46M4PTMTKRN6Y ! -s 10.244.0.0/16 -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-MARK-MASQ + -A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https -> 192.168.103.2:8443" -j KUBE-SEP-QPAQX3CSKXOU5VQU + -A KUBE-SVC-TCOU7JCQXEZGVUNU ! 
-s 10.244.0.0/16 -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-TCOU7JCQXEZGVUNU -m comment --comment "kube-system/kube-dns:dns -> 10.244.0.2:53" -j KUBE-SEP-YIL6JZP7A3QYXJU2 + -A KUBE-SVC-WDP22YZC5S6MZWYX ! -s 10.244.0.0/16 -d 10.98.128.27/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-MARK-MASQ + -A KUBE-SVC-WDP22YZC5S6MZWYX -m comment --comment "default/netcat -> 10.244.0.4:8080" -j KUBE-SEP-XPTUC4FRAFEFDBGF + COMMIT + # Completed on Sun Nov 2 23:26:03 2025 + + + >>> host: iptables table nat: + Chain PREROUTING (policy ACCEPT 37 packets, 2220 bytes) + pkts bytes target prot opt in out source destination + 56 3570 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 3 255 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.103.1 + 45 2700 DOCKER 0 -- * * 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type LOCAL + + Chain INPUT (policy ACCEPT 37 packets, 2220 bytes) + pkts bytes target prot opt in out source destination + + Chain OUTPUT (policy ACCEPT 57 packets, 3420 bytes) + pkts bytes target prot opt in out source destination + 547 45460 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 353 33934 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.103.1 + 95 5700 DOCKER 0 -- * * 0.0.0.0/0 !127.0.0.0/8 ADDRTYPE match dst-type LOCAL + + Chain POSTROUTING (policy ACCEPT 57 packets, 3420 bytes) + pkts bytes target prot opt in out source destination + 556 46135 KUBE-POSTROUTING 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */ + 0 0 MASQUERADE 0 -- * !docker0 172.17.0.0/16 0.0.0.0/0 + 0 0 DOCKER_POSTROUTING 0 -- * * 0.0.0.0/0 192.168.103.1 + 3 180 CNI-7b69683a3fe9a0d65a0ab570 0 -- * * 10.244.0.2 0.0.0.0/0 /* name: "bridge" id: "bf5cc7d2cea194d8971604aceba1bffe67fda0a3fde9d6bb252255bb2933889b" */ + 9 675 CNI-54526923ab07acc6312b581e 0 -- * * 10.244.0.4 0.0.0.0/0 /* name: "bridge" id: "c3cf4d8e48f8829f4ed36d1bb56089d529e6dfeb0f0f59d7d67904fc49a0857f" */ + + Chain CNI-54526923ab07acc6312b581e (1 references) + pkts bytes target prot opt in out source destination + 9 675 ACCEPT 0 -- * * 0.0.0.0/0 10.244.0.0/16 /* name: "bridge" id: "c3cf4d8e48f8829f4ed36d1bb56089d529e6dfeb0f0f59d7d67904fc49a0857f" */ + 0 0 MASQUERADE 0 -- * * 0.0.0.0/0 !224.0.0.0/4 /* name: "bridge" id: "c3cf4d8e48f8829f4ed36d1bb56089d529e6dfeb0f0f59d7d67904fc49a0857f" */ + + Chain CNI-7b69683a3fe9a0d65a0ab570 (1 references) + pkts bytes target prot opt in out source destination + 0 0 ACCEPT 0 -- * * 0.0.0.0/0 10.244.0.0/16 /* name: "bridge" id: "bf5cc7d2cea194d8971604aceba1bffe67fda0a3fde9d6bb252255bb2933889b" */ + 3 180 MASQUERADE 0 -- * * 0.0.0.0/0 !224.0.0.0/4 /* name: "bridge" id: "bf5cc7d2cea194d8971604aceba1bffe67fda0a3fde9d6bb252255bb2933889b" */ + + Chain DOCKER (2 references) + pkts bytes target prot opt in out source destination + 0 0 RETURN 0 -- docker0 * 0.0.0.0/0 0.0.0.0/0 + + Chain DOCKER_OUTPUT (2 references) + pkts bytes target prot opt in out source destination + 0 0 DNAT 6 -- * * 0.0.0.0/0 192.168.103.1 tcp dpt:53 to:127.0.0.11:38867 + 356 34189 DNAT 17 -- * * 0.0.0.0/0 192.168.103.1 udp dpt:53 to:127.0.0.11:48185 + + Chain DOCKER_POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 0 0 SNAT 6 -- * * 127.0.0.11 0.0.0.0/0 tcp spt:38867 to:192.168.103.1:53 + 0 0 SNAT 17 -- * * 127.0.0.11 0.0.0.0/0 udp spt:48185 to:192.168.103.1:53 + + Chain KUBE-KUBELET-CANARY (0 references) + pkts bytes target prot opt in out source 
destination + + Chain KUBE-MARK-MASQ (10 references) + pkts bytes target prot opt in out source destination + 1 60 MARK 0 -- * * 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000 + + Chain KUBE-NODEPORTS (1 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 66 4095 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0 mark match ! 0x4000/0x4000 + 1 60 MARK 0 -- * * 0.0.0.0/0 0.0.0.0/0 MARK xor 0x4000 + 1 60 MASQUERADE 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ random-fully + + Chain KUBE-PROXY-CANARY (0 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-SEP-IT2ZTR26TO4XFPTO (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ tcp to:10.244.0.2:53 + + Chain KUBE-SEP-N4G2XR5TDX7PQE7P (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:metrics */ + 0 0 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:metrics */ tcp to:10.244.0.2:9153 + + Chain KUBE-SEP-QPAQX3CSKXOU5VQU (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 0 -- * * 192.168.103.2 0.0.0.0/0 /* default/kubernetes:https */ + 4 240 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https */ tcp to:192.168.103.2:8443 + + Chain KUBE-SEP-XPTUC4FRAFEFDBGF (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 0 -- * * 10.244.0.4 0.0.0.0/0 /* default/netcat */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat */ tcp to:10.244.0.4:8080 + + Chain KUBE-SEP-YIL6JZP7A3QYXJU2 (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:dns */ + 8 615 DNAT 17 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns */ udp to:10.244.0.2:53 + + Chain KUBE-SERVICES (2 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-SVC-NPX46M4PTMTKRN6Y 6 -- * * 0.0.0.0/0 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 1 60 KUBE-SVC-WDP22YZC5S6MZWYX 6 -- * * 0.0.0.0/0 10.98.128.27 /* default/netcat cluster IP */ tcp dpt:8080 + 8 615 KUBE-SVC-TCOU7JCQXEZGVUNU 17 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 1 60 KUBE-SVC-ERIFXISQEP7F7OF4 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 0 0 KUBE-SVC-JD5MR3NA4I4DYORP 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 93 5580 KUBE-NODEPORTS 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL + + Chain KUBE-SVC-ERIFXISQEP7F7OF4 (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 1 60 KUBE-SEP-IT2ZTR26TO4XFPTO 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp -> 10.244.0.2:53 */ + + Chain KUBE-SVC-JD5MR3NA4I4DYORP (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 0 0 KUBE-SEP-N4G2XR5TDX7PQE7P 0 -- * * 
0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:metrics -> 10.244.0.2:9153 */ + + Chain KUBE-SVC-NPX46M4PTMTKRN6Y (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 4 240 KUBE-SEP-QPAQX3CSKXOU5VQU 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https -> 192.168.103.2:8443 */ + + Chain KUBE-SVC-TCOU7JCQXEZGVUNU (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 17 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 8 615 KUBE-SEP-YIL6JZP7A3QYXJU2 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns -> 10.244.0.2:53 */ + + Chain KUBE-SVC-WDP22YZC5S6MZWYX (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.98.128.27 /* default/netcat cluster IP */ tcp dpt:8080 + 1 60 KUBE-SEP-XPTUC4FRAFEFDBGF 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat -> 10.244.0.4:8080 */ + + + >>> k8s: describe kube-proxy daemon set: + Name: kube-proxy + Namespace: kube-system + Selector: k8s-app=kube-proxy + Node-Selector: kubernetes.io/os=linux + Labels: k8s-app=kube-proxy + Annotations: deprecated.daemonset.template.generation: 1 + Desired Number of Nodes Scheduled: 1 + Current Number of Nodes Scheduled: 1 + Number of Nodes Scheduled with Up-to-date Pods: 1 + Number of Nodes Scheduled with Available Pods: 1 + Number of Nodes Misscheduled: 0 + Pods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed + Pod Template: + Labels: k8s-app=kube-proxy + Service Account: kube-proxy + Containers: + kube-proxy: + Image: registry.k8s.io/kube-proxy:v1.34.1 + Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + --hostname-override=$(NODE_NAME) + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + Priority Class Name: system-node-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 48s daemonset-controller Created pod: kube-proxy-xmzpf + + + >>> k8s: describe kube-proxy pod(s): + Name: kube-proxy-xmzpf + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Service Account: kube-proxy + Node: bridge-999044/192.168.103.2 + Start Time: Sun, 02 Nov 2025 23:25:15 +0000 + Labels: controller-revision-hash=66486579fc + k8s-app=kube-proxy + pod-template-generation=1 + Annotations: + Status: Running + IP: 192.168.103.2 + IPs: + IP: 192.168.103.2 + Controlled By: DaemonSet/kube-proxy + Containers: + kube-proxy: + Container ID: docker://2b4bb93d0766fec8c5d5c735cbafdd8c0e2823b0a08f7881ce1915da725d299e + Image: registry.k8s.io/kube-proxy:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-proxy@sha256:913cc83ca0b5588a81d86ce8eedeb3ed1e9c1326e81852a1ea4f622b74ff749a + Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + --hostname-override=$(NODE_NAME) + State: 
Running + Started: Sun, 02 Nov 2025 23:25:16 +0000 + Ready: True + Restart Count: 0 + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-s5zxl (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + kube-api-access-s5zxl: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + node.kubernetes.io/disk-pressure:NoSchedule op=Exists + node.kubernetes.io/memory-pressure:NoSchedule op=Exists + node.kubernetes.io/network-unavailable:NoSchedule op=Exists + node.kubernetes.io/not-ready:NoExecute op=Exists + node.kubernetes.io/pid-pressure:NoSchedule op=Exists + node.kubernetes.io/unreachable:NoExecute op=Exists + node.kubernetes.io/unschedulable:NoSchedule op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 48s default-scheduler Successfully assigned kube-system/kube-proxy-xmzpf to bridge-999044 + Warning FailedMount 48s kubelet MountVolume.SetUp failed for volume "kube-api-access-s5zxl" : configmap "kube-root-ca.crt" not found + Normal Pulled 47s kubelet Container image "registry.k8s.io/kube-proxy:v1.34.1" already present on machine + Normal Created 47s kubelet Created container: kube-proxy + Normal Started 47s kubelet Started container kube-proxy + + + >>> k8s: kube-proxy logs: + I1102 23:25:16.753165 1 server_linux.go:53] "Using iptables proxy" + I1102 23:25:16.793824 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache" + I1102 23:25:16.894031 1 shared_informer.go:356] "Caches are synced" controller="node informer cache" + I1102 23:25:16.894048 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.103.2"] + E1102 23:25:16.894088 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. 
Consider using `--nodeport-addresses primary`" + I1102 23:25:16.909409 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4" + I1102 23:25:16.909432 1 server_linux.go:132] "Using iptables Proxier" + I1102 23:25:16.916107 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4" + I1102 23:25:16.916410 1 server.go:527] "Version info" version="v1.34.1" + I1102 23:25:16.916480 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + I1102 23:25:16.917611 1 config.go:200] "Starting service config controller" + I1102 23:25:16.917658 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config" + I1102 23:25:16.917828 1 config.go:106] "Starting endpoint slice config controller" + I1102 23:25:16.917834 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config" + I1102 23:25:16.917886 1 config.go:309] "Starting node config controller" + I1102 23:25:16.917888 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config" + I1102 23:25:16.917892 1 shared_informer.go:356] "Caches are synced" controller="node config" + I1102 23:25:16.918151 1 config.go:403] "Starting serviceCIDR config controller" + I1102 23:25:16.918166 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config" + I1102 23:25:17.017785 1 shared_informer.go:356] "Caches are synced" controller="service config" + I1102 23:25:17.018018 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config" + I1102 23:25:17.018331 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config" + + + >>> host: kubelet daemon status: + ● kubelet.service - kubelet: The Kubernetes Node Agent + Loaded: loaded (]8;;file://bridge-999044/lib/systemd/system/kubelet.service/lib/systemd/system/kubelet.service]8;;; disabled; preset: enabled) + Drop-In: /etc/systemd/system/kubelet.service.d + └─]8;;file://bridge-999044/etc/systemd/system/kubelet.service.d/10-kubeadm.conf10-kubeadm.conf]8;; + Active: active (running) since Sun 2025-11-02 23:25:09 UTC; 54s ago + Docs: ]8;;http://kubernetes.io/docs/http://kubernetes.io/docs/]8;; + Main PID: 2217 (kubelet) + Tasks: 15 (limit: 629145) + Memory: 32.8M + CPU: 1.015s + CGroup: /system.slice/kubelet.service + └─2217 /var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=bridge-999044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.103.2 + + Nov 02 23:25:26 bridge-999044 kubelet[2217]: I1102 23:25:26.908958 2217 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-g5p5m\" (UniqueName: \"kubernetes.io/projected/612d9bce-8601-4282-8525-d923bf5cdec8-kube-api-access-g5p5m\") on node \"bridge-999044\" DevicePath \"\"" + Nov 02 23:25:26 bridge-999044 kubelet[2217]: I1102 23:25:26.908974 2217 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/612d9bce-8601-4282-8525-d923bf5cdec8-config-volume\") on node \"bridge-999044\" DevicePath \"\"" + Nov 02 23:25:27 bridge-999044 kubelet[2217]: I1102 23:25:27.332140 2217 scope.go:117] "RemoveContainer" containerID="d3f9649c2d7cbf6aeb8f9caed4f711369f3aae9273c4b66d05ee483c1ac4b08a" + Nov 02 23:25:27 bridge-999044 
kubelet[2217]: I1102 23:25:27.339886 2217 scope.go:117] "RemoveContainer" containerID="d3f9649c2d7cbf6aeb8f9caed4f711369f3aae9273c4b66d05ee483c1ac4b08a" + Nov 02 23:25:27 bridge-999044 kubelet[2217]: E1102 23:25:27.340366 2217 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: d3f9649c2d7cbf6aeb8f9caed4f711369f3aae9273c4b66d05ee483c1ac4b08a" containerID="d3f9649c2d7cbf6aeb8f9caed4f711369f3aae9273c4b66d05ee483c1ac4b08a" + Nov 02 23:25:27 bridge-999044 kubelet[2217]: I1102 23:25:27.340390 2217 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"d3f9649c2d7cbf6aeb8f9caed4f711369f3aae9273c4b66d05ee483c1ac4b08a"} err="failed to get container status \"d3f9649c2d7cbf6aeb8f9caed4f711369f3aae9273c4b66d05ee483c1ac4b08a\": rpc error: code = Unknown desc = Error response from daemon: No such container: d3f9649c2d7cbf6aeb8f9caed4f711369f3aae9273c4b66d05ee483c1ac4b08a" + Nov 02 23:25:29 bridge-999044 kubelet[2217]: I1102 23:25:29.220841 2217 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="612d9bce-8601-4282-8525-d923bf5cdec8" path="/var/lib/kubelet/pods/612d9bce-8601-4282-8525-d923bf5cdec8/volumes" + Nov 02 23:25:47 bridge-999044 kubelet[2217]: I1102 23:25:47.397305 2217 scope.go:117] "RemoveContainer" containerID="60f616688ebec4181728cff80d58aa20da2c82e269322d1a4eb9a6676a8d442b" + Nov 02 23:25:47 bridge-999044 kubelet[2217]: I1102 23:25:47.832526 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6l6h\" (UniqueName: \"kubernetes.io/projected/bc331d87-4758-44bb-beed-a74c7b6c1f89-kube-api-access-g6l6h\") pod \"netcat-cd4db9dbf-fjnrs\" (UID: \"bc331d87-4758-44bb-beed-a74c7b6c1f89\") " pod="default/netcat-cd4db9dbf-fjnrs" + Nov 02 23:25:56 bridge-999044 kubelet[2217]: E1102 23:25:56.081553 2217 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp [::1]:48420->[::1]:34935: write tcp [::1]:48420->[::1]:34935: write: broken pipe + + + >>> host: kubelet daemon config: + # ]8;;file://bridge-999044/lib/systemd/system/kubelet.service/lib/systemd/system/kubelet.service]8;; + [Unit] + Description=kubelet: The Kubernetes Node Agent + Documentation=http://kubernetes.io/docs/ + StartLimitIntervalSec=0 + + [Service] + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet + Restart=always + # Tuned for local dev: faster than upstream default (10s), but slower than systemd default (100ms) + RestartSec=600ms + + [Install] + WantedBy=multi-user.target + + # ]8;;file://bridge-999044/etc/systemd/system/kubelet.service.d/10-kubeadm.conf/etc/systemd/system/kubelet.service.d/10-kubeadm.conf]8;; + [Unit] + Wants=docker.socket + + [Service] + ExecStart= + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=bridge-999044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.103.2 + + [Install] + + + >>> k8s: kubelet logs: + Nov 02 23:25:01 bridge-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 404. 
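Note on the kubelet unit shown under ">>> host: kubelet daemon config:" above: the drop-in 10-kubeadm.conf first sets an empty "ExecStart=" and then the full command line. That is the standard systemd idiom for replacing, rather than appending to, the base unit's ExecStart, and it is how minikube injects its --bootstrap-kubeconfig/--config/--hostname-override/--node-ip flags. The merged unit can be inspected on the node itself; a sketch, assuming shell access to the node via the profile name seen in the log:

    minikube -p bridge-999044 ssh -- systemctl cat kubelet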
+ Nov 02 23:25:01 bridge-999044 kubelet[1536]: E1102 23:25:01.245612 1536 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:01 bridge-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:01 bridge-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:01 bridge-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 1. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:01 bridge-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 457 and the job result is done. + Nov 02 23:25:01 bridge-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 457. + Nov 02 23:25:02 bridge-999044 kubelet[1684]: E1102 23:25:02.035885 1684 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:02 bridge-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:02 bridge-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:02 bridge-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 2. 
+ ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:02 bridge-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 510 and the job result is done. + Nov 02 23:25:02 bridge-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 510. + Nov 02 23:25:02 bridge-999044 kubelet[1707]: E1102 23:25:02.781016 1707 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:02 bridge-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:02 bridge-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:03 bridge-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 3. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:03 bridge-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 563 and the job result is done. + Nov 02 23:25:03 bridge-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 563. 
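Note on the restart cycle recorded in this stretch of the journal: each "failed to load kubelet config file ... /var/lib/kubelet/config.yaml: no such file or directory" exit is the expected pre-init loop. kubelet.service is started before kubeadm writes /var/lib/kubelet/config.yaml, so the kubelet exits and systemd restarts it every 600ms (the RestartSec shown in the unit above); the counter keeps climbing just below until 23:25:04, when a kubelet that logs "Kubelet version" v1.34.1 finally stays up. To see how many restarts that cost on a given run (a sketch, using the profile name from the log):

    minikube -p bridge-999044 ssh -- systemctl show kubelet -p NRestarts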
+ Nov 02 23:25:03 bridge-999044 kubelet[1715]: E1102 23:25:03.529267 1715 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:03 bridge-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:03 bridge-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:04 bridge-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 4. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:04 bridge-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 616 and the job result is done. + Nov 02 23:25:04 bridge-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 616. + Nov 02 23:25:04 bridge-999044 kubelet[1725]: E1102 23:25:04.277030 1725 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:04 bridge-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:04 bridge-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:04 bridge-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. 
+ ░░  + ░░ The job identifier is 669 and the job result is done. + Nov 02 23:25:04 bridge-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 670. + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.619633 1756 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.619696 1756 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.619720 1756 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.619729 1756 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.620050 1756 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.625228 1756 certificate_manager.go:596] "Failed while requesting a signed certificate from the control plane" err="cannot create certificate signing request: Post \"https://192.168.103.2:8443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 192.168.103.2:8443: connect: connection refused" logger="kubernetes.io/kube-apiserver-client-kubelet.UnhandledError" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.625390 1756 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.628412 1756 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.632048 1756 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. 
Defaulting to /" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.632069 1756 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.632217 1756 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.632233 1756 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"bridge-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.632328 1756 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.632333 1756 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.632406 1756 container_manager_linux.go:315] "Creating Dynamic Resource Allocation (DRA) manager" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.633532 1756 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.633674 1756 kubelet.go:475] "Attempting to sync node with API server" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.633687 1756 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.633709 1756 kubelet.go:387] "Adding apiserver pod source" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.633733 1756 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.634216 1756 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: Get \"https://192.168.103.2:8443/api/v1/nodes?fieldSelector=metadata.name%3Dbridge-999044&limit=500&resourceVersion=0\": dial tcp 192.168.103.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.634308 1756 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.634698 1756 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.634720 1756 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection 
feature gate is disabled" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: W1102 23:25:04.634759 1756 probe.go:272] Flexvolume plugin directory at /usr/libexec/kubernetes/kubelet-plugins/volume/exec/ does not exist. Recreating. + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.634306 1756 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: Get \"https://192.168.103.2:8443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 192.168.103.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.635394 1756 server.go:1262] "Started kubelet" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.635597 1756 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.635686 1756 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.636475 1756 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.636662 1756 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.636151 1756 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://192.168.103.2:8443/api/v1/namespaces/default/events\": dial tcp 192.168.103.2:8443: connect: connection refused" event="&Event{ObjectMeta:{bridge-999044.1874542c934e277f default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:bridge-999044,UID:bridge-999044,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:bridge-999044,},FirstTimestamp:2025-11-02 23:25:04.635373439 +0000 UTC m=+0.166815562,LastTimestamp:2025-11-02 23:25:04.635373439 +0000 UTC m=+0.166815562,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:bridge-999044,}" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.635903 1756 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.636758 1756 volume_manager.go:313] "Starting Kubelet Volume Manager" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.636019 1756 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.639219 1756 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"bridge-999044\" not found" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.639631 1756 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.639783 1756 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.639887 1756 server.go:310] "Adding debug handlers to kubelet server" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.640336 1756 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: Get 
\"https://192.168.103.2:8443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 192.168.103.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.641411 1756 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.103.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/bridge-999044?timeout=10s\": dial tcp 192.168.103.2:8443: connect: connection refused" interval="200ms" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.642170 1756 factory.go:223] Registration of the systemd container factory successfully + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.642296 1756 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.644568 1756 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.650303 1756 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.650375 1756 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.650427 1756 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.651335 1756 policy_none.go:49] "None policy: Start" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.651410 1756 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.651447 1756 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.651874 1756 policy_none.go:47] "Start" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.654604 1756 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.656040 1756 kubelet_network_linux.go:54] "Initialized iptables rules." 
protocol="IPv6" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.656143 1756 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.656376 1756 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.656494 1756 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.656637 1756 reflector.go:205] "Failed to watch" err="failed to list *v1.RuntimeClass: Get \"https://192.168.103.2:8443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 192.168.103.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.677103 1756 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.677188 1756 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.677196 1756 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.677341 1756 plugin_manager.go:118] "Starting Kubelet Plugin Manager" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.677658 1756 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="no imagefs label for configured runtime" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.677688 1756 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"bridge-999044\" not found" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.773970 1756 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"bridge-999044\" not found" node="bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.778284 1756 kubelet_node_status.go:75] "Attempting to register node" node="bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.778518 1756 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.103.2:8443/api/v1/nodes\": dial tcp 192.168.103.2:8443: connect: connection refused" node="bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.785824 1756 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"bridge-999044\" not found" node="bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.789313 1756 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"bridge-999044\" not found" node="bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.792005 1756 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"bridge-999044\" not found" node="bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.842594 1756 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://192.168.103.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/bridge-999044?timeout=10s\": dial tcp 192.168.103.2:8443: connect: connection refused" interval="400ms" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.942138 1756 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/6b289b71e10bae1125c4c8fddb291ba1-k8s-certs\") pod \"kube-apiserver-bridge-999044\" (UID: \"6b289b71e10bae1125c4c8fddb291ba1\") " pod="kube-system/kube-apiserver-bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.942177 1756 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/6b289b71e10bae1125c4c8fddb291ba1-usr-local-share-ca-certificates\") pod \"kube-apiserver-bridge-999044\" (UID: \"6b289b71e10bae1125c4c8fddb291ba1\") " pod="kube-system/kube-apiserver-bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.943803 1756 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/cb4e8e534f4e418d2e607d58d17747bf-ca-certs\") pod \"kube-controller-manager-bridge-999044\" (UID: \"cb4e8e534f4e418d2e607d58d17747bf\") " pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.943904 1756 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/cb4e8e534f4e418d2e607d58d17747bf-flexvolume-dir\") pod \"kube-controller-manager-bridge-999044\" (UID: \"cb4e8e534f4e418d2e607d58d17747bf\") " pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.944026 1756 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/cb4e8e534f4e418d2e607d58d17747bf-k8s-certs\") pod \"kube-controller-manager-bridge-999044\" (UID: \"cb4e8e534f4e418d2e607d58d17747bf\") " pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.944046 1756 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/cb4e8e534f4e418d2e607d58d17747bf-kubeconfig\") pod \"kube-controller-manager-bridge-999044\" (UID: \"cb4e8e534f4e418d2e607d58d17747bf\") " pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.944064 1756 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/6b289b71e10bae1125c4c8fddb291ba1-ca-certs\") pod \"kube-apiserver-bridge-999044\" (UID: \"6b289b71e10bae1125c4c8fddb291ba1\") " pod="kube-system/kube-apiserver-bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.944083 1756 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/6b289b71e10bae1125c4c8fddb291ba1-etc-ca-certificates\") pod \"kube-apiserver-bridge-999044\" (UID: \"6b289b71e10bae1125c4c8fddb291ba1\") " pod="kube-system/kube-apiserver-bridge-999044" + Nov 02 23:25:04 bridge-999044 
kubelet[1756]: I1102 23:25:04.944110 1756 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/cb4e8e534f4e418d2e607d58d17747bf-etc-ca-certificates\") pod \"kube-controller-manager-bridge-999044\" (UID: \"cb4e8e534f4e418d2e607d58d17747bf\") " pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.944127 1756 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/cb4e8e534f4e418d2e607d58d17747bf-usr-local-share-ca-certificates\") pod \"kube-controller-manager-bridge-999044\" (UID: \"cb4e8e534f4e418d2e607d58d17747bf\") " pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.944187 1756 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/36232eba3e27968f071177606a27e6a5-etcd-certs\") pod \"etcd-bridge-999044\" (UID: \"36232eba3e27968f071177606a27e6a5\") " pod="kube-system/etcd-bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.944208 1756 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/36232eba3e27968f071177606a27e6a5-etcd-data\") pod \"etcd-bridge-999044\" (UID: \"36232eba3e27968f071177606a27e6a5\") " pod="kube-system/etcd-bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.944224 1756 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/6b289b71e10bae1125c4c8fddb291ba1-usr-share-ca-certificates\") pod \"kube-apiserver-bridge-999044\" (UID: \"6b289b71e10bae1125c4c8fddb291ba1\") " pod="kube-system/kube-apiserver-bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.944238 1756 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/cb4e8e534f4e418d2e607d58d17747bf-usr-share-ca-certificates\") pod \"kube-controller-manager-bridge-999044\" (UID: \"cb4e8e534f4e418d2e607d58d17747bf\") " pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.944253 1756 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/6801390925bbe44b4167c4aee770e917-kubeconfig\") pod \"kube-scheduler-bridge-999044\" (UID: \"6801390925bbe44b4167c4aee770e917\") " pod="kube-system/kube-scheduler-bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: I1102 23:25:04.979881 1756 kubelet_node_status.go:75] "Attempting to register node" node="bridge-999044" + Nov 02 23:25:04 bridge-999044 kubelet[1756]: E1102 23:25:04.980130 1756 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.103.2:8443/api/v1/nodes\": dial tcp 192.168.103.2:8443: connect: connection refused" node="bridge-999044" + Nov 02 23:25:05 bridge-999044 kubelet[1756]: E1102 23:25:05.243437 1756 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://192.168.103.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/bridge-999044?timeout=10s\": dial tcp 192.168.103.2:8443: connect: connection refused" interval="800ms" + Nov 02 23:25:05 bridge-999044 kubelet[1756]: I1102 23:25:05.381420 1756 kubelet_node_status.go:75] "Attempting to register node" node="bridge-999044" + Nov 02 23:25:05 bridge-999044 kubelet[1756]: E1102 23:25:05.669753 1756 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"bridge-999044\" not found" node="bridge-999044" + Nov 02 23:25:05 bridge-999044 kubelet[1756]: E1102 23:25:05.672997 1756 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"bridge-999044\" not found" node="bridge-999044" + Nov 02 23:25:05 bridge-999044 kubelet[1756]: E1102 23:25:05.676825 1756 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"bridge-999044\" not found" node="bridge-999044" + Nov 02 23:25:05 bridge-999044 kubelet[1756]: E1102 23:25:05.680947 1756 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"bridge-999044\" not found" node="bridge-999044" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: E1102 23:25:06.488020 1756 nodelease.go:49] "Failed to get node when trying to set owner ref to the node lease" err="nodes \"bridge-999044\" not found" node="bridge-999044" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: I1102 23:25:06.578499 1756 kubelet_node_status.go:78] "Successfully registered node" node="bridge-999044" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: I1102 23:25:06.634548 1756 apiserver.go:52] "Watching apiserver" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: I1102 23:25:06.639502 1756 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: I1102 23:25:06.641108 1756 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: I1102 23:25:06.691377 1756 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-bridge-999044" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: I1102 23:25:06.705715 1756 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-bridge-999044" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: I1102 23:25:06.705815 1756 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-bridge-999044" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: E1102 23:25:06.706444 1756 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-bridge-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/etcd-bridge-999044" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: E1102 23:25:06.706467 1756 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-bridge-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: I1102 23:25:06.706480 1756 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-bridge-999044" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: E1102 23:25:06.708578 1756 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-bridge-999044\" is 
forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-apiserver-bridge-999044" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: E1102 23:25:06.709190 1756 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-bridge-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-scheduler-bridge-999044" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: I1102 23:25:06.709261 1756 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-bridge-999044" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: E1102 23:25:06.709249 1756 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-bridge-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-scheduler-bridge-999044" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: E1102 23:25:06.711283 1756 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-bridge-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/etcd-bridge-999044" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: I1102 23:25:06.711304 1756 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-bridge-999044" + Nov 02 23:25:06 bridge-999044 kubelet[1756]: E1102 23:25:06.713277 1756 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-bridge-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-apiserver-bridge-999044" + Nov 02 23:25:07 bridge-999044 kubelet[1756]: I1102 23:25:07.695092 1756 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-bridge-999044" + Nov 02 23:25:09 bridge-999044 systemd[1]: Stopping kubelet.service - kubelet: The Kubernetes Node Agent... + ░░ Subject: A stop job for unit kubelet.service has begun execution + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has begun execution. + ░░  + ░░ The job identifier is 802. + Nov 02 23:25:09 bridge-999044 systemd[1]: kubelet.service: Deactivated successfully. + ░░ Subject: Unit succeeded + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has successfully entered the 'dead' state. + Nov 02 23:25:09 bridge-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 802 and the job result is done. + Nov 02 23:25:09 bridge-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 802. 
+ Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.191499 2217 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.191544 2217 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.191561 2217 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.191570 2217 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.191727 2217 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.192427 2217 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.193619 2217 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.196632 2217 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.200445 2217 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. Defaulting to /" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.200463 2217 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.200602 2217 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.200614 2217 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"bridge-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.200717 2217 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.200725 2217 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.200743 2217 container_manager_linux.go:315] "Creating Dynamic Resource Allocation (DRA) manager" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.201250 2217 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.201384 2217 kubelet.go:475] "Attempting to sync node with API server" + 
Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.201392 2217 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.201409 2217 kubelet.go:387] "Adding apiserver pod source" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.201423 2217 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.202074 2217 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.202484 2217 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.202514 2217 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.203060 2217 server.go:1262] "Started kubelet" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.203216 2217 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.203623 2217 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.206053 2217 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.206129 2217 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.206392 2217 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.207946 2217 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.208480 2217 volume_manager.go:313] "Starting Kubelet Volume Manager" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: E1102 23:25:09.208538 2217 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"bridge-999044\" not found" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.208584 2217 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.209812 2217 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.209905 2217 factory.go:223] Registration of the systemd container factory successfully + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.209934 2217 server.go:310] "Adding debug handlers to kubelet server" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.210001 2217 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.211359 2217 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.216535 2217 
kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.217340 2217 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv6" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.217361 2217 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.217378 2217 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: E1102 23:25:09.217430 2217 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.229582 2217 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.229600 2217 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.229612 2217 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.229695 2217 state_mem.go:88] "Updated default CPUSet" cpuSet="" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.229706 2217 state_mem.go:96] "Updated CPUSet assignments" assignments={} + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.229716 2217 policy_none.go:49] "None policy: Start" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.229722 2217 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.229728 2217 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.229785 2217 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.229796 2217 policy_none.go:47] "Start" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: E1102 23:25:09.231892 2217 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.232002 2217 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.232014 2217 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.232197 2217 plugin_manager.go:118] "Starting Kubelet Plugin Manager" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: E1102 23:25:09.233098 2217 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." 
err="no imagefs label for configured runtime" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.318534 2217 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.318658 2217 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.318665 2217 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.318665 2217 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.333879 2217 kubelet_node_status.go:75] "Attempting to register node" node="bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.410241 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/36232eba3e27968f071177606a27e6a5-etcd-certs\") pod \"etcd-bridge-999044\" (UID: \"36232eba3e27968f071177606a27e6a5\") " pod="kube-system/etcd-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.410258 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/36232eba3e27968f071177606a27e6a5-etcd-data\") pod \"etcd-bridge-999044\" (UID: \"36232eba3e27968f071177606a27e6a5\") " pod="kube-system/etcd-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.410266 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/6b289b71e10bae1125c4c8fddb291ba1-etc-ca-certificates\") pod \"kube-apiserver-bridge-999044\" (UID: \"6b289b71e10bae1125c4c8fddb291ba1\") " pod="kube-system/kube-apiserver-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.410275 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/cb4e8e534f4e418d2e607d58d17747bf-ca-certs\") pod \"kube-controller-manager-bridge-999044\" (UID: \"cb4e8e534f4e418d2e607d58d17747bf\") " pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.410284 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/cb4e8e534f4e418d2e607d58d17747bf-kubeconfig\") pod \"kube-controller-manager-bridge-999044\" (UID: \"cb4e8e534f4e418d2e607d58d17747bf\") " pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.410291 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/cb4e8e534f4e418d2e607d58d17747bf-usr-local-share-ca-certificates\") pod \"kube-controller-manager-bridge-999044\" (UID: \"cb4e8e534f4e418d2e607d58d17747bf\") " pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.410300 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/cb4e8e534f4e418d2e607d58d17747bf-usr-share-ca-certificates\") pod \"kube-controller-manager-bridge-999044\" (UID: \"cb4e8e534f4e418d2e607d58d17747bf\") " pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.410325 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/cb4e8e534f4e418d2e607d58d17747bf-etc-ca-certificates\") pod \"kube-controller-manager-bridge-999044\" (UID: \"cb4e8e534f4e418d2e607d58d17747bf\") " pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.410347 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/cb4e8e534f4e418d2e607d58d17747bf-flexvolume-dir\") pod \"kube-controller-manager-bridge-999044\" (UID: \"cb4e8e534f4e418d2e607d58d17747bf\") " pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.410366 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/6b289b71e10bae1125c4c8fddb291ba1-ca-certs\") pod \"kube-apiserver-bridge-999044\" (UID: \"6b289b71e10bae1125c4c8fddb291ba1\") " pod="kube-system/kube-apiserver-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.410380 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/6b289b71e10bae1125c4c8fddb291ba1-usr-share-ca-certificates\") pod \"kube-apiserver-bridge-999044\" (UID: \"6b289b71e10bae1125c4c8fddb291ba1\") " pod="kube-system/kube-apiserver-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.410388 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/cb4e8e534f4e418d2e607d58d17747bf-k8s-certs\") pod \"kube-controller-manager-bridge-999044\" (UID: \"cb4e8e534f4e418d2e607d58d17747bf\") " pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.410400 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/6801390925bbe44b4167c4aee770e917-kubeconfig\") pod \"kube-scheduler-bridge-999044\" (UID: \"6801390925bbe44b4167c4aee770e917\") " pod="kube-system/kube-scheduler-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.410406 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/6b289b71e10bae1125c4c8fddb291ba1-k8s-certs\") pod \"kube-apiserver-bridge-999044\" (UID: \"6b289b71e10bae1125c4c8fddb291ba1\") " pod="kube-system/kube-apiserver-bridge-999044" + Nov 02 23:25:09 bridge-999044 kubelet[2217]: I1102 23:25:09.410417 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/6b289b71e10bae1125c4c8fddb291ba1-usr-local-share-ca-certificates\") pod \"kube-apiserver-bridge-999044\" (UID: 
\"6b289b71e10bae1125c4c8fddb291ba1\") " pod="kube-system/kube-apiserver-bridge-999044" + Nov 02 23:25:10 bridge-999044 kubelet[2217]: I1102 23:25:10.202061 2217 apiserver.go:52] "Watching apiserver" + Nov 02 23:25:10 bridge-999044 kubelet[2217]: I1102 23:25:10.209552 2217 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:25:10 bridge-999044 kubelet[2217]: I1102 23:25:10.239132 2217 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-bridge-999044" + Nov 02 23:25:10 bridge-999044 kubelet[2217]: E1102 23:25:10.572284 2217 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-bridge-999044\" already exists" pod="kube-system/kube-scheduler-bridge-999044" + Nov 02 23:25:10 bridge-999044 kubelet[2217]: E1102 23:25:10.573809 2217 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-bridge-999044\" already exists" pod="kube-system/kube-apiserver-bridge-999044" + Nov 02 23:25:10 bridge-999044 kubelet[2217]: I1102 23:25:10.574344 2217 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-scheduler-bridge-999044" podStartSLOduration=3.574335012 podStartE2EDuration="3.574335012s" podCreationTimestamp="2025-11-02 23:25:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:10.573203942 +0000 UTC m=+1.404906994" watchObservedRunningTime="2025-11-02 23:25:10.574335012 +0000 UTC m=+1.406038058" + Nov 02 23:25:10 bridge-999044 kubelet[2217]: I1102 23:25:10.574752 2217 kubelet_node_status.go:124] "Node was previously registered" node="bridge-999044" + Nov 02 23:25:10 bridge-999044 kubelet[2217]: I1102 23:25:10.578463 2217 kubelet_node_status.go:78] "Successfully registered node" node="bridge-999044" + Nov 02 23:25:10 bridge-999044 kubelet[2217]: I1102 23:25:10.593320 2217 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-apiserver-bridge-999044" podStartSLOduration=1.593308347 podStartE2EDuration="1.593308347s" podCreationTimestamp="2025-11-02 23:25:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:10.588138039 +0000 UTC m=+1.419841086" watchObservedRunningTime="2025-11-02 23:25:10.593308347 +0000 UTC m=+1.425011392" + Nov 02 23:25:10 bridge-999044 kubelet[2217]: I1102 23:25:10.599744 2217 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-bridge-999044" podStartSLOduration=1.599733889 podStartE2EDuration="1.599733889s" podCreationTimestamp="2025-11-02 23:25:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:10.593606124 +0000 UTC m=+1.425309172" watchObservedRunningTime="2025-11-02 23:25:10.599733889 +0000 UTC m=+1.431436970" + Nov 02 23:25:10 bridge-999044 kubelet[2217]: I1102 23:25:10.611867 2217 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-bridge-999044" podStartSLOduration=1.611858853 podStartE2EDuration="1.611858853s" podCreationTimestamp="2025-11-02 23:25:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:10.600434819 +0000 UTC m=+1.432137864" watchObservedRunningTime="2025-11-02 
23:25:10.611858853 +0000 UTC m=+1.443561896" + Nov 02 23:25:11 bridge-999044 kubelet[2217]: I1102 23:25:11.246516 2217 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:11 bridge-999044 kubelet[2217]: E1102 23:25:11.257177 2217 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-bridge-999044\" already exists" pod="kube-system/kube-controller-manager-bridge-999044" + Nov 02 23:25:14 bridge-999044 kubelet[2217]: I1102 23:25:14.742480 2217 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24" + Nov 02 23:25:14 bridge-999044 kubelet[2217]: I1102 23:25:14.742843 2217 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24" + Nov 02 23:25:15 bridge-999044 kubelet[2217]: I1102 23:25:15.549792 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/637d94ce-bd70-46ce-be06-f584944d3847-xtables-lock\") pod \"kube-proxy-xmzpf\" (UID: \"637d94ce-bd70-46ce-be06-f584944d3847\") " pod="kube-system/kube-proxy-xmzpf" + Nov 02 23:25:15 bridge-999044 kubelet[2217]: I1102 23:25:15.549819 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5zxl\" (UniqueName: \"kubernetes.io/projected/637d94ce-bd70-46ce-be06-f584944d3847-kube-api-access-s5zxl\") pod \"kube-proxy-xmzpf\" (UID: \"637d94ce-bd70-46ce-be06-f584944d3847\") " pod="kube-system/kube-proxy-xmzpf" + Nov 02 23:25:15 bridge-999044 kubelet[2217]: I1102 23:25:15.549832 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/637d94ce-bd70-46ce-be06-f584944d3847-kube-proxy\") pod \"kube-proxy-xmzpf\" (UID: \"637d94ce-bd70-46ce-be06-f584944d3847\") " pod="kube-system/kube-proxy-xmzpf" + Nov 02 23:25:15 bridge-999044 kubelet[2217]: I1102 23:25:15.549842 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/637d94ce-bd70-46ce-be06-f584944d3847-lib-modules\") pod \"kube-proxy-xmzpf\" (UID: \"637d94ce-bd70-46ce-be06-f584944d3847\") " pod="kube-system/kube-proxy-xmzpf" + Nov 02 23:25:15 bridge-999044 kubelet[2217]: I1102 23:25:15.552034 2217 kubelet_node_status.go:439] "Fast updating node status as it just became ready" + Nov 02 23:25:15 bridge-999044 kubelet[2217]: E1102 23:25:15.653983 2217 projected.go:291] Couldn't get configMap kube-system/kube-root-ca.crt: configmap "kube-root-ca.crt" not found + Nov 02 23:25:15 bridge-999044 kubelet[2217]: E1102 23:25:15.654012 2217 projected.go:196] Error preparing data for projected volume kube-api-access-s5zxl for pod kube-system/kube-proxy-xmzpf: configmap "kube-root-ca.crt" not found + Nov 02 23:25:15 bridge-999044 kubelet[2217]: E1102 23:25:15.654065 2217 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/637d94ce-bd70-46ce-be06-f584944d3847-kube-api-access-s5zxl podName:637d94ce-bd70-46ce-be06-f584944d3847 nodeName:}" failed. No retries permitted until 2025-11-02 23:25:16.154046855 +0000 UTC m=+6.985749896 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s5zxl" (UniqueName: "kubernetes.io/projected/637d94ce-bd70-46ce-be06-f584944d3847-kube-api-access-s5zxl") pod "kube-proxy-xmzpf" (UID: "637d94ce-bd70-46ce-be06-f584944d3847") : configmap "kube-root-ca.crt" not found + Nov 02 23:25:16 bridge-999044 kubelet[2217]: I1102 23:25:16.052160 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5p5m\" (UniqueName: \"kubernetes.io/projected/612d9bce-8601-4282-8525-d923bf5cdec8-kube-api-access-g5p5m\") pod \"coredns-66bc5c9577-9bxps\" (UID: \"612d9bce-8601-4282-8525-d923bf5cdec8\") " pod="kube-system/coredns-66bc5c9577-9bxps" + Nov 02 23:25:16 bridge-999044 kubelet[2217]: I1102 23:25:16.052182 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1fa90afc-df55-4a9b-9ebd-7c3210e892ac-config-volume\") pod \"coredns-66bc5c9577-mfcg7\" (UID: \"1fa90afc-df55-4a9b-9ebd-7c3210e892ac\") " pod="kube-system/coredns-66bc5c9577-mfcg7" + Nov 02 23:25:16 bridge-999044 kubelet[2217]: I1102 23:25:16.052205 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fx5p8\" (UniqueName: \"kubernetes.io/projected/1fa90afc-df55-4a9b-9ebd-7c3210e892ac-kube-api-access-fx5p8\") pod \"coredns-66bc5c9577-mfcg7\" (UID: \"1fa90afc-df55-4a9b-9ebd-7c3210e892ac\") " pod="kube-system/coredns-66bc5c9577-mfcg7" + Nov 02 23:25:16 bridge-999044 kubelet[2217]: I1102 23:25:16.052217 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/612d9bce-8601-4282-8525-d923bf5cdec8-config-volume\") pod \"coredns-66bc5c9577-9bxps\" (UID: \"612d9bce-8601-4282-8525-d923bf5cdec8\") " pod="kube-system/coredns-66bc5c9577-9bxps" + Nov 02 23:25:16 bridge-999044 kubelet[2217]: I1102 23:25:16.656603 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/8831a2d0-34b3-4d13-b0f5-f6a7d8a59f10-tmp\") pod \"storage-provisioner\" (UID: \"8831a2d0-34b3-4d13-b0f5-f6a7d8a59f10\") " pod="kube-system/storage-provisioner" + Nov 02 23:25:16 bridge-999044 kubelet[2217]: I1102 23:25:16.656649 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8p6v\" (UniqueName: \"kubernetes.io/projected/8831a2d0-34b3-4d13-b0f5-f6a7d8a59f10-kube-api-access-f8p6v\") pod \"storage-provisioner\" (UID: \"8831a2d0-34b3-4d13-b0f5-f6a7d8a59f10\") " pod="kube-system/storage-provisioner" + Nov 02 23:25:17 bridge-999044 kubelet[2217]: I1102 23:25:17.291037 2217 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-xmzpf" podStartSLOduration=2.29102028 podStartE2EDuration="2.29102028s" podCreationTimestamp="2025-11-02 23:25:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:17.28281281 +0000 UTC m=+8.114515850" watchObservedRunningTime="2025-11-02 23:25:17.29102028 +0000 UTC m=+8.122723323" + Nov 02 23:25:17 bridge-999044 kubelet[2217]: I1102 23:25:17.291119 2217 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-mfcg7" podStartSLOduration=2.291114238 podStartE2EDuration="2.291114238s" 
podCreationTimestamp="2025-11-02 23:25:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:17.290711523 +0000 UTC m=+8.122414571" watchObservedRunningTime="2025-11-02 23:25:17.291114238 +0000 UTC m=+8.122817279" + Nov 02 23:25:17 bridge-999044 kubelet[2217]: I1102 23:25:17.299274 2217 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-9bxps" podStartSLOduration=2.299260167 podStartE2EDuration="2.299260167s" podCreationTimestamp="2025-11-02 23:25:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:17.299131034 +0000 UTC m=+8.130834102" watchObservedRunningTime="2025-11-02 23:25:17.299260167 +0000 UTC m=+8.130963208" + Nov 02 23:25:17 bridge-999044 kubelet[2217]: I1102 23:25:17.312220 2217 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=1.312209044 podStartE2EDuration="1.312209044s" podCreationTimestamp="2025-11-02 23:25:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:17.305783263 +0000 UTC m=+8.137486320" watchObservedRunningTime="2025-11-02 23:25:17.312209044 +0000 UTC m=+8.143912090" + Nov 02 23:25:25 bridge-999044 kubelet[2217]: I1102 23:25:25.860435 2217 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" + Nov 02 23:25:26 bridge-999044 kubelet[2217]: I1102 23:25:26.808073 2217 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5p5m\" (UniqueName: \"kubernetes.io/projected/612d9bce-8601-4282-8525-d923bf5cdec8-kube-api-access-g5p5m\") pod \"612d9bce-8601-4282-8525-d923bf5cdec8\" (UID: \"612d9bce-8601-4282-8525-d923bf5cdec8\") " + Nov 02 23:25:26 bridge-999044 kubelet[2217]: I1102 23:25:26.808098 2217 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/612d9bce-8601-4282-8525-d923bf5cdec8-config-volume\") pod \"612d9bce-8601-4282-8525-d923bf5cdec8\" (UID: \"612d9bce-8601-4282-8525-d923bf5cdec8\") " + Nov 02 23:25:26 bridge-999044 kubelet[2217]: I1102 23:25:26.808402 2217 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/612d9bce-8601-4282-8525-d923bf5cdec8-config-volume" (OuterVolumeSpecName: "config-volume") pod "612d9bce-8601-4282-8525-d923bf5cdec8" (UID: "612d9bce-8601-4282-8525-d923bf5cdec8"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGIDValue "" + Nov 02 23:25:26 bridge-999044 kubelet[2217]: I1102 23:25:26.809610 2217 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/612d9bce-8601-4282-8525-d923bf5cdec8-kube-api-access-g5p5m" (OuterVolumeSpecName: "kube-api-access-g5p5m") pod "612d9bce-8601-4282-8525-d923bf5cdec8" (UID: "612d9bce-8601-4282-8525-d923bf5cdec8"). InnerVolumeSpecName "kube-api-access-g5p5m". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" + Nov 02 23:25:26 bridge-999044 kubelet[2217]: I1102 23:25:26.908958 2217 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-g5p5m\" (UniqueName: \"kubernetes.io/projected/612d9bce-8601-4282-8525-d923bf5cdec8-kube-api-access-g5p5m\") on node \"bridge-999044\" DevicePath \"\"" + Nov 02 23:25:26 bridge-999044 kubelet[2217]: I1102 23:25:26.908974 2217 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/612d9bce-8601-4282-8525-d923bf5cdec8-config-volume\") on node \"bridge-999044\" DevicePath \"\"" + Nov 02 23:25:27 bridge-999044 kubelet[2217]: I1102 23:25:27.332140 2217 scope.go:117] "RemoveContainer" containerID="d3f9649c2d7cbf6aeb8f9caed4f711369f3aae9273c4b66d05ee483c1ac4b08a" + Nov 02 23:25:27 bridge-999044 kubelet[2217]: I1102 23:25:27.339886 2217 scope.go:117] "RemoveContainer" containerID="d3f9649c2d7cbf6aeb8f9caed4f711369f3aae9273c4b66d05ee483c1ac4b08a" + Nov 02 23:25:27 bridge-999044 kubelet[2217]: E1102 23:25:27.340366 2217 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = Unknown desc = Error response from daemon: No such container: d3f9649c2d7cbf6aeb8f9caed4f711369f3aae9273c4b66d05ee483c1ac4b08a" containerID="d3f9649c2d7cbf6aeb8f9caed4f711369f3aae9273c4b66d05ee483c1ac4b08a" + Nov 02 23:25:27 bridge-999044 kubelet[2217]: I1102 23:25:27.340390 2217 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"docker","ID":"d3f9649c2d7cbf6aeb8f9caed4f711369f3aae9273c4b66d05ee483c1ac4b08a"} err="failed to get container status \"d3f9649c2d7cbf6aeb8f9caed4f711369f3aae9273c4b66d05ee483c1ac4b08a\": rpc error: code = Unknown desc = Error response from daemon: No such container: d3f9649c2d7cbf6aeb8f9caed4f711369f3aae9273c4b66d05ee483c1ac4b08a" + Nov 02 23:25:29 bridge-999044 kubelet[2217]: I1102 23:25:29.220841 2217 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="612d9bce-8601-4282-8525-d923bf5cdec8" path="/var/lib/kubelet/pods/612d9bce-8601-4282-8525-d923bf5cdec8/volumes" + Nov 02 23:25:47 bridge-999044 kubelet[2217]: I1102 23:25:47.397305 2217 scope.go:117] "RemoveContainer" containerID="60f616688ebec4181728cff80d58aa20da2c82e269322d1a4eb9a6676a8d442b" + Nov 02 23:25:47 bridge-999044 kubelet[2217]: I1102 23:25:47.832526 2217 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6l6h\" (UniqueName: \"kubernetes.io/projected/bc331d87-4758-44bb-beed-a74c7b6c1f89-kube-api-access-g6l6h\") pod \"netcat-cd4db9dbf-fjnrs\" (UID: \"bc331d87-4758-44bb-beed-a74c7b6c1f89\") " pod="default/netcat-cd4db9dbf-fjnrs" + Nov 02 23:25:56 bridge-999044 kubelet[2217]: E1102 23:25:56.081553 2217 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp [::1]:48420->[::1]:34935: write tcp [::1]:48420->[::1]:34935: write: broken pipe + + + >>> host: /etc/kubernetes/kubelet.conf: + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://192.168.103.2:8443 + name: mk + contexts: + - context: + cluster: mk + user: system:node:bridge-999044 + name: system:node:bridge-999044@mk + current-context: system:node:bridge-999044@mk + kind: Config + users: + - name: system:node:bridge-999044 + user: + client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem + client-key: /var/lib/kubelet/pki/kubelet-client-current.pem + + + >>> host: /var/lib/kubelet/config.yaml: + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + >>> k8s: kubectl config: + apiVersion: v1 + clusters: + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:16 UTC + provider: 
minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.103.2:8443 + name: bridge-999044 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:31 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.76.2:8443 + name: enable-default-cni-999044 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:20 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.85.2:8443 + name: flannel-999044 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:20 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.94.2:8443 + name: kubenet-999044 + contexts: + - context: + cluster: bridge-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:16 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: bridge-999044 + name: bridge-999044 + - context: + cluster: enable-default-cni-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:31 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: enable-default-cni-999044 + name: enable-default-cni-999044 + - context: + cluster: flannel-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:20 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: flannel-999044 + name: flannel-999044 + - context: + cluster: kubenet-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:20 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: kubenet-999044 + name: kubenet-999044 + current-context: enable-default-cni-999044 + kind: Config + users: + - name: bridge-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/bridge-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/bridge-999044/client.key + - name: enable-default-cni-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/enable-default-cni-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/enable-default-cni-999044/client.key + - name: flannel-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/flannel-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/flannel-999044/client.key + - name: kubenet-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kubenet-999044/client.crt + client-key: 
/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kubenet-999044/client.key + + + >>> k8s: cms: + apiVersion: v1 + items: + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:25:15Z" + name: kube-root-ca.crt + namespace: default + resourceVersion: "328" + uid: 5b053af3-4fca-4d79-9e04-4b8b818a4952 + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. 
+ creationTimestamp: "2025-11-02T23:25:15Z" + name: kube-root-ca.crt + namespace: kube-node-lease + resourceVersion: "329" + uid: 7b3b0513-8fe5-4be5-9984-a9ea94ef6ac4 + - apiVersion: v1 + data: + jws-kubeconfig-nois7g: eyJhbGciOiJIUzI1NiIsImtpZCI6Im5vaXM3ZyJ9..5tRZGZh4la1V1JK9xBI5Hzb8DiG7DXR1DsKT8-mE3gg + kubeconfig: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://control-plane.minikube.internal:8443 + name: "" + contexts: null + current-context: "" + kind: Config + users: null + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:08Z" + name: cluster-info + namespace: kube-public + resourceVersion: "297" + uid: 654db6c5-0664-4e83-8c23-d612eb88134e + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal 
endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:25:15Z" + name: kube-root-ca.crt + namespace: kube-public + resourceVersion: "330" + uid: a7f691a2-c88f-45ea-8c7d-21634ff9aba4 + - apiVersion: v1 + data: + Corefile: | + .:53 { + log + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + hosts { + 192.168.103.1 host.minikube.internal + fallthrough + } + forward . /etc/resolv.conf { + max_concurrent 1000 + } + cache 30 { + disable success cluster.local + disable denial cluster.local + } + loop + reload + loadbalance + } + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:10Z" + name: coredns + namespace: kube-system + resourceVersion: "347" + uid: 20817aa4-7238-4a84-93aa-44203ed5fb6e + - apiVersion: v1 + data: + client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + requestheader-allowed-names: '["front-proxy-client"]' + requestheader-client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDETCCAfmgAwIBAgIISlL+o7/KI98wDQYJKoZIhvcNAQELBQAwGTEXMBUGA1UE + AxMOZnJvbnQtcHJveHktY2EwHhcNMjUxMTAyMjMyMDAxWhcNMzUxMDMxMjMyNTAx + WjAZMRcwFQYDVQQDEw5mcm9udC1wcm94eS1jYTCCASIwDQYJKoZIhvcNAQEBBQAD + ggEPADCCAQoCggEBAM3Qfbjlb4vl5//cUq6heYxsQF7tuxDpLbrkzmud+/eJk1ft + 40gc86d0vjBghGcLJsxZUjToMtf6c/EKozwCDArLMGEUJYUV8w9+iY1HGnzacXcm + 8NQnRXjt2JlMPTF0Mi2Q5e1uKpEGUwb55UMEIy1a906VJ4osI8cA/kv8HdS4uLAo + b1CDmxC48SdcQ2bJjVKaBCQuWp5HynDDP6HoMTT5RvZGCrwbeEfYaE+dRiCejZkv + ZxJokHVkB1j0/Gux0wWJfXHXnIYLA+pD975igaTSaVUlZ/2WrYZwxcpzytu7ioID + 7q2Fw2lq6+Te3zI0InzKIgoG4GzbrIzM8eEDDFUCAwEAAaNdMFswDgYDVR0PAQH/ + BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFAe6Q0zV0oRtxpHi2XDs + oyjZcHlqMBkGA1UdEQQSMBCCDmZyb250LXByb3h5LWNhMA0GCSqGSIb3DQEBCwUA + A4IBAQDNAp4eq2AHwPudyZ2fM9dxw5LDsHp7z2QWV7B8p4Nlp5neHTNXjFHdUMs/ + p/RaVOYNN4hVvgf5QdEojbwPK0qTVvnRZbAHGQN0uLqeum1g9k1ioigPI2JghKDk + j2MFlvwCPmOO3EYncm/sfpejGmL5cFSQOebJYIpEP3cgd77zrKEQeKlAO2NxUBvX + hcuMpAHIgEvjUTS57+NuSO0hOLebrVy4pxB7TTe7uB7mXUygoirjmJuQMADcA0fL + WN8vcZXf9bzDSGJ/5JKMYlFLHnT+cK6ScO6lKzucMOdILhEfujKfk790gciB4cSw + tLCyB4AM1jIMIjM1j4TWrWMDFIGS + -----END CERTIFICATE----- + requestheader-extra-headers-prefix: '["X-Remote-Extra-"]' + requestheader-group-headers: '["X-Remote-Group"]' + 
requestheader-username-headers: '["X-Remote-User"]' + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:06Z" + name: extension-apiserver-authentication + namespace: kube-system + resourceVersion: "30" + uid: cbd1360a-1d00-4d1d-9bcc-d150182457de + - apiVersion: v1 + data: + since: "2025-11-02" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:06Z" + name: kube-apiserver-legacy-service-account-token-tracking + namespace: kube-system + resourceVersion: "24" + uid: 439a0c6d-ad7f-4212-a698-6a40981c671e + - apiVersion: v1 + data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: 0.0.0.0 + bindAddressHardFail: false + clientConnection: + acceptContentTypes: "" + burst: 0 + contentType: "" + kubeconfig: /var/lib/kube-proxy/kubeconfig.conf + qps: 0 + clusterCIDR: 10.244.0.0/16 + configSyncPeriod: 0s + conntrack: + maxPerCore: 0 + min: null + tcpBeLiberal: false + tcpCloseWaitTimeout: 0s + tcpEstablishedTimeout: 0s + udpStreamTimeout: 0s + udpTimeout: 0s + detectLocal: + bridgeInterface: "" + interfaceNamePrefix: "" + detectLocalMode: "" + enableProfiling: false + healthzBindAddress: "" + hostnameOverride: "" + iptables: + localhostNodePorts: null + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + ipvs: + excludeCIDRs: null + minSyncPeriod: 0s + scheduler: "" + strictARP: false + syncPeriod: 0s + tcpFinTimeout: 0s + tcpTimeout: 0s + udpTimeout: 0s + kind: KubeProxyConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + metricsBindAddress: 0.0.0.0:10249 + mode: "" + nftables: + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + nodePortAddresses: null + oomScoreAdj: null + portRange: "" + showHiddenMetricsForVersion: "" + winkernel: + enableDSR: false + forwardHealthCheckVip: false + networkName: "" + rootHnsEndpointName: "" + sourceVip: "" + kubeconfig.conf: |- + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://control-plane.minikube.internal:8443 + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:10Z" + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system + resourceVersion: "251" + uid: 60fcea50-db58-4322-9cbf-f41eb19bc58a + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + 
C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:25:15Z" + name: kube-root-ca.crt + namespace: kube-system + resourceVersion: "331" + uid: 9467e8b2-3a74-451d-8527-50bd6e09b333 + - apiVersion: v1 + data: + ClusterConfiguration: | + apiServer: + certSANs: + - 127.0.0.1 + - localhost + - 192.168.103.2 + extraArgs: + - name: enable-admission-plugins + value: NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + apiVersion: kubeadm.k8s.io/v1beta4 + caCertificateValidityPeriod: 87600h0m0s + certificateValidityPeriod: 8760h0m0s + certificatesDir: /var/lib/minikube/certs + clusterName: mk + controlPlaneEndpoint: control-plane.minikube.internal:8443 + controllerManager: + extraArgs: + - name: allocate-node-cidrs + value: "true" + - name: leader-elect + value: "false" + dns: {} + encryptionAlgorithm: RSA-2048 + etcd: + local: + dataDir: /var/lib/minikube/etcd + imageRepository: registry.k8s.io + kind: ClusterConfiguration + kubernetesVersion: v1.34.1 + networking: + dnsDomain: cluster.local + podSubnet: 10.244.0.0/16 + serviceSubnet: 10.96.0.0/12 + proxy: {} + scheduler: + extraArgs: + - name: leader-elect + value: "false" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:07Z" + name: kubeadm-config + namespace: kube-system + resourceVersion: "208" + uid: 3e9a9c50-12ab-4f23-9cd6-bd2531a29fcb + - apiVersion: v1 + data: + kubelet: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:07Z" + name: kubelet-config + namespace: kube-system + resourceVersion: 
"211" + uid: a0e72303-eaff-4659-8185-89cbcc98423f + kind: List + metadata: + resourceVersion: "" + + + >>> host: docker daemon status: + ● docker.service - Docker Application Container Engine + Loaded: loaded (]8;;file://bridge-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;;; enabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:25:00 UTC; 1min 4s ago + TriggeredBy: ● docker.socket + Docs: ]8;;https://docs.docker.comhttps://docs.docker.com]8;; + Main PID: 1045 (dockerd) + Tasks: 16 + Memory: 171.5M + CPU: 2.603s + CGroup: /system.slice/docker.service + └─1045 /usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + + Nov 02 23:25:00 bridge-999044 dockerd[1045]: time="2025-11-02T23:25:00.144485122Z" level=info msg="Initializing buildkit" + Nov 02 23:25:00 bridge-999044 dockerd[1045]: time="2025-11-02T23:25:00.157229551Z" level=info msg="Completed buildkit initialization" + Nov 02 23:25:00 bridge-999044 dockerd[1045]: time="2025-11-02T23:25:00.160877866Z" level=info msg="Daemon has completed initialization" + Nov 02 23:25:00 bridge-999044 dockerd[1045]: time="2025-11-02T23:25:00.160951380Z" level=info msg="API listen on /var/run/docker.sock" + Nov 02 23:25:00 bridge-999044 dockerd[1045]: time="2025-11-02T23:25:00.160952017Z" level=info msg="API listen on /run/docker.sock" + Nov 02 23:25:00 bridge-999044 dockerd[1045]: time="2025-11-02T23:25:00.160978109Z" level=info msg="API listen on [::]:2376" + Nov 02 23:25:00 bridge-999044 systemd[1]: Started docker.service - Docker Application Container Engine. + Nov 02 23:25:26 bridge-999044 dockerd[1045]: time="2025-11-02T23:25:26.594361461Z" level=info msg="ignoring event" container=d3f9649c2d7cbf6aeb8f9caed4f711369f3aae9273c4b66d05ee483c1ac4b08a module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + Nov 02 23:25:26 bridge-999044 dockerd[1045]: time="2025-11-02T23:25:26.684988390Z" level=info msg="ignoring event" container=61c7f93e01b8d37498f95611eb57fc66194e90c7b664197016fd073164e90d6c module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + Nov 02 23:25:47 bridge-999044 dockerd[1045]: time="2025-11-02T23:25:47.065346086Z" level=info msg="ignoring event" container=60f616688ebec4181728cff80d58aa20da2c82e269322d1a4eb9a6676a8d442b module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + + + >>> host: docker daemon config: + # ]8;;file://bridge-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;; + [Unit] + Description=Docker Application Container Engine + Documentation=https://docs.docker.com + After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target + Wants=network-online.target containerd.service + Requires=docker.socket + StartLimitBurst=3 + StartLimitIntervalSec=60 + + [Service] + Type=notify + Restart=always + + + + # This file is a systemd drop-in unit that inherits from the base dockerd configuration. + # The base configuration already specifies an 'ExecStart=...' command. The first directive + # here is to clear out that command inherited from the base configuration. 
Without this, + # the command from the base configuration and the command specified here are treated as + # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd + # will catch this invalid input and refuse to start the service with an error like: + # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. + + # NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other + # container runtimes. If left unlimited, it may result in OOM issues with MySQL. + ExecStart= + ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + ExecReload=/bin/kill -s HUP $MAINPID + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Uncomment TasksMax if your systemd version supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + TimeoutStartSec=0 + + # set delegate yes so that systemd does not reset the cgroups of docker containers + Delegate=yes + + # kill only the docker process, not all processes in the cgroup + KillMode=process + OOMScoreAdjust=-500 + + [Install] + WantedBy=multi-user.target + + + >>> host: /etc/docker/daemon.json: + {"exec-opts":["native.cgroupdriver=systemd"],"log-driver":"json-file","log-opts":{"max-size":"100m"},"storage-driver":"overlay2"} + + >>> host: docker system info: + Client: Docker Engine - Community + Version: 28.5.1 + Context: default + Debug Mode: false + Plugins: + buildx: Docker Buildx (Docker Inc.) 
+ Version: v0.29.1 + Path: /usr/libexec/docker/cli-plugins/docker-buildx + + Server: + Containers: 18 + Running: 16 + Paused: 0 + Stopped: 2 + Images: 9 + Server Version: 28.5.1 + Storage Driver: overlay2 + Backing Filesystem: extfs + Supports d_type: true + Using metacopy: false + Native Overlay Diff: true + userxattr: false + Logging Driver: json-file + Cgroup Driver: systemd + Cgroup Version: 2 + Plugins: + Volume: local + Network: bridge host ipvlan macvlan null overlay + Log: awslogs fluentd gcplogs gelf journald json-file local splunk syslog + CDI spec directories: + /etc/cdi + /var/run/cdi + Swarm: inactive + Runtimes: io.containerd.runc.v2 runc + Default Runtime: runc + Init Binary: docker-init + containerd version: b98a3aace656320842a23f4a392a33f46af97866 + runc version: v1.3.0-0-g4ca628d1 + init version: de40ad0 + Security Options: + seccomp + Profile: builtin + cgroupns + Kernel Version: 6.6.97+ + Operating System: Debian GNU/Linux 12 (bookworm) + OSType: linux + Architecture: x86_64 + CPUs: 8 + Total Memory: 60.83GiB + Name: bridge-999044 + ID: 51080251-514b-4962-9815-45af53dce418 + Docker Root Dir: /var/lib/docker + Debug Mode: false + No Proxy: control-plane.minikube.internal + Labels: + provider=docker + Experimental: false + Insecure Registries: + 10.96.0.0/12 + ::1/128 + 127.0.0.0/8 + Live Restore Enabled: false + + + + >>> host: cri-docker daemon status: + ● cri-docker.service - CRI Interface for Docker Application Container Engine + Loaded: loaded (]8;;file://bridge-999044/lib/systemd/system/cri-docker.service/lib/systemd/system/cri-docker.service]8;;; disabled; preset: enabled) + Drop-In: /etc/systemd/system/cri-docker.service.d + └─]8;;file://bridge-999044/etc/systemd/system/cri-docker.service.d/10-cni.conf10-cni.conf]8;; + Active: active (running) since Sun 2025-11-02 23:25:00 UTC; 1min 5s ago + TriggeredBy: ● cri-docker.socket + Docs: ]8;;https://docs.mirantis.comhttps://docs.mirantis.com]8;; + Main PID: 1353 (cri-dockerd) + Tasks: 13 + Memory: 17.3M + CPU: 727ms + CGroup: /system.slice/cri-docker.service + └─1353 /usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + Nov 02 23:25:05 bridge-999044 cri-dockerd[1353]: time="2025-11-02T23:25:05Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a83548c94c982d7774f832512eaf1c20f9f633a898ce058feb3c7a596809ca4f/resolv.conf as [nameserver 192.168.103.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:05 bridge-999044 cri-dockerd[1353]: time="2025-11-02T23:25:05Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/6ffa37255b17a27fa30b39327c793ba6622ade7bad40f6d80db30095b462727c/resolv.conf as [nameserver 192.168.103.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:05 bridge-999044 cri-dockerd[1353]: time="2025-11-02T23:25:05Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/22673fd439034ed36412255af6b3647b5ae9c65647d1577f9cc62271306ffb3c/resolv.conf as [nameserver 192.168.103.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:14 bridge-999044 cri-dockerd[1353]: time="2025-11-02T23:25:14Z" level=info msg="Docker cri received runtime 
config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}" + Nov 02 23:25:16 bridge-999044 cri-dockerd[1353]: time="2025-11-02T23:25:16Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/bf5cc7d2cea194d8971604aceba1bffe67fda0a3fde9d6bb252255bb2933889b/resolv.conf as [nameserver 192.168.103.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:16 bridge-999044 cri-dockerd[1353]: time="2025-11-02T23:25:16Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/61c7f93e01b8d37498f95611eb57fc66194e90c7b664197016fd073164e90d6c/resolv.conf as [nameserver 192.168.103.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:16 bridge-999044 cri-dockerd[1353]: time="2025-11-02T23:25:16Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/60a3f80b01fa4f857b94087771e31e41b58bb36b03f57640ffb9689bb6c1ce8b/resolv.conf as [nameserver 192.168.103.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:17 bridge-999044 cri-dockerd[1353]: time="2025-11-02T23:25:17Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/9a331e1de6fdfdb1be103e8ef5b5eddf2ca953e5fdcc39cabbc96581af696412/resolv.conf as [nameserver 192.168.103.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:48 bridge-999044 cri-dockerd[1353]: time="2025-11-02T23:25:48Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/c3cf4d8e48f8829f4ed36d1bb56089d529e6dfeb0f0f59d7d67904fc49a0857f/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local test-pods.svc.cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:49 bridge-999044 cri-dockerd[1353]: time="2025-11-02T23:25:49Z" level=info msg="Stop pulling image registry.k8s.io/e2e-test-images/agnhost:2.40: Status: Downloaded newer image for registry.k8s.io/e2e-test-images/agnhost:2.40" + + + >>> host: cri-docker daemon config: + # ]8;;file://bridge-999044/lib/systemd/system/cri-docker.service/lib/systemd/system/cri-docker.service]8;; + [Unit] + Description=CRI Interface for Docker Application Container Engine + Documentation=https://docs.mirantis.com + After=network-online.target firewalld.service docker.service + Wants=network-online.target + Requires=cri-docker.socket + + [Service] + Type=notify + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// + ExecReload=/bin/kill -s HUP $MAINPID + TimeoutSec=0 + RestartSec=2 + Restart=always + + # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. + # Both the old, and new location are accepted by systemd 229 and up, so using the old location + # to make them work for either version of systemd. + StartLimitBurst=3 + + # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. + # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make + # this option work for either version of systemd. + StartLimitInterval=60s + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. 
We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Comment TasksMax if your systemd version does not support it. + # Only systemd 226 and above support this option. + TasksMax=infinity + Delegate=yes + KillMode=process + + [Install] + WantedBy=multi-user.target + + # /etc/systemd/system/cri-docker.service.d/10-cni.conf + [Service] + ExecStart= + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + + >>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf: + [Service] + ExecStart= + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + >>> host: /usr/lib/systemd/system/cri-docker.service: + [Unit] + Description=CRI Interface for Docker Application Container Engine + Documentation=https://docs.mirantis.com + After=network-online.target firewalld.service docker.service + Wants=network-online.target + Requires=cri-docker.socket + + [Service] + Type=notify + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// + ExecReload=/bin/kill -s HUP $MAINPID + TimeoutSec=0 + RestartSec=2 + Restart=always + + # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. + # Both the old, and new location are accepted by systemd 229 and up, so using the old location + # to make them work for either version of systemd. + StartLimitBurst=3 + + # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. + # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make + # this option work for either version of systemd. + StartLimitInterval=60s + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Comment TasksMax if your systemd version does not support it. + # Only systemd 226 and above support this option.
+ TasksMax=infinity + Delegate=yes + KillMode=process + + [Install] + WantedBy=multi-user.target + + + >>> host: cri-dockerd version: + cri-dockerd dev (HEAD) + + + >>> host: containerd daemon status: + ● containerd.service - containerd container runtime + Loaded: loaded (]8;;file://bridge-999044/lib/systemd/system/containerd.service/lib/systemd/system/containerd.service]8;;; disabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:24:59 UTC; 1min 7s ago + Docs: ]8;;https://containerd.iohttps://containerd.io]8;; + Main PID: 1032 (containerd) + Tasks: 189 + Memory: 94.2M + CPU: 1.151s + CGroup: /system.slice/containerd.service + ├─1032 /usr/bin/containerd + ├─1806 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 665c15d057dea2411257c10a3d66ec09bc7c31d569d510d5ee9316efc8409092 -address /run/containerd/containerd.sock + ├─1820 /usr/bin/containerd-shim-runc-v2 -namespace moby -id a83548c94c982d7774f832512eaf1c20f9f633a898ce058feb3c7a596809ca4f -address /run/containerd/containerd.sock + ├─1857 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 6ffa37255b17a27fa30b39327c793ba6622ade7bad40f6d80db30095b462727c -address /run/containerd/containerd.sock + ├─1910 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 22673fd439034ed36412255af6b3647b5ae9c65647d1577f9cc62271306ffb3c -address /run/containerd/containerd.sock + ├─1991 /usr/bin/containerd-shim-runc-v2 -namespace moby -id ecc8763a3f7cb06c16b0b26d90fff135153820b3acee22281af2f7fac21e6f41 -address /run/containerd/containerd.sock + ├─1993 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 81ef4c3f97d4a62cb54472fc404a8cf9c8e5cdf61301de178afcbf80c6cd22a8 -address /run/containerd/containerd.sock + ├─2053 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 519b7735bbebd53327dcc056685f56923358bf7014dad94bd82c673e8577fd96 -address /run/containerd/containerd.sock + ├─2087 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 7d6c53f8693ab540eee355361184183dc07f9baa090b6aeb988e98498ea3db40 -address /run/containerd/containerd.sock + ├─2504 /usr/bin/containerd-shim-runc-v2 -namespace moby -id bf5cc7d2cea194d8971604aceba1bffe67fda0a3fde9d6bb252255bb2933889b -address /run/containerd/containerd.sock + ├─2729 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 60a3f80b01fa4f857b94087771e31e41b58bb36b03f57640ffb9689bb6c1ce8b -address /run/containerd/containerd.sock + ├─2766 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 1c527a775cadcecd2c003dd4ea918e73fc635e76d587d954c5d6feddafde1b4f -address /run/containerd/containerd.sock + ├─2884 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 2b4bb93d0766fec8c5d5c735cbafdd8c0e2823b0a08f7881ce1915da725d299e -address /run/containerd/containerd.sock + ├─2983 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 9a331e1de6fdfdb1be103e8ef5b5eddf2ca953e5fdcc39cabbc96581af696412 -address /run/containerd/containerd.sock + ├─3402 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 23f52ccdf60bef804f11b1e93581922d9dcd8dc7f6b06248de68d5a1ef1cc545 -address /run/containerd/containerd.sock + ├─3466 /usr/bin/containerd-shim-runc-v2 -namespace moby -id c3cf4d8e48f8829f4ed36d1bb56089d529e6dfeb0f0f59d7d67904fc49a0857f -address /run/containerd/containerd.sock + └─3605 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 2ff2944748437137f6da3c5f66034a712a989e1a71cca81294d963ad02648b9a -address /run/containerd/containerd.sock + + Nov 02 23:24:59 bridge-999044 systemd[1]: Started containerd.service - containerd container runtime. 
+ Nov 02 23:25:26 bridge-999044 containerd[1032]: time="2025-11-02T23:25:26.594318350Z" level=info msg="shim disconnected" id=d3f9649c2d7cbf6aeb8f9caed4f711369f3aae9273c4b66d05ee483c1ac4b08a namespace=moby + Nov 02 23:25:26 bridge-999044 containerd[1032]: time="2025-11-02T23:25:26.594489125Z" level=warning msg="cleaning up after shim disconnected" id=d3f9649c2d7cbf6aeb8f9caed4f711369f3aae9273c4b66d05ee483c1ac4b08a namespace=moby + Nov 02 23:25:26 bridge-999044 containerd[1032]: time="2025-11-02T23:25:26.594496844Z" level=info msg="cleaning up dead shim" namespace=moby + Nov 02 23:25:26 bridge-999044 containerd[1032]: time="2025-11-02T23:25:26.684932739Z" level=info msg="shim disconnected" id=61c7f93e01b8d37498f95611eb57fc66194e90c7b664197016fd073164e90d6c namespace=moby + Nov 02 23:25:26 bridge-999044 containerd[1032]: time="2025-11-02T23:25:26.684959614Z" level=warning msg="cleaning up after shim disconnected" id=61c7f93e01b8d37498f95611eb57fc66194e90c7b664197016fd073164e90d6c namespace=moby + Nov 02 23:25:26 bridge-999044 containerd[1032]: time="2025-11-02T23:25:26.684965195Z" level=info msg="cleaning up dead shim" namespace=moby + Nov 02 23:25:47 bridge-999044 containerd[1032]: time="2025-11-02T23:25:47.065310700Z" level=info msg="shim disconnected" id=60f616688ebec4181728cff80d58aa20da2c82e269322d1a4eb9a6676a8d442b namespace=moby + Nov 02 23:25:47 bridge-999044 containerd[1032]: time="2025-11-02T23:25:47.065345454Z" level=warning msg="cleaning up after shim disconnected" id=60f616688ebec4181728cff80d58aa20da2c82e269322d1a4eb9a6676a8d442b namespace=moby + Nov 02 23:25:47 bridge-999044 containerd[1032]: time="2025-11-02T23:25:47.065353691Z" level=info msg="cleaning up dead shim" namespace=moby + + + >>> host: containerd daemon config: + # ]8;;file://bridge-999044/lib/systemd/system/containerd.service/lib/systemd/system/containerd.service]8;; + # Copyright The containerd Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /lib/systemd/system/containerd.service: + # Copyright The containerd Authors. 
+ # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /etc/containerd/config.toml: + version = 2 + root = "/var/lib/containerd" + state = "/run/containerd" + oom_score = 0 + # imports + + [grpc] + address = "/run/containerd/containerd.sock" + uid = 0 + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + + [debug] + address = "" + uid = 0 + gid = 0 + level = "" + + [metrics] + address = "" + grpc_histogram = false + + [cgroup] + path = "" + + [plugins] + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + [plugins."io.containerd.grpc.v1.cri"] + enable_unprivileged_ports = true + stream_server_address = "" + stream_server_port = "10010" + enable_selinux = false + sandbox_image = "registry.k8s.io/pause:3.10.1" + stats_collect_period = 10 + enable_tls_streaming = false + max_container_log_line_size = 16384 + restrict_oom_score_adj = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = true + snapshotter = "overlayfs" + default_runtime_name = "runc" + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + runtime_type = "" + runtime_engine = "" + runtime_root = "" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + [plugins."io.containerd.gc.v1.scheduler"] + pause_threshold = 0.02 + deletion_threshold = 0 + mutation_threshold = 100 + schedule_delay = "0s" + startup_delay = "100ms" + + + >>> host: containerd config dump: + disabled_plugins = [] + imports = ["/etc/containerd/config.toml"] + oom_score = 0 + plugin_dir = "" + required_plugins = [] + root = "/var/lib/containerd" + state = "/run/containerd" + 
temp = "" + version = 2 + + [cgroup] + path = "" + + [debug] + address = "" + format = "" + gid = 0 + level = "" + uid = 0 + + [grpc] + address = "/run/containerd/containerd.sock" + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + tcp_address = "" + tcp_tls_ca = "" + tcp_tls_cert = "" + tcp_tls_key = "" + uid = 0 + + [metrics] + address = "" + grpc_histogram = false + + [plugins] + + [plugins."io.containerd.gc.v1.scheduler"] + deletion_threshold = 0 + mutation_threshold = 100 + pause_threshold = 0.02 + schedule_delay = "0s" + startup_delay = "100ms" + + [plugins."io.containerd.grpc.v1.cri"] + cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"] + device_ownership_from_security_context = false + disable_apparmor = false + disable_cgroup = false + disable_hugetlb_controller = true + disable_proc_mount = false + disable_tcp_service = true + drain_exec_sync_io_timeout = "0s" + enable_cdi = false + enable_selinux = false + enable_tls_streaming = false + enable_unprivileged_icmp = false + enable_unprivileged_ports = true + ignore_deprecation_warnings = [] + ignore_image_defined_volumes = false + image_pull_progress_timeout = "5m0s" + image_pull_with_sync_fs = false + max_concurrent_downloads = 3 + max_container_log_line_size = 16384 + netns_mounts_under_state_dir = false + restrict_oom_score_adj = false + sandbox_image = "registry.k8s.io/pause:3.10.1" + selinux_category_range = 1024 + stats_collect_period = 10 + stream_idle_timeout = "4h0m0s" + stream_server_address = "" + stream_server_port = "10010" + systemd_cgroup = false + tolerate_missing_hugetlb_controller = true + unset_seccomp_profile = "" + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + ip_pref = "" + max_conf_num = 1 + setup_serially = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + default_runtime_name = "runc" + disable_snapshot_annotations = true + discard_unpacked_layers = true + ignore_blockio_not_enabled_errors = false + ignore_rdt_not_enabled_errors = false + no_pivot = false + snapshotter = "overlayfs" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "io.containerd.runc.v2" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + 
runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".image_decryption] + key_model = "node" + + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.grpc.v1.cri".registry.auths] + + [plugins."io.containerd.grpc.v1.cri".registry.configs] + + [plugins."io.containerd.grpc.v1.cri".registry.headers] + + [plugins."io.containerd.grpc.v1.cri".registry.mirrors] + + [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming] + tls_cert_file = "" + tls_key_file = "" + + [plugins."io.containerd.internal.v1.opt"] + path = "/opt/containerd" + + [plugins."io.containerd.internal.v1.restart"] + interval = "10s" + + [plugins."io.containerd.internal.v1.tracing"] + + [plugins."io.containerd.metadata.v1.bolt"] + content_sharing_policy = "shared" + + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + + [plugins."io.containerd.nri.v1.nri"] + disable = true + disable_connections = false + plugin_config_path = "/etc/nri/conf.d" + plugin_path = "/opt/nri/plugins" + plugin_registration_timeout = "5s" + plugin_request_timeout = "2s" + socket_path = "/var/run/nri/nri.sock" + + [plugins."io.containerd.runtime.v1.linux"] + no_shim = false + runtime = "runc" + runtime_root = "" + shim = "containerd-shim" + shim_debug = false + + [plugins."io.containerd.runtime.v2.task"] + platforms = ["linux/amd64"] + sched_core = false + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + sync_fs = false + + [plugins."io.containerd.service.v1.tasks-service"] + blockio_config_file = "" + rdt_config_file = "" + + [plugins."io.containerd.snapshotter.v1.aufs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.blockfile"] + fs_type = "" + mount_options = [] + root_path = "" + scratch_file = "" + + [plugins."io.containerd.snapshotter.v1.btrfs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.devmapper"] + async_remove = false + base_image_size = "" + discard_blocks = false + fs_options = "" + fs_type = "" + pool_name = "" + root_path = "" + + [plugins."io.containerd.snapshotter.v1.native"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.overlayfs"] + mount_options = [] + root_path = "" + sync_remove = false + upperdir_label = false + + [plugins."io.containerd.snapshotter.v1.zfs"] + root_path = "" + + [plugins."io.containerd.tracing.processor.v1.otlp"] + + [plugins."io.containerd.transfer.v1.local"] + config_path = "" + max_concurrent_downloads = 3 + max_concurrent_uploaded_layers = 3 + + [[plugins."io.containerd.transfer.v1.local".unpack_config]] + differ = "walking" + platform = "linux/amd64" + snapshotter = "overlayfs" + + [proxy_plugins] + + [stream_processors] + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"] + accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"] + args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = "application/vnd.oci.image.layer.v1.tar" + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"] + accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"] + args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = 
"application/vnd.oci.image.layer.v1.tar+gzip" + + [timeouts] + "io.containerd.timeout.bolt.open" = "0s" + "io.containerd.timeout.metrics.shimstats" = "2s" + "io.containerd.timeout.shim.cleanup" = "5s" + "io.containerd.timeout.shim.load" = "5s" + "io.containerd.timeout.shim.shutdown" = "3s" + "io.containerd.timeout.task.state" = "2s" + + [ttrpc] + address = "" + gid = 0 + uid = 0 + + + >>> host: crio daemon status: + ○ crio.service - Container Runtime Interface for OCI (CRI-O) + Loaded: loaded (]8;;file://bridge-999044/lib/systemd/system/crio.service/lib/systemd/system/crio.service]8;;; disabled; preset: enabled) + Active: inactive (dead) + Docs: ]8;;https://github.com/cri-o/cri-ohttps://github.com/cri-o/cri-o]8;; + ssh: Process exited with status 3 + + + >>> host: crio daemon config: + # ]8;;file://bridge-999044/lib/systemd/system/crio.service/lib/systemd/system/crio.service]8;; + [Unit] + Description=Container Runtime Interface for OCI (CRI-O) + Documentation=https://github.com/cri-o/cri-o + Wants=network-online.target + Before=kubelet.service + After=network-online.target + + [Service] + Type=notify + EnvironmentFile=-/etc/default/crio + Environment=GOTRACEBACK=crash + ExecStart=/usr/bin/crio \ + $CRIO_CONFIG_OPTIONS \ + $CRIO_RUNTIME_OPTIONS \ + $CRIO_STORAGE_OPTIONS \ + $CRIO_NETWORK_OPTIONS \ + $CRIO_METRICS_OPTIONS + ExecReload=/bin/kill -s HUP $MAINPID + TasksMax=infinity + LimitNOFILE=1048576 + LimitNPROC=1048576 + LimitCORE=infinity + OOMScoreAdjust=-999 + TimeoutStartSec=0 + Restart=on-failure + RestartSec=10 + + [Install] + WantedBy=multi-user.target + Alias=cri-o.service + + + >>> host: /etc/crio: + /etc/crio/crio.conf.d/10-crio.conf + [crio.image] + signature_policy = "/etc/crio/policy.json" + + [crio.runtime] + default_runtime = "crun" + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_root = "/run/crun" + monitor_path = "/usr/libexec/crio/conmon" + allowed_annotations = [ + "io.containers.trace-syscall", + ] + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_root = "/run/runc" + monitor_path = "/usr/libexec/crio/conmon" + /etc/crio/crio.conf.d/02-crio.conf + [crio.image] + # pause_image = "" + + [crio.network] + # cni_default_network = "" + + [crio.runtime] + # cgroup_manager = "" + /etc/crio/policy.json + { "default": [{ "type": "insecureAcceptAnything" }] } + + + >>> host: crio config: + INFO[2025-11-02T23:26:08.906038355Z] Updating config from single file: /etc/crio/crio.conf + INFO[2025-11-02T23:26:08.906057712Z] Updating config from drop-in file: /etc/crio/crio.conf + INFO[2025-11-02T23:26:08.906079968Z] Skipping not-existing config file "/etc/crio/crio.conf" + INFO[2025-11-02T23:26:08.906091642Z] Updating config from path: /etc/crio/crio.conf.d + INFO[2025-11-02T23:26:08.906124888Z] Updating config from drop-in file: /etc/crio/crio.conf.d/02-crio.conf + INFO[2025-11-02T23:26:08.906188208Z] Updating config from drop-in file: /etc/crio/crio.conf.d/10-crio.conf + INFO Using default capabilities: CAP_CHOWN, CAP_DAC_OVERRIDE, CAP_FSETID, CAP_FOWNER, CAP_SETGID, CAP_SETUID, CAP_SETPCAP, CAP_NET_BIND_SERVICE, CAP_KILL + # The CRI-O configuration file specifies all of the available configuration + # options and command-line flags for the crio(8) OCI Kubernetes Container Runtime + # daemon, but in a TOML format that can be more easily modified and versioned. + # + # Please refer to crio.conf(5) for details of all configuration options. 
+ + # CRI-O supports partial configuration reload during runtime, which can be + # done by sending SIGHUP to the running process. Currently supported options + # are explicitly mentioned with: 'This option supports live configuration + # reload'. + + # CRI-O reads its storage defaults from the containers-storage.conf(5) file + # located at /etc/containers/storage.conf. Modify this storage configuration if + # you want to change the system's defaults. If you want to modify storage just + # for CRI-O, you can change the storage configuration options here. + [crio] + + # Path to the "root directory". CRI-O stores all of its data, including + # containers images, in this directory. + # root = "/var/lib/containers/storage" + + # Path to the "run directory". CRI-O stores all of its state in this directory. + # runroot = "/run/containers/storage" + + # Path to the "imagestore". If CRI-O stores all of its images in this directory differently than Root. + # imagestore = "" + + # Storage driver used to manage the storage of images and containers. Please + # refer to containers-storage.conf(5) to see all available storage drivers. + # storage_driver = "" + + # List to pass options to the storage driver. Please refer to + # containers-storage.conf(5) to see all available storage options. + # storage_option = [ + # ] + + # The default log directory where all logs will go unless directly specified by + # the kubelet. The log directory specified must be an absolute directory. + # log_dir = "/var/log/crio/pods" + + # Location for CRI-O to lay down the temporary version file. + # It is used to check if crio wipe should wipe containers, which should + # always happen on a node reboot + # version_file = "/var/run/crio/version" + + # Location for CRI-O to lay down the persistent version file. + # It is used to check if crio wipe should wipe images, which should + # only happen when CRI-O has been upgraded + # version_file_persist = "" + + # InternalWipe is whether CRI-O should wipe containers and images after a reboot when the server starts. + # If set to false, one must use the external command 'crio wipe' to wipe the containers and images in these situations. + # internal_wipe = true + + # InternalRepair is whether CRI-O should check if the container and image storage was corrupted after a sudden restart. + # If it was, CRI-O also attempts to repair the storage. + # internal_repair = true + + # Location for CRI-O to lay down the clean shutdown file. + # It is used to check whether crio had time to sync before shutting down. + # If not found, crio wipe will clear the storage directory. + # clean_shutdown_file = "/var/lib/crio/clean.shutdown" + + # The crio.api table contains settings for the kubelet/gRPC interface. + [crio.api] + + # Path to AF_LOCAL socket on which CRI-O will listen. + # listen = "/var/run/crio/crio.sock" + + # IP address on which the stream server will listen. + # stream_address = "127.0.0.1" + + # The port on which the stream server will listen. If the port is set to "0", then + # CRI-O will allocate a random free port number. + # stream_port = "0" + + # Enable encrypted TLS transport of the stream server. + # stream_enable_tls = false + + # Length of time until open streams terminate due to lack of activity + # stream_idle_timeout = "" + + # Path to the x509 certificate file used to serve the encrypted stream. This + # file can change, and CRI-O will automatically pick up the changes. + # stream_tls_cert = "" + + # Path to the key file used to serve the encrypted stream. 
This file can + # change and CRI-O will automatically pick up the changes. + # stream_tls_key = "" + + # Path to the x509 CA(s) file used to verify and authenticate client + # communication with the encrypted stream. This file can change and CRI-O will + # automatically pick up the changes. + # stream_tls_ca = "" + + # Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 80 * 1024 * 1024. + # grpc_max_send_msg_size = 83886080 + + # Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 80 * 1024 * 1024. + # grpc_max_recv_msg_size = 83886080 + + # The crio.runtime table contains settings pertaining to the OCI runtime used + # and options for how to set up and manage the OCI runtime. + [crio.runtime] + + # A list of ulimits to be set in containers by default, specified as + # "=:", for example: + # "nofile=1024:2048" + # If nothing is set here, settings will be inherited from the CRI-O daemon + # default_ulimits = [ + # ] + + # If true, the runtime will not use pivot_root, but instead use MS_MOVE. + # no_pivot = false + + # decryption_keys_path is the path where the keys required for + # image decryption are stored. This option supports live configuration reload. + # decryption_keys_path = "/etc/crio/keys/" + + # Path to the conmon binary, used for monitoring the OCI runtime. + # Will be searched for using $PATH if empty. + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon = "" + + # Cgroup setting for conmon + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorCgroup. + # conmon_cgroup = "" + + # Environment variable list for the conmon process, used for passing necessary + # environment variables to conmon or the runtime. + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon_env = [ + # ] + + # Additional environment variables to set for all the + # containers. These are overridden if set in the + # container image spec or in the container runtime configuration. + # default_env = [ + # ] + + # If true, SELinux will be used for pod separation on the host. + # This option is deprecated, and be interpreted from whether SELinux is enabled on the host in the future. + # selinux = false + + # Path to the seccomp.json profile which is used as the default seccomp profile + # for the runtime. If not specified or set to "", then the internal default seccomp profile will be used. + # This option supports live configuration reload. + # seccomp_profile = "" + + # Enable a seccomp profile for privileged containers from the local path. + # This option supports live configuration reload. + # privileged_seccomp_profile = "" + + # Used to change the name of the default AppArmor profile of CRI-O. The default + # profile name is "crio-default". This profile only takes effect if the user + # does not specify a profile via the Kubernetes Pod's metadata annotation. If + # the profile is set to "unconfined", then this equals to disabling AppArmor. + # This option supports live configuration reload. + # apparmor_profile = "crio-default" + + # Path to the blockio class configuration file for configuring + # the cgroup blockio controller. + # blockio_config_file = "" + + # Reload blockio-config-file and rescan blockio devices in the system before applying + # blockio parameters. + # blockio_reload = false + + # Used to change irqbalance service config file path which is used for configuring + # irqbalance daemon. 
+ # irqbalance_config_file = "/etc/sysconfig/irqbalance" + + # irqbalance_config_restore_file allows to set a cpu mask CRI-O should + # restore as irqbalance config at startup. Set to empty string to disable this flow entirely. + # By default, CRI-O manages the irqbalance configuration to enable dynamic IRQ pinning. + # irqbalance_config_restore_file = "/etc/sysconfig/orig_irq_banned_cpus" + + # Path to the RDT configuration file for configuring the resctrl pseudo-filesystem. + # This option supports live configuration reload. + # rdt_config_file = "" + + # Cgroup management implementation used for the runtime. + # cgroup_manager = "systemd" + + # Specify whether the image pull must be performed in a separate cgroup. + # separate_pull_cgroup = "" + + # List of default capabilities for containers. If it is empty or commented out, + # only the capabilities defined in the containers json file by the user/kube + # will be added. + # default_capabilities = [ + # "CHOWN", + # "DAC_OVERRIDE", + # "FSETID", + # "FOWNER", + # "SETGID", + # "SETUID", + # "SETPCAP", + # "NET_BIND_SERVICE", + # "KILL", + # ] + + # Add capabilities to the inheritable set, as well as the default group of permitted, bounding and effective. + # If capabilities are expected to work for non-root users, this option should be set. + # add_inheritable_capabilities = false + + # List of default sysctls. If it is empty or commented out, only the sysctls + # defined in the container json file by the user/kube will be added. + # default_sysctls = [ + # ] + + # List of devices on the host that a + # user can specify with the "io.kubernetes.cri-o.Devices" allowed annotation. + # allowed_devices = [ + # "/dev/fuse", + # "/dev/net/tun", + # ] + + # List of additional devices. specified as + # "::", for example: "--device=/dev/sdc:/dev/xvdc:rwm". + # If it is empty or commented out, only the devices + # defined in the container json file by the user/kube will be added. + # additional_devices = [ + # ] + + # List of directories to scan for CDI Spec files. + # cdi_spec_dirs = [ + # "/etc/cdi", + # "/var/run/cdi", + # ] + + # Change the default behavior of setting container devices uid/gid from CRI's + # SecurityContext (RunAsUser/RunAsGroup) instead of taking host's uid/gid. + # Defaults to false. + # device_ownership_from_security_context = false + + # Path to OCI hooks directories for automatically executed hooks. If one of the + # directories does not exist, then CRI-O will automatically skip them. + # hooks_dir = [ + # "/usr/share/containers/oci/hooks.d", + # ] + + # Path to the file specifying the defaults mounts for each container. The + # format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads + # its default mounts from the following two files: + # + # 1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the + # override file, where users can either add in their own default mounts, or + # override the default mounts shipped with the package. + # + # 2) /usr/share/containers/mounts.conf: This is the default file read for + # mounts. If you want CRI-O to read from a different, specific mounts file, + # you can change the default_mounts_file. Note, if this is done, CRI-O will + # only add mounts it finds in this file. + # + # default_mounts_file = "" + + # Maximum number of processes allowed in a container. + # This option is deprecated. The Kubelet flag '--pod-pids-limit' should be used instead. + # pids_limit = -1 + + # Maximum sized allowed for the container log file. 
Negative numbers indicate + # that no size limit is imposed. If it is positive, it must be >= 8192 to + # match/exceed conmon's read buffer. The file is truncated and re-opened so the + # limit is never exceeded. This option is deprecated. The Kubelet flag '--container-log-max-size' should be used instead. + # log_size_max = -1 + + # Whether container output should be logged to journald in addition to the kubernetes log file + # log_to_journald = false + + # Path to directory in which container exit files are written to by conmon. + # container_exits_dir = "/var/run/crio/exits" + + # Path to directory for container attach sockets. + # container_attach_socket_dir = "/var/run/crio" + + # The prefix to use for the source of the bind mounts. + # bind_mount_prefix = "" + + # If set to true, all containers will run in read-only mode. + # read_only = false + + # Changes the verbosity of the logs based on the level it is set to. Options + # are fatal, panic, error, warn, info, debug and trace. This option supports + # live configuration reload. + # log_level = "info" + + # Filter the log messages by the provided regular expression. + # This option supports live configuration reload. + # log_filter = "" + + # The UID mappings for the user namespace of each container. A range is + # specified in the form containerUID:HostUID:Size. Multiple ranges must be + # separated by comma. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # uid_mappings = "" + + # The GID mappings for the user namespace of each container. A range is + # specified in the form containerGID:HostGID:Size. Multiple ranges must be + # separated by comma. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # gid_mappings = "" + + # If set, CRI-O will reject any attempt to map host UIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_uid = -1 + + # If set, CRI-O will reject any attempt to map host GIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_gid = -1 + + # The minimal amount of time in seconds to wait before issuing a timeout + # regarding the proper termination of the container. The lowest possible + # value is 30s, whereas lower values are not considered by CRI-O. + # ctr_stop_timeout = 30 + + # drop_infra_ctr determines whether CRI-O drops the infra container + # when a pod does not have a private PID namespace, and does not use + # a kernel separating runtime (like kata). + # It requires manage_ns_lifecycle to be true. + # drop_infra_ctr = true + + # infra_ctr_cpuset determines what CPUs will be used to run infra containers. + # You can use linux CPU list format to specify desired CPUs. + # To get better isolation for guaranteed pods, set this parameter to be equal to kubelet reserved-cpus. 
+ # infra_ctr_cpuset = "" + + # shared_cpuset determines the CPU set which is allowed to be shared between guaranteed containers, + # regardless of, and in addition to, the exclusiveness of their CPUs. + # This field is optional and would not be used if not specified. + # You can specify CPUs in the Linux CPU list format. + # shared_cpuset = "" + + # The directory where the state of the managed namespaces gets tracked. + # Only used when manage_ns_lifecycle is true. + # namespaces_dir = "/var/run" + + # pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle + # pinns_path = "" + + # Globally enable/disable CRIU support which is necessary to + # checkpoint and restore container or pods (even if CRIU is found in $PATH). + # enable_criu_support = true + + # Enable/disable the generation of the container, + # sandbox lifecycle events to be sent to the Kubelet to optimize the PLEG + # enable_pod_events = false + + # default_runtime is the _name_ of the OCI runtime to be used as the default. + # The name is matched against the runtimes map below. + # default_runtime = "crun" + + # A list of paths that, when absent from the host, + # will cause a container creation to fail (as opposed to the current behavior being created as a directory). + # This option is to protect from source locations whose existence as a directory could jeopardize the health of the node, and whose + # creation as a file is not desired either. + # An example is /etc/hostname, which will cause failures on reboot if it's created as a directory, but often doesn't exist because + # the hostname is being managed dynamically. + # absent_mount_sources_to_reject = [ + # ] + + # The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes. + # The runtime to use is picked based on the runtime handler provided by the CRI. + # If no runtime handler is provided, the "default_runtime" will be used. + # Each entry in the table should follow the format: + # + # [crio.runtime.runtimes.runtime-handler] + # runtime_path = "/path/to/the/executable" + # runtime_type = "oci" + # runtime_root = "/path/to/the/root" + # inherit_default_runtime = false + # monitor_path = "/path/to/container/monitor" + # monitor_cgroup = "/cgroup/path" + # monitor_exec_cgroup = "/cgroup/path" + # monitor_env = [] + # privileged_without_host_devices = false + # allowed_annotations = [] + # platform_runtime_paths = { "os/arch" = "/path/to/binary" } + # no_sync_log = false + # default_annotations = {} + # stream_websockets = false + # seccomp_profile = "" + # Where: + # - runtime-handler: Name used to identify the runtime. + # - runtime_path (optional, string): Absolute path to the runtime executable in + # the host filesystem. If omitted, the runtime-handler identifier should match + # the runtime executable name, and the runtime executable should be placed + # in $PATH. + # - runtime_type (optional, string): Type of runtime, one of: "oci", "vm". If + # omitted, an "oci" runtime is assumed. + # - runtime_root (optional, string): Root directory for storage of containers + # state. + # - runtime_config_path (optional, string): the path for the runtime configuration + # file. This can only be used with when using the VM runtime_type. + # - inherit_default_runtime (optional, bool): when true the runtime_path, + # runtime_type, runtime_root and runtime_config_path will be replaced by + # the values from the default runtime on load time. 
+ # - privileged_without_host_devices (optional, bool): an option for restricting + # host devices from being passed to privileged containers. + # - allowed_annotations (optional, array of strings): an option for specifying + # a list of experimental annotations that this runtime handler is allowed to process. + # The currently recognized values are: + # "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod. + # "io.kubernetes.cri-o.cgroup2-mount-hierarchy-rw" for mounting cgroups writably when set to "true". + # "io.kubernetes.cri-o.Devices" for configuring devices for the pod. + # "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm. + # "io.kubernetes.cri-o.UnifiedCgroup.$CTR_NAME" for configuring the cgroup v2 unified block for a container. + # "io.containers.trace-syscall" for tracing syscalls via the OCI seccomp BPF hook. + # "io.kubernetes.cri-o.seccompNotifierAction" for enabling the seccomp notifier feature. + # "io.kubernetes.cri-o.umask" for setting the umask for container init process. + # "io.kubernetes.cri.rdt-class" for setting the RDT class of a container + # "seccomp-profile.kubernetes.cri-o.io" for setting the seccomp profile for: + # - a specific container by using: "seccomp-profile.kubernetes.cri-o.io/" + # - a whole pod by using: "seccomp-profile.kubernetes.cri-o.io/POD" + # Note that the annotation works on containers as well as on images. + # For images, the plain annotation "seccomp-profile.kubernetes.cri-o.io" + # can be used without the required "/POD" suffix or a container name. + # "io.kubernetes.cri-o.DisableFIPS" for disabling FIPS mode in a Kubernetes pod within a FIPS-enabled cluster. + # - monitor_path (optional, string): The path of the monitor binary. Replaces + # deprecated option "conmon". + # - monitor_cgroup (optional, string): The cgroup the container monitor process will be put in. + # Replaces deprecated option "conmon_cgroup". + # - monitor_exec_cgroup (optional, string): If set to "container", indicates exec probes + # should be moved to the container's cgroup + # - monitor_env (optional, array of strings): Environment variables to pass to the monitor. + # Replaces deprecated option "conmon_env". + # When using the pod runtime and conmon-rs, then the monitor_env can be used to further configure + # conmon-rs by using: + # - LOG_DRIVER=[none,systemd,stdout] - Enable logging to the configured target, defaults to none. + # - HEAPTRACK_OUTPUT_PATH=/path/to/dir - Enable heaptrack profiling and save the files to the set directory. + # - HEAPTRACK_BINARY_PATH=/path/to/heaptrack - Enable heaptrack profiling and use set heaptrack binary. + # - platform_runtime_paths (optional, map): A mapping of platforms to the corresponding + # runtime executable paths for the runtime handler. + # - container_min_memory (optional, string): The minimum memory that must be set for a container. + # This value can be used to override the currently set global value for a specific runtime. If not set, + # a global default value of "12 MiB" will be used. + # - no_sync_log (optional, bool): If set to true, the runtime will not sync the log file on rotate or container exit. + # This option is only valid for the 'oci' runtime type. Setting this option to true can cause data loss, e.g. + # when a machine crash happens. + # - default_annotations (optional, map): Default annotations if not overridden by the pod spec. + # - stream_websockets (optional, bool): Enable the WebSocket protocol for container exec, attach and port forward. 
+ # - seccomp_profile (optional, string): The absolute path of the seccomp.json profile which is used as the default + # seccomp profile for the runtime. + # If not specified or set to "", the runtime seccomp_profile will be used. + # If that is also not specified or set to "", the internal default seccomp profile will be applied. + # + # Using the seccomp notifier feature: + # + # This feature can help you to debug seccomp related issues, for example if + # blocked syscalls (permission denied errors) have negative impact on the workload. + # + # To be able to use this feature, configure a runtime which has the annotation + # "io.kubernetes.cri-o.seccompNotifierAction" in the allowed_annotations array. + # + # It also requires at least runc 1.1.0 or crun 0.19 which support the notifier + # feature. + # + # If everything is setup, CRI-O will modify chosen seccomp profiles for + # containers if the annotation "io.kubernetes.cri-o.seccompNotifierAction" is + # set on the Pod sandbox. CRI-O will then get notified if a container is using + # a blocked syscall and then terminate the workload after a timeout of 5 + # seconds if the value of "io.kubernetes.cri-o.seccompNotifierAction=stop". + # + # This also means that multiple syscalls can be captured during that period, + # while the timeout will get reset once a new syscall has been discovered. + # + # This also means that the Pods "restartPolicy" has to be set to "Never", + # otherwise the kubelet will restart the container immediately. + # + # Please be aware that CRI-O is not able to get notified if a syscall gets + # blocked based on the seccomp defaultAction, which is a general runtime + # limitation. + + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_type = "" + runtime_root = "/run/crun" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + allowed_annotations = [ + "io.containers.trace-syscall", + ] + privileged_without_host_devices = false + + + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_type = "" + runtime_root = "/run/runc" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + + privileged_without_host_devices = false + + + + # The workloads table defines ways to customize containers with different resources + # that work based on annotations, rather than the CRI. + # Note, the behavior of this table is EXPERIMENTAL and may change at any time. + # Each workload, has a name, activation_annotation, annotation_prefix and set of resources it supports mutating. + # The currently supported resources are "cpuperiod" "cpuquota", "cpushares", "cpulimit" and "cpuset". The values for "cpuperiod" and "cpuquota" are denoted in microseconds. + # The value for "cpulimit" is denoted in millicores, this value is used to calculate the "cpuquota" with the supplied "cpuperiod" or the default "cpuperiod". + # Note that the "cpulimit" field overrides the "cpuquota" value supplied in this configuration. + # Each resource can have a default value specified, or be empty. + # For a container to opt-into this workload, the pod should be configured with the annotation $activation_annotation (key only, value is ignored). 
+ # To customize per-container, an annotation of the form $annotation_prefix.$resource/$ctrName = "value" can be specified + # signifying for that resource type to override the default value. + # If the annotation_prefix is not present, every container in the pod will be given the default values. + # Example: + # [crio.runtime.workloads.workload-type] + # activation_annotation = "io.crio/workload" + # annotation_prefix = "io.crio.workload-type" + # [crio.runtime.workloads.workload-type.resources] + # cpuset = "0-1" + # cpushares = "5" + # cpuquota = "1000" + # cpuperiod = "100000" + # cpulimit = "35" + # Where: + # The workload name is workload-type. + # To specify, the pod must have the "io.crio.workload" annotation (this is a precise string match). + # This workload supports setting cpuset and cpu resources. + # annotation_prefix is used to customize the different resources. + # To configure the cpu shares a container gets in the example above, the pod would have to have the following annotation: + # "io.crio.workload-type/$container_name = {"cpushares": "value"}" + + # hostnetwork_disable_selinux determines whether + # SELinux should be disabled within a pod when it is running in the host network namespace + # Default value is set to true + # hostnetwork_disable_selinux = true + + # disable_hostport_mapping determines whether to enable/disable + # the container hostport mapping in CRI-O. + # Default value is set to 'false' + # disable_hostport_mapping = false + + # timezone To set the timezone for a container in CRI-O. + # If an empty string is provided, CRI-O retains its default behavior. Use 'Local' to match the timezone of the host machine. + # timezone = "" + + # The crio.image table contains settings pertaining to the management of OCI images. + # + # CRI-O reads its configured registries defaults from the system wide + # containers-registries.conf(5) located in /etc/containers/registries.conf. + [crio.image] + + # Default transport for pulling images from a remote container storage. + # default_transport = "docker://" + + # The path to a file containing credentials necessary for pulling images from + # secure registries. The file is similar to that of /var/lib/kubelet/config.json + # global_auth_file = "" + + # The image used to instantiate infra containers. + # This option supports live configuration reload. + # pause_image = "registry.k8s.io/pause:3.10.1" + + # The path to a file containing credentials specific for pulling the pause_image from + # above. The file is similar to that of /var/lib/kubelet/config.json + # This option supports live configuration reload. + # pause_image_auth_file = "" + + # The command to run to have a container stay in the paused state. + # When explicitly set to "", it will fallback to the entrypoint and command + # specified in the pause image. When commented out, it will fallback to the + # default: "/pause". This option supports live configuration reload. + # pause_command = "/pause" + + # List of images to be excluded from the kubelet's garbage collection. + # It allows specifying image names using either exact, glob, or keyword + # patterns. Exact matches must match the entire name, glob matches can + # have a wildcard * at the end, and keyword matches can have wildcards + # on both ends. By default, this list includes the "pause" image if + # configured by the user, which is used as a placeholder in Kubernetes pods. 
+ # pinned_images = [ + # ] + + # Path to the file which decides what sort of policy we use when deciding + # whether or not to trust an image that we've pulled. It is not recommended that + # this option be used, as the default behavior of using the system-wide default + # policy (i.e., /etc/containers/policy.json) is most often preferred. Please + # refer to containers-policy.json(5) for more details. + signature_policy = "/etc/crio/policy.json" + + # Root path for pod namespace-separated signature policies. + # The final policy to be used on image pull will be /.json. + # If no pod namespace is being provided on image pull (via the sandbox config), + # or the concatenated path is non existent, then the signature_policy or system + # wide policy will be used as fallback. Must be an absolute path. + # signature_policy_dir = "/etc/crio/policies" + + # List of registries to skip TLS verification for pulling images. Please + # consider configuring the registries via /etc/containers/registries.conf before + # changing them here. + # This option is deprecated. Use registries.conf file instead. + # insecure_registries = [ + # ] + + # Controls how image volumes are handled. The valid values are mkdir, bind and + # ignore; the latter will ignore volumes entirely. + # image_volumes = "mkdir" + + # Temporary directory to use for storing big files + # big_files_temporary_dir = "" + + # If true, CRI-O will automatically reload the mirror registry when + # there is an update to the 'registries.conf.d' directory. Default value is set to 'false'. + # auto_reload_registries = false + + # The timeout for an image pull to make progress until the pull operation + # gets canceled. This value will be also used for calculating the pull progress interval to pull_progress_timeout / 10. + # Can be set to 0 to disable the timeout as well as the progress output. + # pull_progress_timeout = "0s" + + # The mode of short name resolution. + # The valid values are "enforcing" and "disabled", and the default is "enforcing". + # If "enforcing", an image pull will fail if a short name is used, but the results are ambiguous. + # If "disabled", the first result will be chosen. + # short_name_mode = "enforcing" + + # OCIArtifactMountSupport is whether CRI-O should support OCI artifacts. + # If set to false, mounting OCI Artifacts will result in an error. + # oci_artifact_mount_support = true + # The crio.network table containers settings pertaining to the management of + # CNI plugins. + [crio.network] + + # The default CNI network name to be selected. If not set or "", then + # CRI-O will pick-up the first one found in network_dir. + # cni_default_network = "" + + # Path to the directory where CNI configuration files are located. + # network_dir = "/etc/cni/net.d/" + + # Paths to directories where CNI plugin binaries are located. + # plugin_dirs = [ + # "/opt/cni/bin/", + # ] + + # List of included pod metrics. + # included_pod_metrics = [ + # ] + + # A necessary configuration for Prometheus based metrics retrieval + [crio.metrics] + + # Globally enable or disable metrics support. + # enable_metrics = false + + # Specify enabled metrics collectors. + # Per default all metrics are enabled. + # It is possible, to prefix the metrics with "container_runtime_" and "crio_". + # For example, the metrics collector "operations" would be treated in the same + # way as "crio_operations" and "container_runtime_crio_operations". 
+ # metrics_collectors = [ + # "image_pulls_layer_size", + # "containers_events_dropped_total", + # "containers_oom_total", + # "processes_defunct", + # "operations_total", + # "operations_latency_seconds", + # "operations_latency_seconds_total", + # "operations_errors_total", + # "image_pulls_bytes_total", + # "image_pulls_skipped_bytes_total", + # "image_pulls_failure_total", + # "image_pulls_success_total", + # "image_layer_reuse_total", + # "containers_oom_count_total", + # "containers_seccomp_notifier_count_total", + # "resources_stalled_at_stage", + # "containers_stopped_monitor_count", + # ] + # The IP address or hostname on which the metrics server will listen. + # metrics_host = "127.0.0.1" + + # The port on which the metrics server will listen. + # metrics_port = 9090 + + # Local socket path to bind the metrics server to + # metrics_socket = "" + + # The certificate for the secure metrics server. + # If the certificate is not available on disk, then CRI-O will generate a + # self-signed one. CRI-O also watches for changes of this path and reloads the + # certificate on any modification event. + # metrics_cert = "" + + # The certificate key for the secure metrics server. + # Behaves in the same way as the metrics_cert. + # metrics_key = "" + + # A necessary configuration for OpenTelemetry trace data exporting + [crio.tracing] + + # Globally enable or disable exporting OpenTelemetry traces. + # enable_tracing = false + + # Address on which the gRPC trace collector listens on. + # tracing_endpoint = "127.0.0.1:4317" + + # Number of samples to collect per million spans. Set to 1000000 to always sample. + # tracing_sampling_rate_per_million = 0 + + # CRI-O NRI configuration. + [crio.nri] + + # Globally enable or disable NRI. + # enable_nri = true + + # NRI socket to listen on. + # nri_listen = "/var/run/nri/nri.sock" + + # NRI plugin directory to use. + # nri_plugin_dir = "/opt/nri/plugins" + + # NRI plugin configuration directory to use. + # nri_plugin_config_dir = "/etc/nri/conf.d" + + # Disable connections from externally launched NRI plugins. + # nri_disable_connections = false + + # Timeout for a plugin to register itself with NRI. + # nri_plugin_registration_timeout = "5s" + + # Timeout for a plugin to handle an NRI request. + # nri_plugin_request_timeout = "2s" + + # NRI default validator configuration. + # If enabled, the builtin default validator can be used to reject a container if some + # NRI plugin requested a restricted adjustment. Currently the following adjustments + # can be restricted/rejected: + # - OCI hook injection + # - adjustment of runtime default seccomp profile + # - adjustment of unconfied seccomp profile + # - adjustment of a custom seccomp profile + # - adjustment of linux namespaces + # Additionally, the default validator can be used to reject container creation if any + # of a required set of plugins has not processed a container creation request, unless + # the container has been annotated to tolerate a missing plugin. 
+ # + # [crio.nri.default_validator] + # nri_enable_default_validator = false + # nri_validator_reject_oci_hook_adjustment = false + # nri_validator_reject_runtime_default_seccomp_adjustment = false + # nri_validator_reject_unconfined_seccomp_adjustment = false + # nri_validator_reject_custom_seccomp_adjustment = false + # nri_validator_reject_namespace_adjustment = false + # nri_validator_required_plugins = [ + # ] + # nri_validator_tolerate_missing_plugins_annotation = "" + + # Necessary information pertaining to container and pod stats reporting. + [crio.stats] + + # The number of seconds between collecting pod and container stats. + # If set to 0, the stats are collected on-demand instead. + # stats_collection_period = 0 + + # The number of seconds between collecting pod/container stats and pod + # sandbox metrics. If set to 0, the metrics/stats are collected on-demand instead. + # collection_period = 0 + + + ----------------------- debugLogs end: bridge-999044 [took: 12.846846116s] -------------------------------- + helpers_test.go:175: Cleaning up "bridge-999044" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p bridge-999044 + helpers_test.go:352: "netcat-cd4db9dbf-h69w6" [89714356-9111-4f71-953e-c8949c1c52ec] Running + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p flannel-999044: (1.779037023s) +=== CONT TestStartStop/group/old-k8s-version +=== RUN TestStartStop/group/old-k8s-version/serial +=== RUN TestStartStop/group/old-k8s-version/serial/FirstStart + start_stop_delete_test.go:184: (dbg) Run: out/minikube-linux-amd64 start -p old-k8s-version-736872 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=docker --kubernetes-version=v1.28.0 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p bridge-999044: (1.744630754s) +=== CONT TestStartStop/group/no-preload +=== RUN TestStartStop/group/no-preload/serial +=== RUN TestStartStop/group/no-preload/serial/FirstStart + start_stop_delete_test.go:184: (dbg) Run: out/minikube-linux-amd64 start -p no-preload-162995 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=docker --kubernetes-version=v1.34.1 + net_test.go:163: (dbg) TestNetworkPlugins/group/enable-default-cni/NetCatPod: app=netcat healthy within 9.003079099s +=== RUN TestNetworkPlugins/group/enable-default-cni/DNS + net_test.go:175: (dbg) Run: kubectl --context enable-default-cni-999044 exec deployment/netcat -- nslookup kubernetes.default +=== RUN TestNetworkPlugins/group/enable-default-cni/Localhost + net_test.go:194: (dbg) Run: kubectl --context enable-default-cni-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z localhost 8080" +=== RUN TestNetworkPlugins/group/enable-default-cni/HairPin + net_test.go:264: (dbg) Run: kubectl --context enable-default-cni-999044 exec deployment/netcat -- /bin/sh -c "nc -w 5 -i 5 -z netcat 8080" + net_test.go:210: "enable-default-cni" test finished in 6m16.016980433s, failed=false + net_test.go:211: + ----------------------- debugLogs start: kubenet-999044 [pass: true] -------------------------------- + >>> netcat: nslookup kubernetes.default: + Server: 10.96.0.10 + Address: 10.96.0.10#53 + + Name: kubernetes.default.svc.cluster.local + Address: 10.96.0.1 + + + + >>> netcat: nc 10.96.0.10 udp/53: + Connection to 10.96.0.10 53 port [udp/*] succeeded! 
+ + + >>> netcat: nc 10.96.0.10 tcp/53: + Connection to 10.96.0.10 53 port [tcp/*] succeeded! + + + >>> netcat: /etc/nsswitch.conf: + cat: can't open '/etc/nsswitch.conf': No such file or directory + command terminated with exit code 1 + + + >>> netcat: /etc/hosts: + # Kubernetes-managed hosts file. + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + fe00::0 ip6-localnet + fe00::0 ip6-mcastprefix + fe00::1 ip6-allnodes + fe00::2 ip6-allrouters + 10.244.0.3 netcat-cd4db9dbf-t8glm + + + >>> netcat: /etc/resolv.conf: + nameserver 10.96.0.10 + search default.svc.cluster.local svc.cluster.local cluster.local test-pods.svc.cluster.local c.k8s-infra-prow-build.internal google.internal + options ndots:5 + + + >>> host: /etc/nsswitch.conf: + # /etc/nsswitch.conf + # + # Example configuration of GNU Name Service Switch functionality. + # If you have the `glibc-doc-reference' and `info' packages installed, try: + # `info libc "Name Service Switch"' for information about this file. + + passwd: files + group: files + shadow: files + gshadow: files + + hosts: files dns + networks: files + + protocols: db files + services: db files + ethers: db files + rpc: db files + + netgroup: nis + + + >>> host: /etc/hosts: + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + fe00:: ip6-localnet + ff00:: ip6-mcastprefix + ff02::1 ip6-allnodes + ff02::2 ip6-allrouters + 192.168.94.2 kubenet-999044 + 192.168.94.1 host.minikube.internal + 192.168.94.2 control-plane.minikube.internal + + + >>> host: /etc/resolv.conf: + # Generated by Docker Engine. + # This file can be edited; Docker Engine will not make further changes once it + # has been modified. + + nameserver 192.168.94.1 + search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal + options ndots:5 + + # Based on host file: '/etc/resolv.conf' (internal resolver) + # ExtServers: [host(10.35.240.10)] + # Overrides: [] + # Option ndots from: host + + + >>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, : + Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice + NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME + node/kubenet-999044 Ready control-plane 56s v1.34.1 192.168.94.2 Debian GNU/Linux 12 (bookworm) 6.6.97+ docker://28.5.1 + + NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR + default service/kubernetes ClusterIP 10.96.0.1 443/TCP 55s + default service/netcat ClusterIP 10.110.140.25 8080/TCP 14s app=netcat + kube-system service/kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP,9153/TCP 54s k8s-app=kube-dns + + NAMESPACE NAME ENDPOINTS AGE + default endpoints/kubernetes 192.168.94.2:8443 55s + default endpoints/netcat 10.244.0.3:8080 14s + kube-system endpoints/k8s.io-minikube-hostpath 48s + kube-system endpoints/kube-dns 10.244.0.2:53,10.244.0.2:53,10.244.0.2:9153 48s + + NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR + kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 54s kube-proxy registry.k8s.io/kube-proxy:v1.34.1 k8s-app=kube-proxy + + NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR + default deployment.apps/netcat 1/1 1 1 14s dnsutils registry.k8s.io/e2e-test-images/agnhost:2.40 app=netcat + kube-system deployment.apps/coredns 1/1 1 1 54s coredns registry.k8s.io/coredns/coredns:v1.12.1 k8s-app=kube-dns + + NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED 
NODE READINESS GATES + default pod/netcat-cd4db9dbf-t8glm 1/1 Running 0 14s 10.244.0.3 kubenet-999044 + kube-system pod/coredns-66bc5c9577-hh4bv 1/1 Running 0 48s 10.244.0.2 kubenet-999044 + kube-system pod/etcd-kubenet-999044 1/1 Running 0 54s 192.168.94.2 kubenet-999044 + kube-system pod/kube-apiserver-kubenet-999044 1/1 Running 0 54s 192.168.94.2 kubenet-999044 + kube-system pod/kube-controller-manager-kubenet-999044 1/1 Running 0 54s 192.168.94.2 kubenet-999044 + kube-system pod/kube-proxy-hmkqf 1/1 Running 0 48s 192.168.94.2 kubenet-999044 + kube-system pod/kube-scheduler-kubenet-999044 1/1 Running 0 54s 192.168.94.2 kubenet-999044 + kube-system pod/storage-provisioner 1/1 Running 1 (17s ago) 48s 192.168.94.2 kubenet-999044 + + + >>> host: crictl pods: + POD ID CREATED STATE NAME NAMESPACE ATTEMPT RUNTIME + a295c86f81a65 13 seconds ago Ready netcat-cd4db9dbf-t8glm default 0 (default) + 94187b74468ec 48 seconds ago Ready coredns-66bc5c9577-hh4bv kube-system 0 (default) + 0456bb21054f7 48 seconds ago Ready storage-provisioner kube-system 0 (default) + 159282dadd31b 48 seconds ago Ready kube-proxy-hmkqf kube-system 0 (default) + f5a14940f72ed 57 seconds ago Ready kube-controller-manager-kubenet-999044 kube-system 0 (default) + 326a0e373622a 57 seconds ago Ready kube-apiserver-kubenet-999044 kube-system 0 (default) + 5bfd9f8b74cb3 57 seconds ago Ready etcd-kubenet-999044 kube-system 0 (default) + 014017b04a354 57 seconds ago Ready kube-scheduler-kubenet-999044 kube-system 0 (default) + + + >>> host: crictl containers: + CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE + d159c111f804c registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 12 seconds ago Running dnsutils 0 a295c86f81a65 netcat-cd4db9dbf-t8glm default + 44117ed04ae70 6e38f40d628db 17 seconds ago Running storage-provisioner 1 0456bb21054f7 storage-provisioner kube-system + 5bb3c428818ce 52546a367cc9e 47 seconds ago Running coredns 0 94187b74468ec coredns-66bc5c9577-hh4bv kube-system + bf913e59dad47 6e38f40d628db 47 seconds ago Exited storage-provisioner 0 0456bb21054f7 storage-provisioner kube-system + 1d8d284f65369 fc25172553d79 48 seconds ago Running kube-proxy 0 159282dadd31b kube-proxy-hmkqf kube-system + a6515770d118c c3994bc696102 57 seconds ago Running kube-apiserver 0 326a0e373622a kube-apiserver-kubenet-999044 kube-system + abfe56e81cec0 7dd6aaa1717ab 57 seconds ago Running kube-scheduler 0 014017b04a354 kube-scheduler-kubenet-999044 kube-system + 968a630167b8c c80c8dbafe7dd 57 seconds ago Running kube-controller-manager 0 f5a14940f72ed kube-controller-manager-kubenet-999044 kube-system + fb0b7f9fc52f8 5f1f5298c888d 57 seconds ago Running etcd 0 5bfd9f8b74cb3 etcd-kubenet-999044 kube-system + + + >>> k8s: describe netcat deployment: + Name: netcat + Namespace: default + CreationTimestamp: Sun, 02 Nov 2025 23:25:54 +0000 + Labels: app=netcat + Annotations: deployment.kubernetes.io/revision: 1 + Selector: app=netcat + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 25% max unavailable, 25% max surge + Pod Template: + Labels: app=netcat + Containers: + dnsutils: + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + Environment: + Mounts: + Volumes: + Node-Selectors: + Tolerations: + Conditions: + Type Status Reason + ---- ------ ------ + Available True 
MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: netcat-cd4db9dbf (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 15s deployment-controller Scaled up replica set netcat-cd4db9dbf from 0 to 1 + + + >>> k8s: describe netcat pod(s): + Name: netcat-cd4db9dbf-t8glm + Namespace: default + Priority: 0 + Service Account: default + Node: kubenet-999044/192.168.94.2 + Start Time: Sun, 02 Nov 2025 23:25:54 +0000 + Labels: app=netcat + pod-template-hash=cd4db9dbf + Annotations: + Status: Running + IP: 10.244.0.3 + IPs: + IP: 10.244.0.3 + Controlled By: ReplicaSet/netcat-cd4db9dbf + Containers: + dnsutils: + Container ID: docker://d159c111f804ca7911d021cb66cb90f114a49379d8f89665bd2b564c36b1d86f + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Image ID: docker-pullable://registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + State: Running + Started: Sun, 02 Nov 2025 23:25:56 +0000 + Ready: True + Restart Count: 0 + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-v5frl (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-api-access-v5frl: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: + Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 15s default-scheduler Successfully assigned default/netcat-cd4db9dbf-t8glm to kubenet-999044 + Normal Pulling 14s kubelet Pulling image "registry.k8s.io/e2e-test-images/agnhost:2.40" + Normal Pulled 13s kubelet Successfully pulled image "registry.k8s.io/e2e-test-images/agnhost:2.40" in 842ms (842ms including waiting). Image size: 127004766 bytes. 
+ Normal Created 13s kubelet Created container: dnsutils + Normal Started 13s kubelet Started container dnsutils + + + >>> k8s: netcat logs: + + + >>> k8s: describe coredns deployment: + Name: coredns + Namespace: kube-system + CreationTimestamp: Sun, 02 Nov 2025 23:25:14 +0000 + Labels: k8s-app=kube-dns + Annotations: deployment.kubernetes.io/revision: 1 + Selector: k8s-app=kube-dns + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 1 max unavailable, 25% max surge + Pod Template: + Labels: k8s-app=kube-dns + Service Account: coredns + Containers: + coredns: + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Ports: 53/UDP (dns), 53/TCP (dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from config-volume (ro) + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + Priority Class Name: system-cluster-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: coredns-66bc5c9577 (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 49s deployment-controller Scaled up replica set coredns-66bc5c9577 from 0 to 2 + Normal ScalingReplicaSet 49s deployment-controller Scaled down replica set coredns-66bc5c9577 from 2 to 1 + + + >>> k8s: describe coredns pods: + Name: coredns-66bc5c9577-hh4bv + Namespace: kube-system + Priority: 2000000000 + Priority Class Name: system-cluster-critical + Service Account: coredns + Node: kubenet-999044/192.168.94.2 + Start Time: Sun, 02 Nov 2025 23:25:20 +0000 + Labels: k8s-app=kube-dns + pod-template-hash=66bc5c9577 + Annotations: + Status: Running + IP: 10.244.0.2 + IPs: + IP: 10.244.0.2 + Controlled By: ReplicaSet/coredns-66bc5c9577 + Containers: + coredns: + Container ID: docker://5bb3c428818ce59843b410f45417a4c66f6e83dc657bc0d15c73c0f7488c5a40 + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Image ID: docker-pullable://registry.k8s.io/coredns/coredns@sha256:e8c262566636e6bc340ece6473b0eed193cad045384401529721ddbe6463d31c + Ports: 53/UDP (dns), 53/TCP (dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + State: Running + Started: Sun, 02 Nov 2025 23:25:21 +0000 + Ready: True + Restart Count: 0 + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from 
config-volume (ro) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-fv7tm (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + kube-api-access-fv7tm: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: Burstable + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 49s default-scheduler Successfully assigned kube-system/coredns-66bc5c9577-hh4bv to kubenet-999044 + Normal Pulled 48s kubelet Container image "registry.k8s.io/coredns/coredns:v1.12.1" already present on machine + Normal Created 48s kubelet Created container: coredns + Normal Started 48s kubelet Started container coredns + Warning Unhealthy 27s (x4 over 47s) kubelet Readiness probe failed: HTTP probe failed with statuscode: 503 + + + >>> k8s: coredns logs: + maxprocs: Leaving GOMAXPROCS=8: CPU quota undefined + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/ready: Still waiting on: "kubernetes" + [INFO] plugin/ready: Still waiting on: "kubernetes" + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API + .:53 + [INFO] plugin/reload: Running configuration SHA512 = c7556d8fdf49c5e32a9077be8cfb9fc6947bb07e663a10d55b192eb63ad1f2bd9793e8e5f5a36fc9abb1957831eec5c997fd9821790e3990ae9531bf41ecea37 + CoreDNS-1.12.1 + linux/amd64, go1.24.1, 707c7c1 + [INFO] 127.0.0.1:43631 - 20310 "HINFO IN 2307109661986508810.1868828175691171870. 
udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.026759719s + [INFO] plugin/ready: Still waiting on: "kubernetes" + [INFO] plugin/ready: Still waiting on: "kubernetes" + [INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout + [ERROR] plugin/kubernetes: Unhandled Error + [INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout + [ERROR] plugin/kubernetes: Unhandled Error + [INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout + [ERROR] plugin/kubernetes: Unhandled Error + [INFO] 10.244.0.3:57107 - 17604 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000208429s + [INFO] 10.244.0.3:54069 - 45217 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000159915s + [INFO] 10.244.0.3:47976 - 13637 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000215305s + [INFO] 10.244.0.3:41930 - 61717 "A IN netcat.default.svc.cluster.local. udp 50 false 512" NOERROR qr,aa,rd 98 0.000171928s + [INFO] 10.244.0.3:41930 - 62026 "AAAA IN netcat.default.svc.cluster.local. udp 50 false 512" NOERROR qr,aa,rd 143 0.000333021s + [INFO] 10.244.0.3:32868 - 44903 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000181303s + [INFO] 10.244.0.3:59870 - 47472 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000148645s + [INFO] 10.244.0.3:56851 - 16365 "AAAA IN kubernetes.default.svc.cluster.local. 
udp 54 false 512" NOERROR qr,aa,rd 147 0.000119945s + + + >>> k8s: describe api server pod(s): + Name: kube-apiserver-kubenet-999044 + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Node: kubenet-999044/192.168.94.2 + Start Time: Sun, 02 Nov 2025 23:25:14 +0000 + Labels: component=kube-apiserver + tier=control-plane + Annotations: kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 192.168.94.2:8443 + kubernetes.io/config.hash: 3caa1f256b214f1e72ecb80f1b1f693f + kubernetes.io/config.mirror: 3caa1f256b214f1e72ecb80f1b1f693f + kubernetes.io/config.seen: 2025-11-02T23:25:14.356457318Z + kubernetes.io/config.source: file + Status: Running + SeccompProfile: RuntimeDefault + IP: 192.168.94.2 + IPs: + IP: 192.168.94.2 + Controlled By: Node/kubenet-999044 + Containers: + kube-apiserver: + Container ID: docker://a6515770d118cfed5edef69482377fdb3e46eba3b5bb7e9c1eedd657bb1feb5f + Image: registry.k8s.io/kube-apiserver:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-apiserver@sha256:b9d7c117f8ac52bed4b13aeed973dc5198f9d93a926e6fe9e0b384f155baa902 + Port: 8443/TCP (probe-port) + Host Port: 8443/TCP (probe-port) + Command: + kube-apiserver + --advertise-address=192.168.94.2 + --allow-privileged=true + --authorization-mode=Node,RBAC + --client-ca-file=/var/lib/minikube/certs/ca.crt + --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + --enable-bootstrap-token-auth=true + --etcd-cafile=/var/lib/minikube/certs/etcd/ca.crt + --etcd-certfile=/var/lib/minikube/certs/apiserver-etcd-client.crt + --etcd-keyfile=/var/lib/minikube/certs/apiserver-etcd-client.key + --etcd-servers=https://127.0.0.1:2379 + --kubelet-client-certificate=/var/lib/minikube/certs/apiserver-kubelet-client.crt + --kubelet-client-key=/var/lib/minikube/certs/apiserver-kubelet-client.key + --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + --proxy-client-cert-file=/var/lib/minikube/certs/front-proxy-client.crt + --proxy-client-key-file=/var/lib/minikube/certs/front-proxy-client.key + --requestheader-allowed-names=front-proxy-client + --requestheader-client-ca-file=/var/lib/minikube/certs/front-proxy-ca.crt + --requestheader-extra-headers-prefix=X-Remote-Extra- + --requestheader-group-headers=X-Remote-Group + --requestheader-username-headers=X-Remote-User + --secure-port=8443 + --service-account-issuer=https://kubernetes.default.svc.cluster.local + --service-account-key-file=/var/lib/minikube/certs/sa.pub + --service-account-signing-key-file=/var/lib/minikube/certs/sa.key + --service-cluster-ip-range=10.96.0.0/12 + --tls-cert-file=/var/lib/minikube/certs/apiserver.crt + --tls-private-key-file=/var/lib/minikube/certs/apiserver.key + State: Running + Started: Sun, 02 Nov 2025 23:25:11 +0000 + Ready: True + Restart Count: 0 + Requests: + cpu: 250m + Liveness: http-get https://192.168.94.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=8 + Readiness: http-get https://192.168.94.2:probe-port/readyz delay=0s timeout=15s period=1s #success=1 #failure=3 + Startup: http-get https://192.168.94.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=24 + Environment: + Mounts: + /etc/ca-certificates from etc-ca-certificates (ro) + /etc/ssl/certs from ca-certs (ro) + /usr/local/share/ca-certificates from usr-local-share-ca-certificates (ro) + /usr/share/ca-certificates from 
usr-share-ca-certificates (ro) + /var/lib/minikube/certs from k8s-certs (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + ca-certs: + Type: HostPath (bare host directory volume) + Path: /etc/ssl/certs + HostPathType: DirectoryOrCreate + etc-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /etc/ca-certificates + HostPathType: DirectoryOrCreate + k8s-certs: + Type: HostPath (bare host directory volume) + Path: /var/lib/minikube/certs + HostPathType: DirectoryOrCreate + usr-local-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/local/share/ca-certificates + HostPathType: DirectoryOrCreate + usr-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/share/ca-certificates + HostPathType: DirectoryOrCreate + QoS Class: Burstable + Node-Selectors: + Tolerations: :NoExecute op=Exists + Events: + + + >>> k8s: api server logs: + I1102 23:25:11.499242 1 options.go:263] external host was not specified, using 192.168.94.2 + I1102 23:25:11.500499 1 server.go:150] Version: v1.34.1 + I1102 23:25:11.500545 1 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + W1102 23:25:11.695888 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=resource.k8s.io/v1alpha3 + W1102 23:25:11.696941 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=storagemigration.k8s.io/v1alpha1 + W1102 23:25:11.697042 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=admissionregistration.k8s.io/v1alpha1 + W1102 23:25:11.697095 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=certificates.k8s.io/v1alpha1 + W1102 23:25:11.697123 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=imagepolicy.k8s.io/v1alpha1 + W1102 23:25:11.697126 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=rbac.authorization.k8s.io/v1alpha1 + W1102 23:25:11.697129 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=scheduling.k8s.io/v1alpha1 + W1102 23:25:11.697132 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=storage.k8s.io/v1alpha1 + W1102 23:25:11.697135 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=internal.apiserver.k8s.io/v1alpha1 + W1102 23:25:11.697173 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=authentication.k8s.io/v1alpha1 + W1102 23:25:11.697185 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, 
this is unsupported, proceed at your own risk: api=coordination.k8s.io/v1alpha2 + W1102 23:25:11.697197 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=node.k8s.io/v1alpha1 + W1102 23:25:11.714041 1 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:11.714305 1 shared_informer.go:349] "Waiting for caches to sync" controller="node_authorizer" + W1102 23:25:11.714433 1 logging.go:55] [core] [Channel #4 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:11.719288 1 shared_informer.go:349] "Waiting for caches to sync" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:25:11.723184 1 plugins.go:157] Loaded 14 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,RuntimeClass,DefaultIngressClass,PodTopologyLabels,MutatingAdmissionPolicy,MutatingAdmissionWebhook. + I1102 23:25:11.723198 1 plugins.go:160] Loaded 13 validating admission controller(s) successfully in the following order: LimitRanger,ServiceAccount,PodSecurity,Priority,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,ClusterTrustBundleAttest,CertificateSubjectRestriction,ValidatingAdmissionPolicy,ValidatingAdmissionWebhook,ResourceQuota. + I1102 23:25:11.723344 1 instance.go:239] Using reconciler: lease + W1102 23:25:11.724052 1 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:11.854975 1 logging.go:55] [core] [Channel #13 SubChannel #14]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:11.860691 1 logging.go:55] [core] [Channel #17 SubChannel #20]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:11.869149 1 handler.go:285] Adding GroupVersion apiextensions.k8s.io v1 to ResourceManager + W1102 23:25:11.869170 1 genericapiserver.go:784] Skipping API apiextensions.k8s.io/v1beta1 because it has no resources. + I1102 23:25:11.872882 1 cidrallocator.go:197] starting ServiceCIDR Allocator Controller + W1102 23:25:11.873392 1 logging.go:55] [core] [Channel #27 SubChannel #28]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:11.876954 1 logging.go:55] [core] [Channel #31 SubChannel #32]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:11.881236 1 logging.go:55] [core] [Channel #35 SubChannel #36]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:11.884840 1 logging.go:55] [core] [Channel #39 SubChannel #40]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:11.888632 1 logging.go:55] [core] [Channel #43 SubChannel #44]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:11.892807 1 logging.go:55] [core] [Channel #47 SubChannel #48]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:11.896813 1 logging.go:55] [core] [Channel #51 SubChannel #52]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:11.901314 1 logging.go:55] [core] [Channel #55 SubChannel #56]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:11.906143 1 logging.go:55] [core] [Channel #59 SubChannel #60]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:11.910061 1 logging.go:55] [core] [Channel #63 SubChannel #64]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:11.915016 1 logging.go:55] [core] [Channel #67 SubChannel #68]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:11.919770 1 logging.go:55] [core] [Channel #71 SubChannel #72]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:11.924186 1 logging.go:55] [core] [Channel #75 SubChannel #76]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:11.928618 1 logging.go:55] [core] [Channel #79 SubChannel #80]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:11.932661 1 logging.go:55] [core] [Channel #83 SubChannel #84]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:11.936961 1 logging.go:55] [core] [Channel #87 SubChannel #88]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:11.940900 1 logging.go:55] [core] [Channel #91 SubChannel #92]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:11.959114 1 handler.go:285] Adding GroupVersion v1 to ResourceManager + I1102 23:25:11.959273 1 apis.go:112] API group "internal.apiserver.k8s.io" is not enabled, skipping. + W1102 23:25:11.960308 1 logging.go:55] [core] [Channel #95 SubChannel #96]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:11.965129 1 logging.go:55] [core] [Channel #99 SubChannel #100]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:11.969090 1 logging.go:55] [core] [Channel #103 SubChannel #104]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:11.973968 1 logging.go:55] [core] [Channel #107 SubChannel #108]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:11.979580 1 logging.go:55] [core] [Channel #111 SubChannel #112]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:11.984800 1 logging.go:55] [core] [Channel #115 SubChannel #116]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:11.989505 1 logging.go:55] [core] [Channel #119 SubChannel #120]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:11.994476 1 logging.go:55] [core] [Channel #123 SubChannel #124]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:11.998627 1 logging.go:55] [core] [Channel #127 SubChannel #128]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.003150 1 logging.go:55] [core] [Channel #131 SubChannel #132]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.008145 1 logging.go:55] [core] [Channel #135 SubChannel #136]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.012334 1 logging.go:55] [core] [Channel #139 SubChannel #140]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.016894 1 logging.go:55] [core] [Channel #143 SubChannel #144]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.021216 1 logging.go:55] [core] [Channel #147 SubChannel #148]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.024761 1 logging.go:55] [core] [Channel #151 SubChannel #152]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.028901 1 logging.go:55] [core] [Channel #155 SubChannel #156]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.033422 1 logging.go:55] [core] [Channel #159 SubChannel #160]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.040428 1 logging.go:55] [core] [Channel #163 SubChannel #164]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.045115 1 logging.go:55] [core] [Channel #167 SubChannel #168]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.049064 1 logging.go:55] [core] [Channel #171 SubChannel #172]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.052447 1 logging.go:55] [core] [Channel #175 SubChannel #176]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.056134 1 logging.go:55] [core] [Channel #179 SubChannel #180]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.059604 1 logging.go:55] [core] [Channel #183 SubChannel #184]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.064227 1 logging.go:55] [core] [Channel #187 SubChannel #188]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + I1102 23:25:12.067407 1 apis.go:112] API group "storagemigration.k8s.io" is not enabled, skipping. + W1102 23:25:12.068129 1 logging.go:55] [core] [Channel #191 SubChannel #192]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.071788 1 logging.go:55] [core] [Channel #195 SubChannel #196]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.079432 1 logging.go:55] [core] [Channel #199 SubChannel #200]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.083103 1 logging.go:55] [core] [Channel #203 SubChannel #204]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.087822 1 logging.go:55] [core] [Channel #207 SubChannel #208]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.091162 1 logging.go:55] [core] [Channel #211 SubChannel #212]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.095257 1 logging.go:55] [core] [Channel #215 SubChannel #216]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.100456 1 logging.go:55] [core] [Channel #219 SubChannel #220]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:12.103219 1 logging.go:55] [core] [Channel #223 SubChannel #224]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.107576 1 logging.go:55] [core] [Channel #227 SubChannel #228]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.111555 1 logging.go:55] [core] [Channel #231 SubChannel #232]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.114955 1 logging.go:55] [core] [Channel #235 SubChannel #236]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.119778 1 logging.go:55] [core] [Channel #239 SubChannel #240]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.126480 1 logging.go:55] [core] [Channel #243 SubChannel #244]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.130213 1 logging.go:55] [core] [Channel #247 SubChannel #248]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:12.133657 1 logging.go:55] [core] [Channel #251 SubChannel #252]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:12.143757 1 handler.go:285] Adding GroupVersion authentication.k8s.io v1 to ResourceManager + W1102 23:25:12.143769 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.143773 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:12.144159 1 handler.go:285] Adding GroupVersion authorization.k8s.io v1 to ResourceManager + W1102 23:25:12.144167 1 genericapiserver.go:784] Skipping API authorization.k8s.io/v1beta1 because it has no resources. + I1102 23:25:12.144573 1 handler.go:285] Adding GroupVersion autoscaling v2 to ResourceManager + I1102 23:25:12.145015 1 handler.go:285] Adding GroupVersion autoscaling v1 to ResourceManager + W1102 23:25:12.145023 1 genericapiserver.go:784] Skipping API autoscaling/v2beta1 because it has no resources. + W1102 23:25:12.145026 1 genericapiserver.go:784] Skipping API autoscaling/v2beta2 because it has no resources. 
+ I1102 23:25:12.145888 1 handler.go:285] Adding GroupVersion batch v1 to ResourceManager + W1102 23:25:12.145901 1 genericapiserver.go:784] Skipping API batch/v1beta1 because it has no resources. + I1102 23:25:12.146448 1 handler.go:285] Adding GroupVersion certificates.k8s.io v1 to ResourceManager + W1102 23:25:12.146456 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.146460 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:12.147422 1 handler.go:285] Adding GroupVersion coordination.k8s.io v1 to ResourceManager + W1102 23:25:12.147435 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.147440 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1alpha2 because it has no resources. + I1102 23:25:12.148348 1 handler.go:285] Adding GroupVersion discovery.k8s.io v1 to ResourceManager + W1102 23:25:12.148368 1 genericapiserver.go:784] Skipping API discovery.k8s.io/v1beta1 because it has no resources. + I1102 23:25:12.152376 1 handler.go:285] Adding GroupVersion networking.k8s.io v1 to ResourceManager + W1102 23:25:12.153727 1 genericapiserver.go:784] Skipping API networking.k8s.io/v1beta1 because it has no resources. + I1102 23:25:12.154260 1 handler.go:285] Adding GroupVersion node.k8s.io v1 to ResourceManager + W1102 23:25:12.154332 1 genericapiserver.go:784] Skipping API node.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.154341 1 genericapiserver.go:784] Skipping API node.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:12.155290 1 handler.go:285] Adding GroupVersion policy v1 to ResourceManager + W1102 23:25:12.155508 1 genericapiserver.go:784] Skipping API policy/v1beta1 because it has no resources. + I1102 23:25:12.158859 1 handler.go:285] Adding GroupVersion rbac.authorization.k8s.io v1 to ResourceManager + W1102 23:25:12.158874 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.158878 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:12.159157 1 handler.go:285] Adding GroupVersion scheduling.k8s.io v1 to ResourceManager + W1102 23:25:12.159162 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.159164 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:12.160421 1 handler.go:285] Adding GroupVersion storage.k8s.io v1 to ResourceManager + W1102 23:25:12.160429 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.160432 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:12.160996 1 handler.go:285] Adding GroupVersion flowcontrol.apiserver.k8s.io v1 to ResourceManager + W1102 23:25:12.161005 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta3 because it has no resources. + W1102 23:25:12.161009 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta2 because it has no resources. + W1102 23:25:12.161010 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta1 because it has no resources. 
+ I1102 23:25:12.163061 1 handler.go:285] Adding GroupVersion apps v1 to ResourceManager + W1102 23:25:12.163077 1 genericapiserver.go:784] Skipping API apps/v1beta2 because it has no resources. + W1102 23:25:12.163081 1 genericapiserver.go:784] Skipping API apps/v1beta1 because it has no resources. + I1102 23:25:12.164023 1 handler.go:285] Adding GroupVersion admissionregistration.k8s.io v1 to ResourceManager + W1102 23:25:12.164031 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.164034 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:12.164292 1 handler.go:285] Adding GroupVersion events.k8s.io v1 to ResourceManager + W1102 23:25:12.164295 1 genericapiserver.go:784] Skipping API events.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.164322 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta2 because it has no resources. + I1102 23:25:12.165246 1 handler.go:285] Adding GroupVersion resource.k8s.io v1 to ResourceManager + W1102 23:25:12.165281 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta1 because it has no resources. + W1102 23:25:12.165292 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1alpha3 because it has no resources. + W1102 23:25:12.167027 1 logging.go:55] [core] [Channel #255 SubChannel #256]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:12.171026 1 handler.go:285] Adding GroupVersion apiregistration.k8s.io v1 to ResourceManager + W1102 23:25:12.171037 1 genericapiserver.go:784] Skipping API apiregistration.k8s.io/v1beta1 because it has no resources. 
+ I1102 23:25:12.391433 1 secure_serving.go:211] Serving securely on [::]:8443 + I1102 23:25:12.391518 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:25:12.391591 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/lib/minikube/certs/apiserver.crt::/var/lib/minikube/certs/apiserver.key" + I1102 23:25:12.391627 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" + I1102 23:25:12.391724 1 dynamic_serving_content.go:135] "Starting controller" name="aggregator-proxy-cert::/var/lib/minikube/certs/front-proxy-client.crt::/var/lib/minikube/certs/front-proxy-client.key" + I1102 23:25:12.391935 1 customresource_discovery_controller.go:294] Starting DiscoveryController + I1102 23:25:12.392089 1 controller.go:78] Starting OpenAPI AggregationController + I1102 23:25:12.392177 1 default_servicecidr_controller.go:111] Starting kubernetes-service-cidr-controller + I1102 23:25:12.392183 1 shared_informer.go:349] "Waiting for caches to sync" controller="kubernetes-service-cidr-controller" + I1102 23:25:12.392193 1 controller.go:142] Starting OpenAPI controller + I1102 23:25:12.392210 1 controller.go:90] Starting OpenAPI V3 controller + I1102 23:25:12.392220 1 naming_controller.go:299] Starting NamingConditionController + I1102 23:25:12.392228 1 establishing_controller.go:81] Starting EstablishingController + I1102 23:25:12.392239 1 nonstructuralschema_controller.go:195] Starting NonStructuralSchemaConditionController + I1102 23:25:12.392240 1 repairip.go:210] Starting ipallocator-repair-controller + I1102 23:25:12.392244 1 shared_informer.go:349] "Waiting for caches to sync" controller="ipallocator-repair-controller" + I1102 23:25:12.392250 1 apiapproval_controller.go:189] Starting KubernetesAPIApprovalPolicyConformantConditionController + I1102 23:25:12.392260 1 crd_finalizer.go:269] Starting CRDFinalizer + I1102 23:25:12.392429 1 system_namespaces_controller.go:66] Starting system namespaces controller + I1102 23:25:12.392448 1 controller.go:119] Starting legacy_token_tracking_controller + I1102 23:25:12.392458 1 shared_informer.go:349] "Waiting for caches to sync" controller="configmaps" + I1102 23:25:12.392463 1 remote_available_controller.go:425] Starting RemoteAvailability controller + I1102 23:25:12.392468 1 cache.go:32] Waiting for caches to sync for RemoteAvailability controller + I1102 23:25:12.392480 1 apiservice_controller.go:100] Starting APIServiceRegistrationController + I1102 23:25:12.392484 1 cache.go:32] Waiting for caches to sync for APIServiceRegistrationController controller + I1102 23:25:12.392496 1 apf_controller.go:377] Starting API Priority and Fairness config controller + I1102 23:25:12.392453 1 local_available_controller.go:156] Starting LocalAvailability controller + I1102 23:25:12.392532 1 cache.go:32] Waiting for caches to sync for LocalAvailability controller + I1102 23:25:12.392627 1 cluster_authentication_trust_controller.go:459] Starting cluster_authentication_trust_controller controller + I1102 23:25:12.392634 1 shared_informer.go:349] "Waiting for caches to sync" controller="cluster_authentication_trust_controller" + I1102 23:25:12.392652 1 aggregator.go:169] waiting for initial CRD sync... 
+ I1102 23:25:12.392660 1 controller.go:80] Starting OpenAPI V3 AggregationController + I1102 23:25:12.392688 1 gc_controller.go:78] Starting apiserver lease garbage collector + I1102 23:25:12.397969 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:25:12.399377 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:25:12.399481 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:25:12.399688 1 crdregistration_controller.go:114] Starting crd-autoregister controller + I1102 23:25:12.399695 1 shared_informer.go:349] "Waiting for caches to sync" controller="crd-autoregister" + I1102 23:25:12.414869 1 shared_informer.go:356] "Caches are synced" controller="node_authorizer" + I1102 23:25:12.419453 1 shared_informer.go:356] "Caches are synced" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:25:12.419474 1 policy_source.go:240] refreshing policies + I1102 23:25:12.492208 1 shared_informer.go:356] "Caches are synced" controller="kubernetes-service-cidr-controller" + I1102 23:25:12.492232 1 default_servicecidr_controller.go:166] Creating default ServiceCIDR with CIDRs: [10.96.0.0/12] + I1102 23:25:12.492270 1 shared_informer.go:356] "Caches are synced" controller="ipallocator-repair-controller" + I1102 23:25:12.492516 1 apf_controller.go:382] Running API Priority and Fairness config worker + I1102 23:25:12.492536 1 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process + I1102 23:25:12.492524 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller + I1102 23:25:12.492527 1 cache.go:39] Caches are synced for RemoteAvailability controller + I1102 23:25:12.492584 1 handler_discovery.go:451] Starting ResourceDiscoveryManager + I1102 23:25:12.492609 1 cache.go:39] Caches are synced for LocalAvailability controller + I1102 23:25:12.492618 1 shared_informer.go:356] "Caches are synced" controller="configmaps" + I1102 23:25:12.492680 1 shared_informer.go:356] "Caches are synced" controller="cluster_authentication_trust_controller" + I1102 23:25:12.493076 1 controller.go:667] quota admission added evaluator for: namespaces + I1102 23:25:12.494071 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True + I1102 23:25:12.494095 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:25:12.496299 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:25:12.496562 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller + I1102 23:25:12.499778 1 shared_informer.go:356] "Caches are synced" controller="crd-autoregister" + I1102 23:25:12.499812 1 aggregator.go:171] initial CRD sync complete... 
+ I1102 23:25:12.499819 1 autoregister_controller.go:144] Starting autoregister controller + I1102 23:25:12.499825 1 cache.go:32] Waiting for caches to sync for autoregister controller + I1102 23:25:12.499829 1 cache.go:39] Caches are synced for autoregister controller + I1102 23:25:12.514488 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io + I1102 23:25:13.394678 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000 + I1102 23:25:13.396614 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000 + I1102 23:25:13.396622 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist. + I1102 23:25:13.630750 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io + I1102 23:25:13.652151 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io + I1102 23:25:13.696593 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"} + W1102 23:25:13.699717 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.94.2] + I1102 23:25:13.700274 1 controller.go:667] quota admission added evaluator for: endpoints + I1102 23:25:13.702891 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io + I1102 23:25:14.411478 1 controller.go:667] quota admission added evaluator for: serviceaccounts + I1102 23:25:14.611376 1 controller.go:667] quota admission added evaluator for: deployments.apps + I1102 23:25:14.615728 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"} + I1102 23:25:14.620736 1 controller.go:667] quota admission added evaluator for: daemonsets.apps + I1102 23:25:20.315804 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:25:20.317536 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:25:20.463500 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps + I1102 23:25:20.512685 1 controller.go:667] quota admission added evaluator for: replicasets.apps + I1102 23:25:54.841063 1 alloc.go:328] "allocated clusterIPs" service="default/netcat" clusterIPs={"IPv4":"10.110.140.25"} + E1102 23:26:01.940664 1 conn.go:339] Error on socket receive: read tcp 192.168.94.2:8443->192.168.94.1:49684: use of closed network connection + E1102 23:26:02.020549 1 conn.go:339] Error on socket receive: read tcp 192.168.94.2:8443->192.168.94.1:49700: use of closed network connection + E1102 23:26:02.089481 1 conn.go:339] Error on socket receive: read tcp 192.168.94.2:8443->192.168.94.1:49726: use of closed network connection + E1102 23:26:02.183493 1 conn.go:339] Error on socket receive: read tcp 192.168.94.2:8443->192.168.94.1:49742: use of closed network connection + E1102 23:26:07.257334 1 conn.go:339] Error on socket receive: read tcp 192.168.94.2:8443->192.168.94.1:49762: use of closed network connection + E1102 23:26:07.362010 1 conn.go:339] Error on socket receive: read tcp 192.168.94.2:8443->192.168.94.1:49794: use of closed network connection + E1102 23:26:07.452193 1 conn.go:339] Error on socket receive: read tcp 192.168.94.2:8443->192.168.94.1:49808: use of closed network connection + E1102 23:26:07.533494 1 conn.go:339] Error on socket receive: read tcp 192.168.94.2:8443->192.168.94.1:49822: use of closed network connection + E1102 
23:26:07.607110 1 conn.go:339] Error on socket receive: read tcp 192.168.94.2:8443->192.168.94.1:49850: use of closed network connection + + + >>> host: /etc/cni: + /etc/cni/net.d/cni.lock + /etc/cni/net.d/87-podman-bridge.conflist.mk_disabled + { + "cniVersion": "0.4.0", + "name": "podman", + "plugins": [ + { + "type": "bridge", + "bridge": "cni-podman0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [{ "dst": "0.0.0.0/0" }], + "ranges": [ + [ + { + "subnet": "10.88.0.0/16", + "gateway": "10.88.0.1" + } + ] + ] + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type": "firewall" + }, + { + "type": "tuning" + } + ] + } + /etc/cni/net.d/10-crio-bridge.conflist.disabled.mk_disabled + { + "cniVersion": "1.0.0", + "name": "crio", + "plugins": [ + { + "type": "bridge", + "bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [ + { "dst": "0.0.0.0/0" }, + { "dst": "::/0" } + ], + "ranges": [ + [{ "subnet": "10.85.0.0/16" }], + [{ "subnet": "1100:200::/24" }] + ] + } + } + ] + } + + + >>> host: ip a s: + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: tunl0@NONE: mtu 1480 qdisc noop state DOWN group default qlen 1000 + link/ipip 0.0.0.0 brd 0.0.0.0 + 3: eth0@if368: mtu 1500 qdisc noqueue state UP group default + link/ether 56:8d:24:7d:04:b7 brd ff:ff:ff:ff:ff:ff link-netnsid 0 + inet 192.168.94.2/24 brd 192.168.94.255 scope global eth0 + valid_lft forever preferred_lft forever + 4: docker0: mtu 1500 qdisc noqueue state DOWN group default + link/ether b6:e9:30:a7:03:2c brd ff:ff:ff:ff:ff:ff + inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0 + valid_lft forever preferred_lft forever + 5: cbr0: mtu 1500 qdisc htb state UP group default qlen 1000 + link/ether 06:4f:af:2d:5b:52 brd ff:ff:ff:ff:ff:ff + inet 10.244.0.1/16 brd 10.244.255.255 scope global cbr0 + valid_lft forever preferred_lft forever + inet6 fe80::44f:afff:fe2d:5b52/64 scope link + valid_lft forever preferred_lft forever + 6: vetha18fa4c7@if3: mtu 1500 qdisc noqueue master cbr0 state UP group default + link/ether 9e:e5:0d:68:d2:e5 brd ff:ff:ff:ff:ff:ff link-netnsid 1 + inet6 fe80::9ce5:dff:fe68:d2e5/64 scope link + valid_lft forever preferred_lft forever + 7: veth0572d118@if3: mtu 1500 qdisc noqueue master cbr0 state UP group default + link/ether 12:68:44:67:36:8b brd ff:ff:ff:ff:ff:ff link-netnsid 2 + inet6 fe80::1068:44ff:fe67:368b/64 scope link + valid_lft forever preferred_lft forever + + + >>> host: ip r s: + default via 192.168.94.1 dev eth0 + 10.244.0.0/16 dev cbr0 proto kernel scope link src 10.244.0.1 + 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown + 192.168.94.0/24 dev eth0 proto kernel scope link src 192.168.94.2 + + + >>> host: iptables-save: + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:26:10 2025 + *mangle + :PREROUTING ACCEPT [21617:56092564] + :INPUT ACCEPT [21558:56088326] + :FORWARD ACCEPT [59:4238] + :OUTPUT ACCEPT [17330:5522344] + :POSTROUTING ACCEPT [17389:5526582] + :KUBE-IPTABLES-HINT - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-PROXY-CANARY - [0:0] + COMMIT + # Completed on Sun Nov 2 23:26:10 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:26:10 2025 + 
*filter + :INPUT ACCEPT [4343:995587] + :FORWARD ACCEPT [16:927] + :OUTPUT ACCEPT [4301:1292093] + :DOCKER - [0:0] + :DOCKER-BRIDGE - [0:0] + :DOCKER-CT - [0:0] + :DOCKER-FORWARD - [0:0] + :DOCKER-ISOLATION-STAGE-1 - [0:0] + :DOCKER-ISOLATION-STAGE-2 - [0:0] + :DOCKER-USER - [0:0] + :KUBE-EXTERNAL-SERVICES - [0:0] + :KUBE-FIREWALL - [0:0] + :KUBE-FORWARD - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-PROXY-FIREWALL - [0:0] + :KUBE-SERVICES - [0:0] + -A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A INPUT -m comment --comment "kubernetes health check service ports" -j KUBE-NODEPORTS + -A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A INPUT -j KUBE-FIREWALL + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A FORWARD -j DOCKER-USER + -A FORWARD -j DOCKER-FORWARD + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -j KUBE-FIREWALL + -A DOCKER ! -i docker0 -o docker0 -j DROP + -A DOCKER-BRIDGE -o docker0 -j DOCKER + -A DOCKER-CT -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A DOCKER-FORWARD -j DOCKER-CT + -A DOCKER-FORWARD -j DOCKER-ISOLATION-STAGE-1 + -A DOCKER-FORWARD -j DOCKER-BRIDGE + -A DOCKER-FORWARD -i docker0 -j ACCEPT + -A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2 + -A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP + -A KUBE-FIREWALL ! -s 127.0.0.0/8 -d 127.0.0.0/8 -m comment --comment "block incoming localnet connections" -m conntrack ! 
--ctstate RELATED,ESTABLISHED,DNAT -j DROP + -A KUBE-FORWARD -m conntrack --ctstate INVALID -m nfacct --nfacct-name ct_state_invalid_dropped_pkts -j DROP + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + COMMIT + # Completed on Sun Nov 2 23:26:10 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:26:10 2025 + *nat + :PREROUTING ACCEPT [36:2160] + :INPUT ACCEPT [36:2160] + :OUTPUT ACCEPT [58:3480] + :POSTROUTING ACCEPT [67:4155] + :DOCKER - [0:0] + :DOCKER_OUTPUT - [0:0] + :DOCKER_POSTROUTING - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-MARK-MASQ - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-POSTROUTING - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-SEP-IT2ZTR26TO4XFPTO - [0:0] + :KUBE-SEP-N4G2XR5TDX7PQE7P - [0:0] + :KUBE-SEP-RS6KXC4SAKLFSCFF - [0:0] + :KUBE-SEP-UTWFOSUDHOCXYA2F - [0:0] + :KUBE-SEP-YIL6JZP7A3QYXJU2 - [0:0] + :KUBE-SERVICES - [0:0] + :KUBE-SVC-ERIFXISQEP7F7OF4 - [0:0] + :KUBE-SVC-JD5MR3NA4I4DYORP - [0:0] + :KUBE-SVC-NPX46M4PTMTKRN6Y - [0:0] + :KUBE-SVC-TCOU7JCQXEZGVUNU - [0:0] + :KUBE-SVC-WDP22YZC5S6MZWYX - [0:0] + -A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A PREROUTING -d 192.168.94.1/32 -j DOCKER_OUTPUT + -A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER + -A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -d 192.168.94.1/32 -j DOCKER_OUTPUT + -A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER + -A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING + -A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE + -A POSTROUTING -d 192.168.94.1/32 -j DOCKER_POSTROUTING + -A POSTROUTING ! -d 10.0.0.0/8 -m comment --comment "kubenet: SNAT for outbound traffic from cluster" -m addrtype ! --dst-type LOCAL -j MASQUERADE + -A DOCKER -i docker0 -j RETURN + -A DOCKER_OUTPUT -d 192.168.94.1/32 -p tcp -m tcp --dport 53 -j DNAT --to-destination 127.0.0.11:40775 + -A DOCKER_OUTPUT -d 192.168.94.1/32 -p udp -m udp --dport 53 -j DNAT --to-destination 127.0.0.11:35537 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p tcp -m tcp --sport 40775 -j SNAT --to-source 192.168.94.1:53 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p udp -m udp --sport 35537 -j SNAT --to-source 192.168.94.1:53 + -A KUBE-MARK-MASQ -j MARK --set-xmark 0x4000/0x4000 + -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN + -A KUBE-POSTROUTING -j MARK --set-xmark 0x4000/0x0 + -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE --random-fully + -A KUBE-SEP-IT2ZTR26TO4XFPTO -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-MARK-MASQ + -A KUBE-SEP-IT2ZTR26TO4XFPTO -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m tcp -j DNAT --to-destination 10.244.0.2:53 + -A KUBE-SEP-N4G2XR5TDX7PQE7P -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:metrics" -j KUBE-MARK-MASQ + -A KUBE-SEP-N4G2XR5TDX7PQE7P -p tcp -m comment --comment "kube-system/kube-dns:metrics" -m tcp -j DNAT --to-destination 10.244.0.2:9153 + -A KUBE-SEP-RS6KXC4SAKLFSCFF -s 192.168.94.2/32 -m comment --comment "default/kubernetes:https" -j KUBE-MARK-MASQ + -A KUBE-SEP-RS6KXC4SAKLFSCFF -p tcp -m comment --comment "default/kubernetes:https" -m tcp -j DNAT --to-destination 192.168.94.2:8443 + -A KUBE-SEP-UTWFOSUDHOCXYA2F -s 10.244.0.3/32 -m comment --comment "default/netcat" -j KUBE-MARK-MASQ + -A KUBE-SEP-UTWFOSUDHOCXYA2F -p tcp -m comment --comment "default/netcat" -m tcp -j DNAT --to-destination 10.244.0.3:8080 + -A KUBE-SEP-YIL6JZP7A3QYXJU2 -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-MARK-MASQ + -A KUBE-SEP-YIL6JZP7A3QYXJU2 -p udp -m comment --comment "kube-system/kube-dns:dns" -m udp -j DNAT --to-destination 10.244.0.2:53 + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-SVC-JD5MR3NA4I4DYORP + -A KUBE-SERVICES -d 10.110.140.25/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-SVC-WDP22YZC5S6MZWYX + -A KUBE-SERVICES -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-NPX46M4PTMTKRN6Y + -A KUBE-SERVICES -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-SVC-TCOU7JCQXEZGVUNU + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-SVC-ERIFXISQEP7F7OF4 + -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS + -A KUBE-SVC-ERIFXISQEP7F7OF4 ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-ERIFXISQEP7F7OF4 -m comment --comment "kube-system/kube-dns:dns-tcp -> 10.244.0.2:53" -j KUBE-SEP-IT2ZTR26TO4XFPTO + -A KUBE-SVC-JD5MR3NA4I4DYORP ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-MARK-MASQ + -A KUBE-SVC-JD5MR3NA4I4DYORP -m comment --comment "kube-system/kube-dns:metrics -> 10.244.0.2:9153" -j KUBE-SEP-N4G2XR5TDX7PQE7P + -A KUBE-SVC-NPX46M4PTMTKRN6Y ! -s 10.244.0.0/16 -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-MARK-MASQ + -A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https -> 192.168.94.2:8443" -j KUBE-SEP-RS6KXC4SAKLFSCFF + -A KUBE-SVC-TCOU7JCQXEZGVUNU ! 
-s 10.244.0.0/16 -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-TCOU7JCQXEZGVUNU -m comment --comment "kube-system/kube-dns:dns -> 10.244.0.2:53" -j KUBE-SEP-YIL6JZP7A3QYXJU2 + -A KUBE-SVC-WDP22YZC5S6MZWYX ! -s 10.244.0.0/16 -d 10.110.140.25/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-MARK-MASQ + -A KUBE-SVC-WDP22YZC5S6MZWYX -m comment --comment "default/netcat -> 10.244.0.3:8080" -j KUBE-SEP-UTWFOSUDHOCXYA2F + COMMIT + # Completed on Sun Nov 2 23:26:10 2025 + + + >>> host: iptables table nat: + Chain PREROUTING (policy ACCEPT 37 packets, 2220 bytes) + pkts bytes target prot opt in out source destination + 52 3280 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 1 85 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.94.1 + 45 2700 DOCKER 0 -- * * 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type LOCAL + + Chain INPUT (policy ACCEPT 37 packets, 2220 bytes) + pkts bytes target prot opt in out source destination + + Chain OUTPUT (policy ACCEPT 59 packets, 3540 bytes) + pkts bytes target prot opt in out source destination + 553 45793 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 352 33867 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.94.1 + 95 5700 DOCKER 0 -- * * 0.0.0.0/0 !127.0.0.0/8 ADDRTYPE match dst-type LOCAL + + Chain POSTROUTING (policy ACCEPT 68 packets, 4215 bytes) + pkts bytes target prot opt in out source destination + 563 46528 KUBE-POSTROUTING 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */ + 0 0 MASQUERADE 0 -- * !docker0 172.17.0.0/16 0.0.0.0/0 + 0 0 DOCKER_POSTROUTING 0 -- * * 0.0.0.0/0 192.168.94.1 + 36 2120 MASQUERADE 0 -- * * 0.0.0.0/0 !10.0.0.0/8 /* kubenet: SNAT for outbound traffic from cluster */ ADDRTYPE match dst-type !LOCAL + + Chain DOCKER (2 references) + pkts bytes target prot opt in out source destination + 0 0 RETURN 0 -- docker0 * 0.0.0.0/0 0.0.0.0/0 + + Chain DOCKER_OUTPUT (2 references) + pkts bytes target prot opt in out source destination + 0 0 DNAT 6 -- * * 0.0.0.0/0 192.168.94.1 tcp dpt:53 to:127.0.0.11:40775 + 353 33952 DNAT 17 -- * * 0.0.0.0/0 192.168.94.1 udp dpt:53 to:127.0.0.11:35537 + + Chain DOCKER_POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 0 0 SNAT 6 -- * * 127.0.0.11 0.0.0.0/0 tcp spt:40775 to:192.168.94.1:53 + 0 0 SNAT 17 -- * * 127.0.0.11 0.0.0.0/0 udp spt:35537 to:192.168.94.1:53 + + Chain KUBE-KUBELET-CANARY (0 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-MARK-MASQ (10 references) + pkts bytes target prot opt in out source destination + 1 60 MARK 0 -- * * 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000 + + Chain KUBE-NODEPORTS (1 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 68 4215 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0 mark match ! 
0x4000/0x4000 + 1 60 MARK 0 -- * * 0.0.0.0/0 0.0.0.0/0 MARK xor 0x4000 + 1 60 MASQUERADE 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ random-fully + + Chain KUBE-PROXY-CANARY (0 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-SEP-IT2ZTR26TO4XFPTO (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ tcp to:10.244.0.2:53 + + Chain KUBE-SEP-N4G2XR5TDX7PQE7P (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:metrics */ + 0 0 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:metrics */ tcp to:10.244.0.2:9153 + + Chain KUBE-SEP-RS6KXC4SAKLFSCFF (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 0 -- * * 192.168.94.2 0.0.0.0/0 /* default/kubernetes:https */ + 2 120 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https */ tcp to:192.168.94.2:8443 + + Chain KUBE-SEP-UTWFOSUDHOCXYA2F (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 0 -- * * 10.244.0.3 0.0.0.0/0 /* default/netcat */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat */ tcp to:10.244.0.3:8080 + + Chain KUBE-SEP-YIL6JZP7A3QYXJU2 (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:dns */ + 8 615 DNAT 17 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns */ udp to:10.244.0.2:53 + + Chain KUBE-SERVICES (2 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-SVC-JD5MR3NA4I4DYORP 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 1 60 KUBE-SVC-WDP22YZC5S6MZWYX 6 -- * * 0.0.0.0/0 10.110.140.25 /* default/netcat cluster IP */ tcp dpt:8080 + 0 0 KUBE-SVC-NPX46M4PTMTKRN6Y 6 -- * * 0.0.0.0/0 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 8 615 KUBE-SVC-TCOU7JCQXEZGVUNU 17 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 1 60 KUBE-SVC-ERIFXISQEP7F7OF4 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 95 5700 KUBE-NODEPORTS 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL + + Chain KUBE-SVC-ERIFXISQEP7F7OF4 (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 1 60 KUBE-SEP-IT2ZTR26TO4XFPTO 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp -> 10.244.0.2:53 */ + + Chain KUBE-SVC-JD5MR3NA4I4DYORP (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 0 0 KUBE-SEP-N4G2XR5TDX7PQE7P 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:metrics -> 10.244.0.2:9153 */ + + Chain KUBE-SVC-NPX46M4PTMTKRN6Y (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 2 120 KUBE-SEP-RS6KXC4SAKLFSCFF 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https -> 192.168.94.2:8443 */ + + Chain 
KUBE-SVC-TCOU7JCQXEZGVUNU (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 17 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 8 615 KUBE-SEP-YIL6JZP7A3QYXJU2 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns -> 10.244.0.2:53 */ + + Chain KUBE-SVC-WDP22YZC5S6MZWYX (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.110.140.25 /* default/netcat cluster IP */ tcp dpt:8080 + 1 60 KUBE-SEP-UTWFOSUDHOCXYA2F 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat -> 10.244.0.3:8080 */ + + + >>> k8s: describe kube-proxy daemon set: + Name: kube-proxy + Namespace: kube-system + Selector: k8s-app=kube-proxy + Node-Selector: kubernetes.io/os=linux + Labels: k8s-app=kube-proxy + Annotations: deprecated.daemonset.template.generation: 1 + Desired Number of Nodes Scheduled: 1 + Current Number of Nodes Scheduled: 1 + Number of Nodes Scheduled with Up-to-date Pods: 1 + Number of Nodes Scheduled with Available Pods: 1 + Number of Nodes Misscheduled: 0 + Pods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed + Pod Template: + Labels: k8s-app=kube-proxy + Service Account: kube-proxy + Containers: + kube-proxy: + Image: registry.k8s.io/kube-proxy:v1.34.1 + Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + --hostname-override=$(NODE_NAME) + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + Priority Class Name: system-node-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 50s daemonset-controller Created pod: kube-proxy-hmkqf + + + >>> k8s: describe kube-proxy pod(s): + Name: kube-proxy-hmkqf + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Service Account: kube-proxy + Node: kubenet-999044/192.168.94.2 + Start Time: Sun, 02 Nov 2025 23:25:20 +0000 + Labels: controller-revision-hash=66486579fc + k8s-app=kube-proxy + pod-template-generation=1 + Annotations: + Status: Running + IP: 192.168.94.2 + IPs: + IP: 192.168.94.2 + Controlled By: DaemonSet/kube-proxy + Containers: + kube-proxy: + Container ID: docker://1d8d284f653693eff92f9c0d3af3f36498c024ef7bcfb895d5be6e245394dd22 + Image: registry.k8s.io/kube-proxy:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-proxy@sha256:913cc83ca0b5588a81d86ce8eedeb3ed1e9c1326e81852a1ea4f622b74ff749a + Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + --hostname-override=$(NODE_NAME) + State: Running + Started: Sun, 02 Nov 2025 23:25:20 +0000 + Ready: True + Restart Count: 0 + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-vwwdd (ro) + Conditions: + Type Status + PodReadyToStartContainers True + 
Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + kube-api-access-vwwdd: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + node.kubernetes.io/disk-pressure:NoSchedule op=Exists + node.kubernetes.io/memory-pressure:NoSchedule op=Exists + node.kubernetes.io/network-unavailable:NoSchedule op=Exists + node.kubernetes.io/not-ready:NoExecute op=Exists + node.kubernetes.io/pid-pressure:NoSchedule op=Exists + node.kubernetes.io/unreachable:NoExecute op=Exists + node.kubernetes.io/unschedulable:NoSchedule op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 50s default-scheduler Successfully assigned kube-system/kube-proxy-hmkqf to kubenet-999044 + Normal Pulled 50s kubelet Container image "registry.k8s.io/kube-proxy:v1.34.1" already present on machine + Normal Created 50s kubelet Created container: kube-proxy + Normal Started 49s kubelet Started container kube-proxy + + + >>> k8s: kube-proxy logs: + I1102 23:25:21.122419 1 server_linux.go:53] "Using iptables proxy" + I1102 23:25:21.163950 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache" + I1102 23:25:21.264080 1 shared_informer.go:356] "Caches are synced" controller="node informer cache" + I1102 23:25:21.264104 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.94.2"] + E1102 23:25:21.264149 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. 
Consider using `--nodeport-addresses primary`" + I1102 23:25:21.280540 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4" + I1102 23:25:21.280570 1 server_linux.go:132] "Using iptables Proxier" + I1102 23:25:21.283841 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4" + I1102 23:25:21.284111 1 server.go:527] "Version info" version="v1.34.1" + I1102 23:25:21.284127 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + I1102 23:25:21.284805 1 config.go:200] "Starting service config controller" + I1102 23:25:21.284818 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config" + I1102 23:25:21.284844 1 config.go:403] "Starting serviceCIDR config controller" + I1102 23:25:21.284847 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config" + I1102 23:25:21.284859 1 config.go:106] "Starting endpoint slice config controller" + I1102 23:25:21.284870 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config" + I1102 23:25:21.285056 1 config.go:309] "Starting node config controller" + I1102 23:25:21.285069 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config" + I1102 23:25:21.285073 1 shared_informer.go:356] "Caches are synced" controller="node config" + I1102 23:25:21.385289 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config" + I1102 23:25:21.385307 1 shared_informer.go:356] "Caches are synced" controller="service config" + I1102 23:25:21.385320 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config" + + + >>> host: kubelet daemon status: + ● kubelet.service - kubelet: The Kubernetes Node Agent + Loaded: loaded (]8;;file://kubenet-999044/lib/systemd/system/kubelet.service/lib/systemd/system/kubelet.service]8;;; disabled; preset: enabled) + Drop-In: /etc/systemd/system/kubelet.service.d + └─]8;;file://kubenet-999044/etc/systemd/system/kubelet.service.d/10-kubeadm.conf10-kubeadm.conf]8;; + Active: active (running) since Sun 2025-11-02 23:25:14 UTC; 56s ago + Docs: ]8;;http://kubernetes.io/docs/http://kubernetes.io/docs/]8;; + Main PID: 2199 (kubelet) + Tasks: 15 (limit: 629145) + Memory: 32.4M + CPU: 985ms + CGroup: /system.slice/kubelet.service + └─2199 /var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=kubenet-999044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.94.2 --pod-cidr=10.244.0.0/16 + + Nov 02 23:25:21 kubenet-999044 kubelet[2199]: I1102 23:25:21.489839 2199 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/655b5be4-3758-49f8-acbb-38eb909b8edc-kube-api-access-x2wmf" (OuterVolumeSpecName: "kube-api-access-x2wmf") pod "655b5be4-3758-49f8-acbb-38eb909b8edc" (UID: "655b5be4-3758-49f8-acbb-38eb909b8edc"). InnerVolumeSpecName "kube-api-access-x2wmf". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" + Nov 02 23:25:21 kubenet-999044 kubelet[2199]: I1102 23:25:21.589315 2199 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-x2wmf\" (UniqueName: \"kubernetes.io/projected/655b5be4-3758-49f8-acbb-38eb909b8edc-kube-api-access-x2wmf\") on node \"kubenet-999044\" DevicePath \"\"" + Nov 02 23:25:21 kubenet-999044 kubelet[2199]: I1102 23:25:21.589334 2199 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/655b5be4-3758-49f8-acbb-38eb909b8edc-config-volume\") on node \"kubenet-999044\" DevicePath \"\"" + Nov 02 23:25:22 kubenet-999044 kubelet[2199]: I1102 23:25:22.851151 2199 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" + Nov 02 23:25:24 kubenet-999044 kubelet[2199]: I1102 23:25:24.367012 2199 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="655b5be4-3758-49f8-acbb-38eb909b8edc" path="/var/lib/kubelet/pods/655b5be4-3758-49f8-acbb-38eb909b8edc/volumes" + Nov 02 23:25:24 kubenet-999044 kubelet[2199]: I1102 23:25:24.817146 2199 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24" + Nov 02 23:25:24 kubenet-999044 kubelet[2199]: I1102 23:25:24.817424 2199 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="10.244.0.0/16" newPodCIDR="10.244.0.0/24" + Nov 02 23:25:51 kubenet-999044 kubelet[2199]: I1102 23:25:51.501515 2199 scope.go:117] "RemoveContainer" containerID="bf913e59dad47385f47d88a3e80bdb12aebd5e86c64ca5ffcfd7f26206b64889" + Nov 02 23:25:54 kubenet-999044 kubelet[2199]: I1102 23:25:54.864667 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5frl\" (UniqueName: \"kubernetes.io/projected/9de80383-7400-4d8e-8237-175e17a20102-kube-api-access-v5frl\") pod \"netcat-cd4db9dbf-t8glm\" (UID: \"9de80383-7400-4d8e-8237-175e17a20102\") " pod="default/netcat-cd4db9dbf-t8glm" + Nov 02 23:25:56 kubenet-999044 kubelet[2199]: I1102 23:25:56.530540 2199 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/netcat-cd4db9dbf-t8glm" podStartSLOduration=1.687657685 podStartE2EDuration="2.530525755s" podCreationTimestamp="2025-11-02 23:25:54 +0000 UTC" firstStartedPulling="2025-11-02 23:25:55.210967183 +0000 UTC m=+40.899176412" lastFinishedPulling="2025-11-02 23:25:56.05383525 +0000 UTC m=+41.742044482" observedRunningTime="2025-11-02 23:25:56.530503121 +0000 UTC m=+42.218712367" watchObservedRunningTime="2025-11-02 23:25:56.530525755 +0000 UTC m=+42.218735000" + + + >>> host: kubelet daemon config: + # ]8;;file://kubenet-999044/lib/systemd/system/kubelet.service/lib/systemd/system/kubelet.service]8;; + [Unit] + Description=kubelet: The Kubernetes Node Agent + Documentation=http://kubernetes.io/docs/ + StartLimitIntervalSec=0 + + [Service] + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet + Restart=always + # Tuned for local dev: faster than upstream default (10s), but slower than systemd default (100ms) + RestartSec=600ms + + [Install] + WantedBy=multi-user.target + + # ]8;;file://kubenet-999044/etc/systemd/system/kubelet.service.d/10-kubeadm.conf/etc/systemd/system/kubelet.service.d/10-kubeadm.conf]8;; + [Unit] + Wants=docker.socket + + [Service] + ExecStart= + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=kubenet-999044 
--kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.94.2 --pod-cidr=10.244.0.0/16 + + [Install] + + + >>> k8s: kubelet logs: + Nov 02 23:25:05 kubenet-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 404. + Nov 02 23:25:05 kubenet-999044 kubelet[1522]: Flag --pod-cidr has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. + Nov 02 23:25:05 kubenet-999044 kubelet[1522]: E1102 23:25:05.863452 1522 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:05 kubenet-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:05 kubenet-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:06 kubenet-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 1. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:06 kubenet-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 457 and the job result is done. + Nov 02 23:25:06 kubenet-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 457. + Nov 02 23:25:06 kubenet-999044 kubelet[1610]: Flag --pod-cidr has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
+ Nov 02 23:25:06 kubenet-999044 kubelet[1610]: E1102 23:25:06.531117 1610 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:06 kubenet-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:06 kubenet-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:07 kubenet-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 2. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:07 kubenet-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 510 and the job result is done. + Nov 02 23:25:07 kubenet-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 510. + Nov 02 23:25:07 kubenet-999044 kubelet[1692]: Flag --pod-cidr has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. + Nov 02 23:25:07 kubenet-999044 kubelet[1692]: E1102 23:25:07.181026 1692 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:07 kubenet-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:07 kubenet-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. 
+ Nov 02 23:25:07 kubenet-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 3. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:07 kubenet-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 563 and the job result is done. + Nov 02 23:25:07 kubenet-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 563. + Nov 02 23:25:08 kubenet-999044 kubelet[1701]: Flag --pod-cidr has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. + Nov 02 23:25:08 kubenet-999044 kubelet[1701]: E1102 23:25:08.026574 1701 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:08 kubenet-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:08 kubenet-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:08 kubenet-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 4. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:08 kubenet-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 616 and the job result is done. + Nov 02 23:25:08 kubenet-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. 
+ ░░  + ░░ The job identifier is 616. + Nov 02 23:25:08 kubenet-999044 systemd[1]: Stopping kubelet.service - kubelet: The Kubernetes Node Agent... + ░░ Subject: A stop job for unit kubelet.service has begun execution + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has begun execution. + ░░  + ░░ The job identifier is 669. + Nov 02 23:25:08 kubenet-999044 systemd[1]: kubelet.service: Deactivated successfully. + ░░ Subject: Unit succeeded + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has successfully entered the 'dead' state. + Nov 02 23:25:08 kubenet-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 669 and the job result is done. + Nov 02 23:25:08 kubenet-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 670. + Nov 02 23:25:08 kubenet-999044 kubelet[1738]: Flag --pod-cidr has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. + Nov 02 23:25:09 kubenet-999044 kubelet[1738]: I1102 23:25:09.432495 1738 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:25:09 kubenet-999044 kubelet[1738]: I1102 23:25:09.432546 1738 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:25:09 kubenet-999044 kubelet[1738]: I1102 23:25:09.432570 1738 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:25:09 kubenet-999044 kubelet[1738]: I1102 23:25:09.432579 1738 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." + Nov 02 23:25:09 kubenet-999044 kubelet[1738]: I1102 23:25:09.432737 1738 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:25:09 kubenet-999044 kubelet[1738]: E1102 23:25:09.919995 1738 certificate_manager.go:596] "Failed while requesting a signed certificate from the control plane" err="cannot create certificate signing request: Post \"https://192.168.94.2:8443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 192.168.94.2:8443: connect: connection refused" logger="kubernetes.io/kube-apiserver-client-kubelet.UnhandledError" + Nov 02 23:25:09 kubenet-999044 kubelet[1738]: I1102 23:25:09.920592 1738 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:25:09 kubenet-999044 kubelet[1738]: I1102 23:25:09.922653 1738 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:25:09 kubenet-999044 kubelet[1738]: I1102 23:25:09.926098 1738 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. 
Defaulting to /" + Nov 02 23:25:09 kubenet-999044 kubelet[1738]: I1102 23:25:09.926117 1738 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:25:09 kubenet-999044 kubelet[1738]: I1102 23:25:09.926261 1738 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:25:09 kubenet-999044 kubelet[1738]: I1102 23:25:09.926275 1738 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"kubenet-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:25:09 kubenet-999044 kubelet[1738]: I1102 23:25:09.926376 1738 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:25:09 kubenet-999044 kubelet[1738]: I1102 23:25:09.926381 1738 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:25:09 kubenet-999044 kubelet[1738]: I1102 23:25:09.926440 1738 container_manager_linux.go:315] "Creating Dynamic Resource Allocation (DRA) manager" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.110048 1738 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.110170 1738 kubelet.go:475] "Attempting to sync node with API server" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.110185 1738 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.110206 1738 kubelet.go:387] "Adding apiserver pod source" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.110224 1738 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.110721 1738 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: Get \"https://192.168.94.2:8443/api/v1/nodes?fieldSelector=metadata.name%3Dkubenet-999044&limit=500&resourceVersion=0\": dial tcp 192.168.94.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.111221 1738 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.111273 1738 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/16" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.111494 1738 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: Get 
\"https://192.168.94.2:8443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 192.168.94.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.111811 1738 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/16" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.112247 1738 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.112271 1738 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: W1102 23:25:10.112314 1738 probe.go:272] Flexvolume plugin directory at /usr/libexec/kubernetes/kubelet-plugins/volume/exec/ does not exist. Recreating. + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.113559 1738 server.go:1262] "Started kubelet" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.113850 1738 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.113800 1738 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.113908 1738 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.114123 1738 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.114275 1738 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.114388 1738 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://192.168.94.2:8443/api/v1/namespaces/default/events\": dial tcp 192.168.94.2:8443: connect: connection refused" event="&Event{ObjectMeta:{kubenet-999044.1874542dd9d44a70 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:kubenet-999044,UID:kubenet-999044,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:kubenet-999044,},FirstTimestamp:2025-11-02 23:25:10.113536624 +0000 UTC m=+1.312404932,LastTimestamp:2025-11-02 23:25:10.113536624 +0000 UTC m=+1.312404932,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:kubenet-999044,}" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.114524 1738 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.114605 1738 server.go:310] "Adding debug handlers to kubelet server" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.114627 1738 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"kubenet-999044\" not found" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.114645 1738 volume_manager.go:313] 
"Starting Kubelet Volume Manager" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.114655 1738 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.114936 1738 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: Get \"https://192.168.94.2:8443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 192.168.94.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.115243 1738 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.94.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/kubenet-999044?timeout=10s\": dial tcp 192.168.94.2:8443: connect: connection refused" interval="200ms" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.116103 1738 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.119562 1738 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.119579 1738 factory.go:223] Registration of the systemd container factory successfully + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.119638 1738 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.127381 1738 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.127390 1738 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.127407 1738 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.131322 1738 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.132095 1738 kubelet_network_linux.go:54] "Initialized iptables rules." 
protocol="IPv6" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.132111 1738 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.132124 1738 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.132161 1738 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.132420 1738 reflector.go:205] "Failed to watch" err="failed to list *v1.RuntimeClass: Get \"https://192.168.94.2:8443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 192.168.94.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.216747 1738 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"kubenet-999044\" not found" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.232907 1738 kubelet.go:2451] "Skipping pod synchronization" err="container runtime status check may not have completed yet" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.316497 1738 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.94.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/kubenet-999044?timeout=10s\": dial tcp 192.168.94.2:8443: connect: connection refused" interval="400ms" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.317515 1738 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"kubenet-999044\" not found" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.417892 1738 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"kubenet-999044\" not found" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.433158 1738 kubelet.go:2451] "Skipping pod synchronization" err="container runtime status check may not have completed yet" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.518761 1738 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"kubenet-999044\" not found" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.550978 1738 policy_none.go:49] "None policy: Start" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.550996 1738 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.551005 1738 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.571364 1738 policy_none.go:47] "Start" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.597702 1738 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.597796 1738 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.597805 1738 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.598010 1738 plugin_manager.go:118] 
"Starting Kubelet Plugin Manager" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.599163 1738 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="no imagefs label for configured runtime" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.599191 1738 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"kubenet-999044\" not found" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.698640 1738 kubelet_node_status.go:75] "Attempting to register node" node="kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.699072 1738 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.94.2:8443/api/v1/nodes\": dial tcp 192.168.94.2:8443: connect: connection refused" node="kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.717675 1738 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.94.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/kubenet-999044?timeout=10s\": dial tcp 192.168.94.2:8443: connect: connection refused" interval="800ms" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.852201 1738 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kubenet-999044\" not found" node="kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.856297 1738 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kubenet-999044\" not found" node="kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.866265 1738 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kubenet-999044\" not found" node="kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.868825 1738 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kubenet-999044\" not found" node="kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.900467 1738 kubelet_node_status.go:75] "Attempting to register node" node="kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.900946 1738 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.94.2:8443/api/v1/nodes\": dial tcp 192.168.94.2:8443: connect: connection refused" node="kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.921363 1738 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/0e3f9da477512652af671b19b32fb95e-ca-certs\") pod \"kube-controller-manager-kubenet-999044\" (UID: \"0e3f9da477512652af671b19b32fb95e\") " pod="kube-system/kube-controller-manager-kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.921379 1738 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/96f99faf58af39f5cd2954559d07e883-kubeconfig\") pod \"kube-scheduler-kubenet-999044\" (UID: \"96f99faf58af39f5cd2954559d07e883\") " pod="kube-system/kube-scheduler-kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.921391 1738 reconciler_common.go:251] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/3caa1f256b214f1e72ecb80f1b1f693f-ca-certs\") pod \"kube-apiserver-kubenet-999044\" (UID: \"3caa1f256b214f1e72ecb80f1b1f693f\") " pod="kube-system/kube-apiserver-kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.921402 1738 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/3caa1f256b214f1e72ecb80f1b1f693f-usr-local-share-ca-certificates\") pod \"kube-apiserver-kubenet-999044\" (UID: \"3caa1f256b214f1e72ecb80f1b1f693f\") " pod="kube-system/kube-apiserver-kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.921414 1738 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/3caa1f256b214f1e72ecb80f1b1f693f-usr-share-ca-certificates\") pod \"kube-apiserver-kubenet-999044\" (UID: \"3caa1f256b214f1e72ecb80f1b1f693f\") " pod="kube-system/kube-apiserver-kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.921457 1738 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/0e3f9da477512652af671b19b32fb95e-etc-ca-certificates\") pod \"kube-controller-manager-kubenet-999044\" (UID: \"0e3f9da477512652af671b19b32fb95e\") " pod="kube-system/kube-controller-manager-kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.921488 1738 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/0e3f9da477512652af671b19b32fb95e-usr-share-ca-certificates\") pod \"kube-controller-manager-kubenet-999044\" (UID: \"0e3f9da477512652af671b19b32fb95e\") " pod="kube-system/kube-controller-manager-kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.921504 1738 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/47ac623026060081cffae7f09bf52808-etcd-data\") pod \"etcd-kubenet-999044\" (UID: \"47ac623026060081cffae7f09bf52808\") " pod="kube-system/etcd-kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.921524 1738 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/0e3f9da477512652af671b19b32fb95e-k8s-certs\") pod \"kube-controller-manager-kubenet-999044\" (UID: \"0e3f9da477512652af671b19b32fb95e\") " pod="kube-system/kube-controller-manager-kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.921539 1738 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/0e3f9da477512652af671b19b32fb95e-kubeconfig\") pod \"kube-controller-manager-kubenet-999044\" (UID: \"0e3f9da477512652af671b19b32fb95e\") " pod="kube-system/kube-controller-manager-kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.921563 1738 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: 
\"kubernetes.io/host-path/47ac623026060081cffae7f09bf52808-etcd-certs\") pod \"etcd-kubenet-999044\" (UID: \"47ac623026060081cffae7f09bf52808\") " pod="kube-system/etcd-kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.921580 1738 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/0e3f9da477512652af671b19b32fb95e-flexvolume-dir\") pod \"kube-controller-manager-kubenet-999044\" (UID: \"0e3f9da477512652af671b19b32fb95e\") " pod="kube-system/kube-controller-manager-kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.921593 1738 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/0e3f9da477512652af671b19b32fb95e-usr-local-share-ca-certificates\") pod \"kube-controller-manager-kubenet-999044\" (UID: \"0e3f9da477512652af671b19b32fb95e\") " pod="kube-system/kube-controller-manager-kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.921608 1738 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/3caa1f256b214f1e72ecb80f1b1f693f-etc-ca-certificates\") pod \"kube-apiserver-kubenet-999044\" (UID: \"3caa1f256b214f1e72ecb80f1b1f693f\") " pod="kube-system/kube-apiserver-kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: I1102 23:25:10.921625 1738 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/3caa1f256b214f1e72ecb80f1b1f693f-k8s-certs\") pod \"kube-apiserver-kubenet-999044\" (UID: \"3caa1f256b214f1e72ecb80f1b1f693f\") " pod="kube-system/kube-apiserver-kubenet-999044" + Nov 02 23:25:10 kubenet-999044 kubelet[1738]: E1102 23:25:10.967394 1738 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: Get \"https://192.168.94.2:8443/api/v1/nodes?fieldSelector=metadata.name%3Dkubenet-999044&limit=500&resourceVersion=0\": dial tcp 192.168.94.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" + Nov 02 23:25:11 kubenet-999044 kubelet[1738]: E1102 23:25:11.072394 1738 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: Get \"https://192.168.94.2:8443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 192.168.94.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" + Nov 02 23:25:11 kubenet-999044 kubelet[1738]: I1102 23:25:11.301979 1738 kubelet_node_status.go:75] "Attempting to register node" node="kubenet-999044" + Nov 02 23:25:11 kubenet-999044 kubelet[1738]: E1102 23:25:11.302259 1738 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.94.2:8443/api/v1/nodes\": dial tcp 192.168.94.2:8443: connect: connection refused" node="kubenet-999044" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: I1102 23:25:12.103860 1738 kubelet_node_status.go:75] "Attempting to register node" node="kubenet-999044" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: E1102 23:25:12.143607 1738 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kubenet-999044\" not found" node="kubenet-999044" + Nov 
02 23:25:12 kubenet-999044 kubelet[1738]: E1102 23:25:12.148859 1738 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kubenet-999044\" not found" node="kubenet-999044" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: E1102 23:25:12.154207 1738 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kubenet-999044\" not found" node="kubenet-999044" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: E1102 23:25:12.160404 1738 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"kubenet-999044\" not found" node="kubenet-999044" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: E1102 23:25:12.411065 1738 nodelease.go:49] "Failed to get node when trying to set owner ref to the node lease" err="nodes \"kubenet-999044\" not found" node="kubenet-999044" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: I1102 23:25:12.507736 1738 kubelet_node_status.go:78] "Successfully registered node" node="kubenet-999044" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: E1102 23:25:12.507868 1738 kubelet_node_status.go:486] "Error updating node status, will retry" err="error getting node \"kubenet-999044\": node \"kubenet-999044\" not found" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: E1102 23:25:12.523051 1738 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"kubenet-999044\" not found" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: E1102 23:25:12.623281 1738 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"kubenet-999044\" not found" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: E1102 23:25:12.723861 1738 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"kubenet-999044\" not found" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: I1102 23:25:12.815260 1738 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-kubenet-999044" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: E1102 23:25:12.817827 1738 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-kubenet-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-apiserver-kubenet-999044" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: I1102 23:25:12.817844 1738 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-kubenet-999044" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: E1102 23:25:12.818817 1738 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-kubenet-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-controller-manager-kubenet-999044" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: I1102 23:25:12.818827 1738 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-kubenet-999044" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: E1102 23:25:12.819663 1738 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-kubenet-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-scheduler-kubenet-999044" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: I1102 23:25:12.819672 1738 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-kubenet-999044" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: E1102 23:25:12.820589 1738 kubelet.go:3221] 
"Failed creating a mirror pod" err="pods \"etcd-kubenet-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/etcd-kubenet-999044" + Nov 02 23:25:12 kubenet-999044 kubelet[1738]: I1102 23:25:12.825459 1738 kubelet_node_status.go:439] "Fast updating node status as it just became ready" + Nov 02 23:25:13 kubenet-999044 kubelet[1738]: I1102 23:25:13.113296 1738 apiserver.go:52] "Watching apiserver" + Nov 02 23:25:13 kubenet-999044 kubelet[1738]: I1102 23:25:13.116438 1738 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:25:13 kubenet-999044 kubelet[1738]: I1102 23:25:13.161472 1738 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-kubenet-999044" + Nov 02 23:25:13 kubenet-999044 kubelet[1738]: I1102 23:25:13.161546 1738 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-kubenet-999044" + Nov 02 23:25:13 kubenet-999044 kubelet[1738]: I1102 23:25:13.161608 1738 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-kubenet-999044" + Nov 02 23:25:13 kubenet-999044 kubelet[1738]: E1102 23:25:13.162540 1738 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-kubenet-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/etcd-kubenet-999044" + Nov 02 23:25:13 kubenet-999044 kubelet[1738]: E1102 23:25:13.162573 1738 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-kubenet-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-apiserver-kubenet-999044" + Nov 02 23:25:13 kubenet-999044 kubelet[1738]: E1102 23:25:13.162643 1738 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-kubenet-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-scheduler-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[1738]: I1102 23:25:14.282091 1738 dynamic_cafile_content.go:175] "Shutting down controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:25:14 kubenet-999044 systemd[1]: Stopping kubelet.service - kubelet: The Kubernetes Node Agent... + ░░ Subject: A stop job for unit kubelet.service has begun execution + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has begun execution. + ░░  + ░░ The job identifier is 802. + Nov 02 23:25:14 kubenet-999044 systemd[1]: kubelet.service: Deactivated successfully. + ░░ Subject: Unit succeeded + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has successfully entered the 'dead' state. + Nov 02 23:25:14 kubenet-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 802 and the job result is done. + Nov 02 23:25:14 kubenet-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 802. 
+ Nov 02 23:25:14 kubenet-999044 kubelet[2199]: Flag --pod-cidr has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.337694 2199 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.337731 2199 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.337751 2199 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.337755 2199 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.337893 2199 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.338552 2199 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.339702 2199 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.342221 2199 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.345251 2199 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. Defaulting to /" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.345266 2199 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.345378 2199 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.345403 2199 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"kubenet-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.345501 2199 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.345507 2199 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.345522 2199 container_manager_linux.go:315] "Creating Dynamic Resource 
Allocation (DRA) manager" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.345863 2199 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.345989 2199 kubelet.go:475] "Attempting to sync node with API server" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.346002 2199 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.346014 2199 kubelet.go:387] "Adding apiserver pod source" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.346031 2199 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.346575 2199 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.346638 2199 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/16" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.346878 2199 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/16" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.347160 2199 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.347175 2199 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.347652 2199 server.go:1262] "Started kubelet" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.347724 2199 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.347879 2199 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.347943 2199 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.348142 2199 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.350679 2199 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: E1102 23:25:14.350985 2199 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"kubenet-999044\" not found" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.351039 2199 volume_manager.go:313] "Starting Kubelet Volume Manager" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.351248 2199 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.351421 2199 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.352008 2199 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.355094 2199 server.go:310] "Adding debug handlers to kubelet 
server" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.355476 2199 factory.go:223] Registration of the systemd container factory successfully + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.355530 2199 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.356891 2199 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.358751 2199 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv6" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.363069 2199 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.363096 2199 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.363115 2199 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: E1102 23:25:14.363152 2199 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.372101 2199 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.372113 2199 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.372131 2199 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.372231 2199 state_mem.go:88] "Updated default CPUSet" cpuSet="" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.372238 2199 state_mem.go:96] "Updated CPUSet assignments" assignments={} + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.372251 2199 policy_none.go:49] "None policy: Start" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.372258 2199 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.372266 2199 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.372333 2199 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.372338 2199 policy_none.go:47] "Start" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: E1102 23:25:14.374855 2199 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.375012 2199 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.375019 2199 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.375170 2199 plugin_manager.go:118] "Starting Kubelet Plugin Manager" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: E1102 23:25:14.375503 2199 eviction_manager.go:267] "eviction manager: failed to check if we have separate container 
filesystem. Ignoring." err="no imagefs label for configured runtime" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.464548 2199 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.464561 2199 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.464591 2199 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.464757 2199 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.475598 2199 kubelet_node_status.go:75] "Attempting to register node" node="kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.478713 2199 kubelet_node_status.go:124] "Node was previously registered" node="kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.478762 2199 kubelet_node_status.go:78] "Successfully registered node" node="kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.652216 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/0e3f9da477512652af671b19b32fb95e-flexvolume-dir\") pod \"kube-controller-manager-kubenet-999044\" (UID: \"0e3f9da477512652af671b19b32fb95e\") " pod="kube-system/kube-controller-manager-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.652258 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/0e3f9da477512652af671b19b32fb95e-usr-share-ca-certificates\") pod \"kube-controller-manager-kubenet-999044\" (UID: \"0e3f9da477512652af671b19b32fb95e\") " pod="kube-system/kube-controller-manager-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.652272 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/96f99faf58af39f5cd2954559d07e883-kubeconfig\") pod \"kube-scheduler-kubenet-999044\" (UID: \"96f99faf58af39f5cd2954559d07e883\") " pod="kube-system/kube-scheduler-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.652285 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/3caa1f256b214f1e72ecb80f1b1f693f-usr-local-share-ca-certificates\") pod \"kube-apiserver-kubenet-999044\" (UID: \"3caa1f256b214f1e72ecb80f1b1f693f\") " pod="kube-system/kube-apiserver-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.652296 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/0e3f9da477512652af671b19b32fb95e-k8s-certs\") pod \"kube-controller-manager-kubenet-999044\" (UID: \"0e3f9da477512652af671b19b32fb95e\") " pod="kube-system/kube-controller-manager-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.652306 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/0e3f9da477512652af671b19b32fb95e-usr-local-share-ca-certificates\") pod \"kube-controller-manager-kubenet-999044\" (UID: \"0e3f9da477512652af671b19b32fb95e\") " pod="kube-system/kube-controller-manager-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.652321 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/3caa1f256b214f1e72ecb80f1b1f693f-etc-ca-certificates\") pod \"kube-apiserver-kubenet-999044\" (UID: \"3caa1f256b214f1e72ecb80f1b1f693f\") " pod="kube-system/kube-apiserver-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.652330 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/3caa1f256b214f1e72ecb80f1b1f693f-k8s-certs\") pod \"kube-apiserver-kubenet-999044\" (UID: \"3caa1f256b214f1e72ecb80f1b1f693f\") " pod="kube-system/kube-apiserver-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.652340 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/0e3f9da477512652af671b19b32fb95e-ca-certs\") pod \"kube-controller-manager-kubenet-999044\" (UID: \"0e3f9da477512652af671b19b32fb95e\") " pod="kube-system/kube-controller-manager-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.652351 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/0e3f9da477512652af671b19b32fb95e-etc-ca-certificates\") pod \"kube-controller-manager-kubenet-999044\" (UID: \"0e3f9da477512652af671b19b32fb95e\") " pod="kube-system/kube-controller-manager-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.652361 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/0e3f9da477512652af671b19b32fb95e-kubeconfig\") pod \"kube-controller-manager-kubenet-999044\" (UID: \"0e3f9da477512652af671b19b32fb95e\") " pod="kube-system/kube-controller-manager-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.652388 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/47ac623026060081cffae7f09bf52808-etcd-certs\") pod \"etcd-kubenet-999044\" (UID: \"47ac623026060081cffae7f09bf52808\") " pod="kube-system/etcd-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.652426 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/47ac623026060081cffae7f09bf52808-etcd-data\") pod \"etcd-kubenet-999044\" (UID: \"47ac623026060081cffae7f09bf52808\") " pod="kube-system/etcd-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.652440 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/3caa1f256b214f1e72ecb80f1b1f693f-usr-share-ca-certificates\") pod \"kube-apiserver-kubenet-999044\" (UID: 
\"3caa1f256b214f1e72ecb80f1b1f693f\") " pod="kube-system/kube-apiserver-kubenet-999044" + Nov 02 23:25:14 kubenet-999044 kubelet[2199]: I1102 23:25:14.652473 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/3caa1f256b214f1e72ecb80f1b1f693f-ca-certs\") pod \"kube-apiserver-kubenet-999044\" (UID: \"3caa1f256b214f1e72ecb80f1b1f693f\") " pod="kube-system/kube-apiserver-kubenet-999044" + Nov 02 23:25:15 kubenet-999044 kubelet[2199]: I1102 23:25:15.347211 2199 apiserver.go:52] "Watching apiserver" + Nov 02 23:25:15 kubenet-999044 kubelet[2199]: I1102 23:25:15.352012 2199 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:25:15 kubenet-999044 kubelet[2199]: I1102 23:25:15.383996 2199 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-kubenet-999044" + Nov 02 23:25:15 kubenet-999044 kubelet[2199]: I1102 23:25:15.384009 2199 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-kubenet-999044" + Nov 02 23:25:15 kubenet-999044 kubelet[2199]: I1102 23:25:15.384247 2199 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-kubenet-999044" + Nov 02 23:25:15 kubenet-999044 kubelet[2199]: E1102 23:25:15.387892 2199 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-kubenet-999044\" already exists" pod="kube-system/kube-scheduler-kubenet-999044" + Nov 02 23:25:15 kubenet-999044 kubelet[2199]: E1102 23:25:15.388204 2199 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-kubenet-999044\" already exists" pod="kube-system/etcd-kubenet-999044" + Nov 02 23:25:15 kubenet-999044 kubelet[2199]: E1102 23:25:15.388532 2199 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-kubenet-999044\" already exists" pod="kube-system/kube-apiserver-kubenet-999044" + Nov 02 23:25:15 kubenet-999044 kubelet[2199]: I1102 23:25:15.399665 2199 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-kubenet-999044" podStartSLOduration=1.399654866 podStartE2EDuration="1.399654866s" podCreationTimestamp="2025-11-02 23:25:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:15.399601013 +0000 UTC m=+1.087810258" watchObservedRunningTime="2025-11-02 23:25:15.399654866 +0000 UTC m=+1.087864102" + Nov 02 23:25:15 kubenet-999044 kubelet[2199]: I1102 23:25:15.399734 2199 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-scheduler-kubenet-999044" podStartSLOduration=1.399730336 podStartE2EDuration="1.399730336s" podCreationTimestamp="2025-11-02 23:25:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:15.395710593 +0000 UTC m=+1.083919837" watchObservedRunningTime="2025-11-02 23:25:15.399730336 +0000 UTC m=+1.087939579" + Nov 02 23:25:15 kubenet-999044 kubelet[2199]: I1102 23:25:15.403896 2199 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-apiserver-kubenet-999044" podStartSLOduration=1.403884894 podStartE2EDuration="1.403884894s" podCreationTimestamp="2025-11-02 23:25:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-02 23:25:15.403854595 +0000 UTC m=+1.092063853" watchObservedRunningTime="2025-11-02 23:25:15.403884894 +0000 UTC m=+1.092094157" + Nov 02 23:25:15 kubenet-999044 kubelet[2199]: I1102 23:25:15.408218 2199 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-kubenet-999044" podStartSLOduration=1.408208922 podStartE2EDuration="1.408208922s" podCreationTimestamp="2025-11-02 23:25:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:15.407996876 +0000 UTC m=+1.096206116" watchObservedRunningTime="2025-11-02 23:25:15.408208922 +0000 UTC m=+1.096418167" + Nov 02 23:25:20 kubenet-999044 kubelet[2199]: I1102 23:25:20.480832 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/b6689fce-16db-42d8-bf5e-37b332f3f09b-xtables-lock\") pod \"kube-proxy-hmkqf\" (UID: \"b6689fce-16db-42d8-bf5e-37b332f3f09b\") " pod="kube-system/kube-proxy-hmkqf" + Nov 02 23:25:20 kubenet-999044 kubelet[2199]: I1102 23:25:20.480861 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/b6689fce-16db-42d8-bf5e-37b332f3f09b-kube-proxy\") pod \"kube-proxy-hmkqf\" (UID: \"b6689fce-16db-42d8-bf5e-37b332f3f09b\") " pod="kube-system/kube-proxy-hmkqf" + Nov 02 23:25:20 kubenet-999044 kubelet[2199]: I1102 23:25:20.480873 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/b6689fce-16db-42d8-bf5e-37b332f3f09b-lib-modules\") pod \"kube-proxy-hmkqf\" (UID: \"b6689fce-16db-42d8-bf5e-37b332f3f09b\") " pod="kube-system/kube-proxy-hmkqf" + Nov 02 23:25:20 kubenet-999044 kubelet[2199]: I1102 23:25:20.480887 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwwdd\" (UniqueName: \"kubernetes.io/projected/b6689fce-16db-42d8-bf5e-37b332f3f09b-kube-api-access-vwwdd\") pod \"kube-proxy-hmkqf\" (UID: \"b6689fce-16db-42d8-bf5e-37b332f3f09b\") " pod="kube-system/kube-proxy-hmkqf" + Nov 02 23:25:20 kubenet-999044 kubelet[2199]: E1102 23:25:20.637200 2199 pod_workers.go:1324] "Error syncing pod, skipping" err="unmounted volumes=[config-volume kube-api-access-x2wmf], unattached volumes=[], failed to process volumes=[config-volume kube-api-access-x2wmf]: context canceled" pod="kube-system/coredns-66bc5c9577-dgvdm" podUID="655b5be4-3758-49f8-acbb-38eb909b8edc" + Nov 02 23:25:20 kubenet-999044 kubelet[2199]: I1102 23:25:20.683035 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2wmf\" (UniqueName: \"kubernetes.io/projected/655b5be4-3758-49f8-acbb-38eb909b8edc-kube-api-access-x2wmf\") pod \"coredns-66bc5c9577-dgvdm\" (UID: \"655b5be4-3758-49f8-acbb-38eb909b8edc\") " pod="kube-system/coredns-66bc5c9577-dgvdm" + Nov 02 23:25:20 kubenet-999044 kubelet[2199]: I1102 23:25:20.683121 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4044631f-cb2d-4ff6-a090-916bd919c35f-config-volume\") pod \"coredns-66bc5c9577-hh4bv\" (UID: \"4044631f-cb2d-4ff6-a090-916bd919c35f\") " pod="kube-system/coredns-66bc5c9577-hh4bv" + Nov 02 23:25:20 kubenet-999044 kubelet[2199]: 
I1102 23:25:20.683139 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fv7tm\" (UniqueName: \"kubernetes.io/projected/4044631f-cb2d-4ff6-a090-916bd919c35f-kube-api-access-fv7tm\") pod \"coredns-66bc5c9577-hh4bv\" (UID: \"4044631f-cb2d-4ff6-a090-916bd919c35f\") " pod="kube-system/coredns-66bc5c9577-hh4bv" + Nov 02 23:25:20 kubenet-999044 kubelet[2199]: I1102 23:25:20.683159 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/f33bede9-924d-4f8f-87ac-9a7c4bf5f55c-tmp\") pod \"storage-provisioner\" (UID: \"f33bede9-924d-4f8f-87ac-9a7c4bf5f55c\") " pod="kube-system/storage-provisioner" + Nov 02 23:25:20 kubenet-999044 kubelet[2199]: I1102 23:25:20.683178 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/655b5be4-3758-49f8-acbb-38eb909b8edc-config-volume\") pod \"coredns-66bc5c9577-dgvdm\" (UID: \"655b5be4-3758-49f8-acbb-38eb909b8edc\") " pod="kube-system/coredns-66bc5c9577-dgvdm" + Nov 02 23:25:20 kubenet-999044 kubelet[2199]: I1102 23:25:20.683214 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzp52\" (UniqueName: \"kubernetes.io/projected/f33bede9-924d-4f8f-87ac-9a7c4bf5f55c-kube-api-access-rzp52\") pod \"storage-provisioner\" (UID: \"f33bede9-924d-4f8f-87ac-9a7c4bf5f55c\") " pod="kube-system/storage-provisioner" + Nov 02 23:25:21 kubenet-999044 kubelet[2199]: I1102 23:25:21.424616 2199 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=1.424600065 podStartE2EDuration="1.424600065s" podCreationTimestamp="2025-11-02 23:25:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:21.409462961 +0000 UTC m=+7.097672213" watchObservedRunningTime="2025-11-02 23:25:21.424600065 +0000 UTC m=+7.112809311" + Nov 02 23:25:21 kubenet-999044 kubelet[2199]: I1102 23:25:21.434133 2199 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-hmkqf" podStartSLOduration=1.434119245 podStartE2EDuration="1.434119245s" podCreationTimestamp="2025-11-02 23:25:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:21.425336852 +0000 UTC m=+7.113546099" watchObservedRunningTime="2025-11-02 23:25:21.434119245 +0000 UTC m=+7.122328569" + Nov 02 23:25:21 kubenet-999044 kubelet[2199]: I1102 23:25:21.442945 2199 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-hh4bv" podStartSLOduration=1.442906407 podStartE2EDuration="1.442906407s" podCreationTimestamp="2025-11-02 23:25:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:21.43561515 +0000 UTC m=+7.123824395" watchObservedRunningTime="2025-11-02 23:25:21.442906407 +0000 UTC m=+7.131115767" + Nov 02 23:25:21 kubenet-999044 kubelet[2199]: I1102 23:25:21.488309 2199 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2wmf\" (UniqueName: \"kubernetes.io/projected/655b5be4-3758-49f8-acbb-38eb909b8edc-kube-api-access-x2wmf\") pod 
\"655b5be4-3758-49f8-acbb-38eb909b8edc\" (UID: \"655b5be4-3758-49f8-acbb-38eb909b8edc\") " + Nov 02 23:25:21 kubenet-999044 kubelet[2199]: I1102 23:25:21.488338 2199 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/655b5be4-3758-49f8-acbb-38eb909b8edc-config-volume\") pod \"655b5be4-3758-49f8-acbb-38eb909b8edc\" (UID: \"655b5be4-3758-49f8-acbb-38eb909b8edc\") " + Nov 02 23:25:21 kubenet-999044 kubelet[2199]: I1102 23:25:21.488760 2199 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/655b5be4-3758-49f8-acbb-38eb909b8edc-config-volume" (OuterVolumeSpecName: "config-volume") pod "655b5be4-3758-49f8-acbb-38eb909b8edc" (UID: "655b5be4-3758-49f8-acbb-38eb909b8edc"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGIDValue "" + Nov 02 23:25:21 kubenet-999044 kubelet[2199]: I1102 23:25:21.489839 2199 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/655b5be4-3758-49f8-acbb-38eb909b8edc-kube-api-access-x2wmf" (OuterVolumeSpecName: "kube-api-access-x2wmf") pod "655b5be4-3758-49f8-acbb-38eb909b8edc" (UID: "655b5be4-3758-49f8-acbb-38eb909b8edc"). InnerVolumeSpecName "kube-api-access-x2wmf". PluginName "kubernetes.io/projected", VolumeGIDValue "" + Nov 02 23:25:21 kubenet-999044 kubelet[2199]: I1102 23:25:21.589315 2199 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-x2wmf\" (UniqueName: \"kubernetes.io/projected/655b5be4-3758-49f8-acbb-38eb909b8edc-kube-api-access-x2wmf\") on node \"kubenet-999044\" DevicePath \"\"" + Nov 02 23:25:21 kubenet-999044 kubelet[2199]: I1102 23:25:21.589334 2199 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/655b5be4-3758-49f8-acbb-38eb909b8edc-config-volume\") on node \"kubenet-999044\" DevicePath \"\"" + Nov 02 23:25:22 kubenet-999044 kubelet[2199]: I1102 23:25:22.851151 2199 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" + Nov 02 23:25:24 kubenet-999044 kubelet[2199]: I1102 23:25:24.367012 2199 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="655b5be4-3758-49f8-acbb-38eb909b8edc" path="/var/lib/kubelet/pods/655b5be4-3758-49f8-acbb-38eb909b8edc/volumes" + Nov 02 23:25:24 kubenet-999044 kubelet[2199]: I1102 23:25:24.817146 2199 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24" + Nov 02 23:25:24 kubenet-999044 kubelet[2199]: I1102 23:25:24.817424 2199 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="10.244.0.0/16" newPodCIDR="10.244.0.0/24" + Nov 02 23:25:51 kubenet-999044 kubelet[2199]: I1102 23:25:51.501515 2199 scope.go:117] "RemoveContainer" containerID="bf913e59dad47385f47d88a3e80bdb12aebd5e86c64ca5ffcfd7f26206b64889" + Nov 02 23:25:54 kubenet-999044 kubelet[2199]: I1102 23:25:54.864667 2199 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5frl\" (UniqueName: \"kubernetes.io/projected/9de80383-7400-4d8e-8237-175e17a20102-kube-api-access-v5frl\") pod \"netcat-cd4db9dbf-t8glm\" (UID: \"9de80383-7400-4d8e-8237-175e17a20102\") " pod="default/netcat-cd4db9dbf-t8glm" + Nov 02 23:25:56 kubenet-999044 kubelet[2199]: I1102 23:25:56.530540 2199 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/netcat-cd4db9dbf-t8glm" podStartSLOduration=1.687657685 
podStartE2EDuration="2.530525755s" podCreationTimestamp="2025-11-02 23:25:54 +0000 UTC" firstStartedPulling="2025-11-02 23:25:55.210967183 +0000 UTC m=+40.899176412" lastFinishedPulling="2025-11-02 23:25:56.05383525 +0000 UTC m=+41.742044482" observedRunningTime="2025-11-02 23:25:56.530503121 +0000 UTC m=+42.218712367" watchObservedRunningTime="2025-11-02 23:25:56.530525755 +0000 UTC m=+42.218735000" + + + >>> host: /etc/kubernetes/kubelet.conf: + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://192.168.94.2:8443 + name: mk + contexts: + - context: + cluster: mk + user: system:node:kubenet-999044 + name: system:node:kubenet-999044@mk + current-context: system:node:kubenet-999044@mk + kind: Config + users: + - name: system:node:kubenet-999044 + user: + client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem + client-key: /var/lib/kubelet/pki/kubelet-client-current.pem + + + >>> host: /var/lib/kubelet/config.yaml: + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + 
rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + >>> k8s: kubectl config: + apiVersion: v1 + clusters: + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:31 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.76.2:8443 + name: enable-default-cni-999044 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:20 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.94.2:8443 + name: kubenet-999044 + contexts: + - context: + cluster: enable-default-cni-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:31 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: enable-default-cni-999044 + name: enable-default-cni-999044 + - context: + cluster: kubenet-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:20 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: kubenet-999044 + name: kubenet-999044 + current-context: enable-default-cni-999044 + kind: Config + users: + - name: enable-default-cni-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/enable-default-cni-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/enable-default-cni-999044/client.key + - name: kubenet-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kubenet-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kubenet-999044/client.key + + + >>> k8s: cms: + apiVersion: v1 + items: + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: 
ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:25:20Z" + name: kube-root-ca.crt + namespace: default + resourceVersion: "298" + uid: 7642c70b-8e72-4ed3-be83-ae61ef060499 + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. 
+ creationTimestamp: "2025-11-02T23:25:20Z" + name: kube-root-ca.crt + namespace: kube-node-lease + resourceVersion: "299" + uid: ed03ba58-0415-4309-8810-ddd6e94785bf + - apiVersion: v1 + data: + jws-kubeconfig-08vk4k: eyJhbGciOiJIUzI1NiIsImtpZCI6IjA4dms0ayJ9..mbKhC-KwG8R2KKMbL2Jg80CCZbLaOsj6VaQiALM7nhw + kubeconfig: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://control-plane.minikube.internal:8443 + name: "" + contexts: null + current-context: "" + kind: Config + users: null + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:13Z" + name: cluster-info + namespace: kube-public + resourceVersion: "297" + uid: 21a3f4ca-64a6-4484-99fd-500ffee56b12 + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal 
endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:25:20Z" + name: kube-root-ca.crt + namespace: kube-public + resourceVersion: "300" + uid: 4eaa42de-718b-4ba7-8b10-dd5af41c0f54 + - apiVersion: v1 + data: + Corefile: | + .:53 { + log + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + hosts { + 192.168.94.1 host.minikube.internal + fallthrough + } + forward . /etc/resolv.conf { + max_concurrent 1000 + } + cache 30 { + disable success cluster.local + disable denial cluster.local + } + loop + reload + loadbalance + } + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:14Z" + name: coredns + namespace: kube-system + resourceVersion: "317" + uid: 8888147e-d895-44c5-b21c-c2fc9b3ac1a9 + - apiVersion: v1 + data: + client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + requestheader-allowed-names: '["front-proxy-client"]' + requestheader-client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDETCCAfmgAwIBAgIIMNjbfXQDkCkwDQYJKoZIhvcNAQELBQAwGTEXMBUGA1UE + AxMOZnJvbnQtcHJveHktY2EwHhcNMjUxMTAyMjMyMDA2WhcNMzUxMDMxMjMyNTA2 + WjAZMRcwFQYDVQQDEw5mcm9udC1wcm94eS1jYTCCASIwDQYJKoZIhvcNAQEBBQAD + ggEPADCCAQoCggEBANwrzwZSSJJB4Lm87OyXV/2p2TEowshFZwT7d7CWTiTMUkMb + irwaCLQIsGJ6p3QRbVxfVFxVKrxRSDKuUtcM1oC0wdHE2vVNDTOuNIjVyKF1f04c + a1w3DDTjzFYrynPzkMnWmtU8m9h/FmBhrZ9ZOZNDH/KPmRRQ/Si1YaTKnIyj0HSR + GmI17fvIRiyi8zojRCRYmFSUNcRdLYxR+LJtq2OS0sxyGLyuzCr7QFfRVwlKE9YG + FNi9fhmEnBaENK8IRSqPq/ZRGRosKXVrbMG248J09bovPNS6d6EhKpSQr0Tl5XwM + EDm9YrC+s4RPLQBMT6tiSUfIoVm2WAFHdDIZ1EECAwEAAaNdMFswDgYDVR0PAQH/ + BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIQDVGLUE4w2MoeUq50h + yymLqzjwMBkGA1UdEQQSMBCCDmZyb250LXByb3h5LWNhMA0GCSqGSIb3DQEBCwUA + A4IBAQAdZJFJ1bbh568LeNZNRc9jC2VNLl8uHjwfLG7WR7VxQCckK3jiYFGMGr15 + yeHRITN/2t3Tt+T/r+iW7a1ORU06G1Rth0ae5nVhCyJ/P1iJDP+kMUOG1LeX4nar + PBbST6i8mt9oSBg40QDv3wvd9Y40sv4wAB9++Y2wOkZTQhLzmhplmLZ0uCojObcx + YexJKvLZhUVQAGzJq8Rs+cH5bhqtBFUTPJf/+6O1IUKMBgugXiRHLtOXJUgM3Dvr + 3y0uYWAF3quu53pSpFHl3QcpM9rNaLN+yzFJwFPAcQn/Ptk3rZUAXV8oYqb+ARo9 + CNoPl2s8sFtHbrF3R4Be5d94ohdw + -----END CERTIFICATE----- + requestheader-extra-headers-prefix: '["X-Remote-Extra-"]' + requestheader-group-headers: '["X-Remote-Group"]' + 
requestheader-username-headers: '["X-Remote-User"]' + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:12Z" + name: extension-apiserver-authentication + namespace: kube-system + resourceVersion: "11" + uid: a4f34b34-65c0-4f89-8612-d6f22ed6e3f9 + - apiVersion: v1 + data: + since: "2025-11-02" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:12Z" + name: kube-apiserver-legacy-service-account-token-tracking + namespace: kube-system + resourceVersion: "62" + uid: df466487-b68e-4527-8094-2e4f0139191e + - apiVersion: v1 + data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: 0.0.0.0 + bindAddressHardFail: false + clientConnection: + acceptContentTypes: "" + burst: 0 + contentType: "" + kubeconfig: /var/lib/kube-proxy/kubeconfig.conf + qps: 0 + clusterCIDR: 10.244.0.0/16 + configSyncPeriod: 0s + conntrack: + maxPerCore: 0 + min: null + tcpBeLiberal: false + tcpCloseWaitTimeout: 0s + tcpEstablishedTimeout: 0s + udpStreamTimeout: 0s + udpTimeout: 0s + detectLocal: + bridgeInterface: "" + interfaceNamePrefix: "" + detectLocalMode: "" + enableProfiling: false + healthzBindAddress: "" + hostnameOverride: "" + iptables: + localhostNodePorts: null + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + ipvs: + excludeCIDRs: null + minSyncPeriod: 0s + scheduler: "" + strictARP: false + syncPeriod: 0s + tcpFinTimeout: 0s + tcpTimeout: 0s + udpTimeout: 0s + kind: KubeProxyConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + metricsBindAddress: 0.0.0.0:10249 + mode: "" + nftables: + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + nodePortAddresses: null + oomScoreAdj: null + portRange: "" + showHiddenMetricsForVersion: "" + winkernel: + enableDSR: false + forwardHealthCheckVip: false + networkName: "" + rootHnsEndpointName: "" + sourceVip: "" + kubeconfig.conf: |- + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://control-plane.minikube.internal:8443 + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:14Z" + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system + resourceVersion: "252" + uid: f848d8b3-cdc7-4fbe-8a02-5b0e279df0ec + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + 
C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:25:20Z" + name: kube-root-ca.crt + namespace: kube-system + resourceVersion: "301" + uid: 1c7afc07-c726-4915-9483-4ceabec7f9fe + - apiVersion: v1 + data: + ClusterConfiguration: | + apiServer: + certSANs: + - 127.0.0.1 + - localhost + - 192.168.94.2 + extraArgs: + - name: enable-admission-plugins + value: NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + apiVersion: kubeadm.k8s.io/v1beta4 + caCertificateValidityPeriod: 87600h0m0s + certificateValidityPeriod: 8760h0m0s + certificatesDir: /var/lib/minikube/certs + clusterName: mk + controlPlaneEndpoint: control-plane.minikube.internal:8443 + controllerManager: + extraArgs: + - name: allocate-node-cidrs + value: "true" + - name: leader-elect + value: "false" + dns: {} + encryptionAlgorithm: RSA-2048 + etcd: + local: + dataDir: /var/lib/minikube/etcd + imageRepository: registry.k8s.io + kind: ClusterConfiguration + kubernetesVersion: v1.34.1 + networking: + dnsDomain: cluster.local + podSubnet: 10.244.0.0/16 + serviceSubnet: 10.96.0.0/12 + proxy: {} + scheduler: + extraArgs: + - name: leader-elect + value: "false" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:13Z" + name: kubeadm-config + namespace: kube-system + resourceVersion: "207" + uid: ebf471b2-5dec-4acf-b943-85c8d38451ea + - apiVersion: v1 + data: + kubelet: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:13Z" + name: kubelet-config + namespace: kube-system + resourceVersion: 
"210" + uid: 4bd45cef-c836-44ce-9833-4f5599e254bb + kind: List + metadata: + resourceVersion: "" + + + >>> host: docker daemon status: + ● docker.service - Docker Application Container Engine + Loaded: loaded (]8;;file://kubenet-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;;; enabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:25:05 UTC; 1min 7s ago + TriggeredBy: ● docker.socket + Docs: ]8;;https://docs.docker.comhttps://docs.docker.com]8;; + Main PID: 1039 (dockerd) + Tasks: 14 + Memory: 170.2M + CPU: 2.496s + CGroup: /system.slice/docker.service + └─1039 /usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + + Nov 02 23:25:05 kubenet-999044 dockerd[1039]: time="2025-11-02T23:25:05.184967918Z" level=info msg="Loading containers: done." + Nov 02 23:25:05 kubenet-999044 dockerd[1039]: time="2025-11-02T23:25:05.192621961Z" level=info msg="Docker daemon" commit=f8215cc containerd-snapshotter=false storage-driver=overlay2 version=28.5.1 + Nov 02 23:25:05 kubenet-999044 dockerd[1039]: time="2025-11-02T23:25:05.192664751Z" level=info msg="Initializing buildkit" + Nov 02 23:25:05 kubenet-999044 dockerd[1039]: time="2025-11-02T23:25:05.208910000Z" level=info msg="Completed buildkit initialization" + Nov 02 23:25:05 kubenet-999044 dockerd[1039]: time="2025-11-02T23:25:05.215292696Z" level=info msg="Daemon has completed initialization" + Nov 02 23:25:05 kubenet-999044 dockerd[1039]: time="2025-11-02T23:25:05.215339872Z" level=info msg="API listen on /run/docker.sock" + Nov 02 23:25:05 kubenet-999044 dockerd[1039]: time="2025-11-02T23:25:05.215383334Z" level=info msg="API listen on [::]:2376" + Nov 02 23:25:05 kubenet-999044 dockerd[1039]: time="2025-11-02T23:25:05.215435164Z" level=info msg="API listen on /var/run/docker.sock" + Nov 02 23:25:05 kubenet-999044 systemd[1]: Started docker.service - Docker Application Container Engine. + Nov 02 23:25:51 kubenet-999044 dockerd[1039]: time="2025-11-02T23:25:51.138994575Z" level=info msg="ignoring event" container=bf913e59dad47385f47d88a3e80bdb12aebd5e86c64ca5ffcfd7f26206b64889 module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + + + >>> host: docker daemon config: + # ]8;;file://kubenet-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;; + [Unit] + Description=Docker Application Container Engine + Documentation=https://docs.docker.com + After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target + Wants=network-online.target containerd.service + Requires=docker.socket + StartLimitBurst=3 + StartLimitIntervalSec=60 + + [Service] + Type=notify + Restart=always + + + + # This file is a systemd drop-in unit that inherits from the base dockerd configuration. + # The base configuration already specifies an 'ExecStart=...' command. The first directive + # here is to clear out that command inherited from the base configuration. 
Without this, + # the command from the base configuration and the command specified here are treated as + # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd + # will catch this invalid input and refuse to start the service with an error like: + # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. + + # NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other + # container runtimes. If left unlimited, it may result in OOM issues with MySQL. + ExecStart= + ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + ExecReload=/bin/kill -s HUP $MAINPID + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Uncomment TasksMax if your systemd version supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + TimeoutStartSec=0 + + # set delegate yes so that systemd does not reset the cgroups of docker containers + Delegate=yes + + # kill only the docker process, not all processes in the cgroup + KillMode=process + OOMScoreAdjust=-500 + + [Install] + WantedBy=multi-user.target + + + >>> host: /etc/docker/daemon.json: + {"exec-opts":["native.cgroupdriver=systemd"],"log-driver":"json-file","log-opts":{"max-size":"100m"},"storage-driver":"overlay2"} + + >>> host: docker system info: + Client: Docker Engine - Community + Version: 28.5.1 + Context: default + Debug Mode: false + Plugins: + buildx: Docker Buildx (Docker Inc.) 
+ Version: v0.29.1 + Path: /usr/libexec/docker/cli-plugins/docker-buildx + + Server: + Containers: 17 + Running: 16 + Paused: 0 + Stopped: 1 + Images: 9 + Server Version: 28.5.1 + Storage Driver: overlay2 + Backing Filesystem: extfs + Supports d_type: true + Using metacopy: false + Native Overlay Diff: true + userxattr: false + Logging Driver: json-file + Cgroup Driver: systemd + Cgroup Version: 2 + Plugins: + Volume: local + Network: bridge host ipvlan macvlan null overlay + Log: awslogs fluentd gcplogs gelf journald json-file local splunk syslog + CDI spec directories: + /etc/cdi + /var/run/cdi + Swarm: inactive + Runtimes: io.containerd.runc.v2 runc + Default Runtime: runc + Init Binary: docker-init + containerd version: b98a3aace656320842a23f4a392a33f46af97866 + runc version: v1.3.0-0-g4ca628d1 + init version: de40ad0 + Security Options: + seccomp + Profile: builtin + cgroupns + Kernel Version: 6.6.97+ + Operating System: Debian GNU/Linux 12 (bookworm) + OSType: linux + Architecture: x86_64 + CPUs: 8 + Total Memory: 60.83GiB + Name: kubenet-999044 + ID: abfe8f62-b4f7-4abc-bea3-cc3da4f81efa + Docker Root Dir: /var/lib/docker + Debug Mode: false + No Proxy: control-plane.minikube.internal + Labels: + provider=docker + Experimental: false + Insecure Registries: + 10.96.0.0/12 + ::1/128 + 127.0.0.0/8 + Live Restore Enabled: false + + + + >>> host: cri-docker daemon status: + ● cri-docker.service - CRI Interface for Docker Application Container Engine + Loaded: loaded (]8;;file://kubenet-999044/lib/systemd/system/cri-docker.service/lib/systemd/system/cri-docker.service]8;;; disabled; preset: enabled) + Drop-In: /etc/systemd/system/cri-docker.service.d + └─]8;;file://kubenet-999044/etc/systemd/system/cri-docker.service.d/10-cni.conf10-cni.conf]8;; + Active: active (running) since Sun 2025-11-02 23:25:05 UTC; 1min 9s ago + TriggeredBy: ● cri-docker.socket + Docs: ]8;;https://docs.mirantis.comhttps://docs.mirantis.com]8;; + Main PID: 1348 (cri-dockerd) + Tasks: 13 + Memory: 17.0M + CPU: 539ms + CGroup: /system.slice/cri-docker.service + └─1348 /usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=kubenet --hairpin-mode=hairpin-veth + + Nov 02 23:25:20 kubenet-999044 cri-dockerd[1348]: time="2025-11-02T23:25:20Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/159282dadd31be4992b72d5c8b3ef412402bc537a02e4d27de5082004157e34c/resolv.conf as [nameserver 192.168.94.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:21 kubenet-999044 cri-dockerd[1348]: time="2025-11-02T23:25:21Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0456bb21054f72d98ed90a94032e4a28091cb5187d74d5c546fe8bfb04b5f755/resolv.conf as [nameserver 192.168.94.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:21 kubenet-999044 cri-dockerd[1348]: time="2025-11-02T23:25:21Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/94187b74468ecf31043d5979e98b5970e7f5a4f0326e1a2e4abe3a9fce6803df/resolv.conf as [nameserver 192.168.94.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:21 kubenet-999044 cri-dockerd[1348]: time="2025-11-02T23:25:21Z" level=info msg="Didn't find 
bandwidth interface, creating" + Nov 02 23:25:21 kubenet-999044 cri-dockerd[1348]: time="2025-11-02T23:25:21Z" level=info msg="Running: tc qdisc add dev cbr0 root handle 1: htb default 30" + Nov 02 23:25:21 kubenet-999044 cri-dockerd[1348]: time="2025-11-02T23:25:21Z" level=info msg="Output from tc: " + Nov 02 23:25:24 kubenet-999044 cri-dockerd[1348]: time="2025-11-02T23:25:24Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}" + Nov 02 23:25:24 kubenet-999044 cri-dockerd[1348]: time="2025-11-02T23:25:24Z" level=info msg="Ignoring subsequent pod CIDR update to new cidr 10.244.0.0/24" + Nov 02 23:25:55 kubenet-999044 cri-dockerd[1348]: time="2025-11-02T23:25:55Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/a295c86f81a65d69c8ca2365ef1eb8d65e9a3cde799317186b2a89850fe5eaf9/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local test-pods.svc.cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:56 kubenet-999044 cri-dockerd[1348]: time="2025-11-02T23:25:56Z" level=info msg="Stop pulling image registry.k8s.io/e2e-test-images/agnhost:2.40: Status: Downloaded newer image for registry.k8s.io/e2e-test-images/agnhost:2.40" + + + >>> host: cri-docker daemon config: + # ]8;;file://kubenet-999044/lib/systemd/system/cri-docker.service/lib/systemd/system/cri-docker.service]8;; + [Unit] + Description=CRI Interface for Docker Application Container Engine + Documentation=https://docs.mirantis.com + After=network-online.target firewalld.service docker.service + Wants=network-online.target + Requires=cri-docker.socket + + [Service] + Type=notify + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// + ExecReload=/bin/kill -s HUP $MAINPID + TimeoutSec=0 + RestartSec=2 + Restart=always + + # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. + # Both the old, and new location are accepted by systemd 229 and up, so using the old location + # to make them work for either version of systemd. + StartLimitBurst=3 + + # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. + # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make + # this option work for either version of systemd. + StartLimitInterval=60s + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Comment TasksMax if your systemd version does not support it. + # Only systemd 226 and above support this option. 
+ TasksMax=infinity + Delegate=yes + KillMode=process + + [Install] + WantedBy=multi-user.target + + # ]8;;file://kubenet-999044/etc/systemd/system/cri-docker.service.d/10-cni.conf/etc/systemd/system/cri-docker.service.d/10-cni.conf]8;; + [Service] + ExecStart= + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=kubenet --hairpin-mode=hairpin-veth + + + >>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf: + [Service] + ExecStart= + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=kubenet --hairpin-mode=hairpin-veth + + >>> host: /usr/lib/systemd/system/cri-docker.service: + [Unit] + Description=CRI Interface for Docker Application Container Engine + Documentation=https://docs.mirantis.com + After=network-online.target firewalld.service docker.service + Wants=network-online.target + Requires=cri-docker.socket + + [Service] + Type=notify + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// + ExecReload=/bin/kill -s HUP $MAINPID + TimeoutSec=0 + RestartSec=2 + Restart=always + + # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. + # Both the old, and new location are accepted by systemd 229 and up, so using the old location + # to make them work for either version of systemd. + StartLimitBurst=3 + + # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. + # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make + # this option work for either version of systemd. + StartLimitInterval=60s + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Comment TasksMax if your systemd version does not support it. + # Only systemd 226 and above support this option. 
+ TasksMax=infinity + Delegate=yes + KillMode=process + + [Install] + WantedBy=multi-user.target + + + >>> host: cri-dockerd version: + cri-dockerd dev (HEAD) + + + >>> host: containerd daemon status: + ● containerd.service - containerd container runtime + Loaded: loaded (]8;;file://kubenet-999044/lib/systemd/system/containerd.service/lib/systemd/system/containerd.service]8;;; disabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:25:04 UTC; 1min 11s ago + Docs: ]8;;https://containerd.iohttps://containerd.io]8;; + Main PID: 1026 (containerd) + Tasks: 187 + Memory: 93.9M + CPU: 1.039s + CGroup: /system.slice/containerd.service + ├─1026 /usr/bin/containerd + ├─1789 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 5bfd9f8b74cb3ed02bef4e63c0a54a27aa766d71ba98fbba8ec94540e1879e5d -address /run/containerd/containerd.sock + ├─1792 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 014017b04a3541555b72356c04b2034f99ee30398cdb8e7c04d791a4b9f52b75 -address /run/containerd/containerd.sock + ├─1847 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 326a0e373622a425b6de9e0f347ca485f424da539380a7bf7ef6aa2efd7a9a06 -address /run/containerd/containerd.sock + ├─1864 /usr/bin/containerd-shim-runc-v2 -namespace moby -id f5a14940f72edd311ec93955e39250d92e1704d218f7c0cfc61104fede5428be -address /run/containerd/containerd.sock + ├─1978 /usr/bin/containerd-shim-runc-v2 -namespace moby -id abfe56e81cec0059eeba27e1793aa33257fbd2b277e5413f8e865e8e6607387c -address /run/containerd/containerd.sock + ├─1980 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 968a630167b8c641869e61fbb85201f2d7d3c8fa806fb29877b367bd1ac247c9 -address /run/containerd/containerd.sock + ├─1992 /usr/bin/containerd-shim-runc-v2 -namespace moby -id fb0b7f9fc52f8b062ffee85677e706f37110db344a9f5865b865aea471c727d7 -address /run/containerd/containerd.sock + ├─2080 /usr/bin/containerd-shim-runc-v2 -namespace moby -id a6515770d118cfed5edef69482377fdb3e46eba3b5bb7e9c1eedd657bb1feb5f -address /run/containerd/containerd.sock + ├─2485 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 159282dadd31be4992b72d5c8b3ef412402bc537a02e4d27de5082004157e34c -address /run/containerd/containerd.sock + ├─2530 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 0456bb21054f72d98ed90a94032e4a28091cb5187d74d5c546fe8bfb04b5f755 -address /run/containerd/containerd.sock + ├─2568 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 1d8d284f653693eff92f9c0d3af3f36498c024ef7bcfb895d5be6e245394dd22 -address /run/containerd/containerd.sock + ├─2588 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 94187b74468ecf31043d5979e98b5970e7f5a4f0326e1a2e4abe3a9fce6803df -address /run/containerd/containerd.sock + ├─2740 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 5bb3c428818ce59843b410f45417a4c66f6e83dc657bc0d15c73c0f7488c5a40 -address /run/containerd/containerd.sock + ├─2969 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 44117ed04ae70ca683385cd4f6daeb94b1a75f229c114550176d86174b4dc160 -address /run/containerd/containerd.sock + ├─3035 /usr/bin/containerd-shim-runc-v2 -namespace moby -id a295c86f81a65d69c8ca2365ef1eb8d65e9a3cde799317186b2a89850fe5eaf9 -address /run/containerd/containerd.sock + └─3142 /usr/bin/containerd-shim-runc-v2 -namespace moby -id d159c111f804ca7911d021cb66cb90f114a49379d8f89665bd2b564c36b1d86f -address /run/containerd/containerd.sock + + Nov 02 23:25:04 kubenet-999044 containerd[1026]: time="2025-11-02T23:25:04.945334340Z" level=info msg="Start event monitor" + Nov 02 23:25:04 kubenet-999044 containerd[1026]: 
time="2025-11-02T23:25:04.945343353Z" level=info msg="Start snapshots syncer" + Nov 02 23:25:04 kubenet-999044 containerd[1026]: time="2025-11-02T23:25:04.945350683Z" level=info msg=serving... address=/run/containerd/containerd.sock + Nov 02 23:25:04 kubenet-999044 containerd[1026]: time="2025-11-02T23:25:04.945350757Z" level=info msg="Start cni network conf syncer for default" + Nov 02 23:25:04 kubenet-999044 containerd[1026]: time="2025-11-02T23:25:04.945364952Z" level=info msg="Start streaming server" + Nov 02 23:25:04 kubenet-999044 containerd[1026]: time="2025-11-02T23:25:04.945412794Z" level=info msg="containerd successfully booted in 0.016729s" + Nov 02 23:25:04 kubenet-999044 systemd[1]: Started containerd.service - containerd container runtime. + Nov 02 23:25:51 kubenet-999044 containerd[1026]: time="2025-11-02T23:25:51.138866233Z" level=info msg="shim disconnected" id=bf913e59dad47385f47d88a3e80bdb12aebd5e86c64ca5ffcfd7f26206b64889 namespace=moby + Nov 02 23:25:51 kubenet-999044 containerd[1026]: time="2025-11-02T23:25:51.139012037Z" level=warning msg="cleaning up after shim disconnected" id=bf913e59dad47385f47d88a3e80bdb12aebd5e86c64ca5ffcfd7f26206b64889 namespace=moby + Nov 02 23:25:51 kubenet-999044 containerd[1026]: time="2025-11-02T23:25:51.139024440Z" level=info msg="cleaning up dead shim" namespace=moby + + + >>> host: containerd daemon config: + # ]8;;file://kubenet-999044/lib/systemd/system/containerd.service/lib/systemd/system/containerd.service]8;; + # Copyright The containerd Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /lib/systemd/system/containerd.service: + # Copyright The containerd Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ # See the License for the specific language governing permissions and + # limitations under the License. + + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /etc/containerd/config.toml: + version = 2 + root = "/var/lib/containerd" + state = "/run/containerd" + oom_score = 0 + # imports + + [grpc] + address = "/run/containerd/containerd.sock" + uid = 0 + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + + [debug] + address = "" + uid = 0 + gid = 0 + level = "" + + [metrics] + address = "" + grpc_histogram = false + + [cgroup] + path = "" + + [plugins] + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + [plugins."io.containerd.grpc.v1.cri"] + enable_unprivileged_ports = true + stream_server_address = "" + stream_server_port = "10010" + enable_selinux = false + sandbox_image = "registry.k8s.io/pause:3.10.1" + stats_collect_period = 10 + enable_tls_streaming = false + max_container_log_line_size = 16384 + restrict_oom_score_adj = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = true + snapshotter = "overlayfs" + default_runtime_name = "runc" + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + runtime_type = "" + runtime_engine = "" + runtime_root = "" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + [plugins."io.containerd.gc.v1.scheduler"] + pause_threshold = 0.02 + deletion_threshold = 0 + mutation_threshold = 100 + schedule_delay = "0s" + startup_delay = "100ms" + + + >>> host: containerd config dump: + disabled_plugins = [] + imports = ["/etc/containerd/config.toml"] + oom_score = 0 + plugin_dir = "" + required_plugins = [] + root = "/var/lib/containerd" + state = "/run/containerd" + temp = "" + version = 2 + + [cgroup] + path = "" + + [debug] + address = "" + format = "" + gid = 0 + level = "" + uid = 0 + + [grpc] + address = "/run/containerd/containerd.sock" + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + tcp_address = "" + tcp_tls_ca = "" + tcp_tls_cert = "" + tcp_tls_key = "" + uid = 0 + + [metrics] + address = "" + grpc_histogram = false + + [plugins] + + [plugins."io.containerd.gc.v1.scheduler"] 
+ deletion_threshold = 0 + mutation_threshold = 100 + pause_threshold = 0.02 + schedule_delay = "0s" + startup_delay = "100ms" + + [plugins."io.containerd.grpc.v1.cri"] + cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"] + device_ownership_from_security_context = false + disable_apparmor = false + disable_cgroup = false + disable_hugetlb_controller = true + disable_proc_mount = false + disable_tcp_service = true + drain_exec_sync_io_timeout = "0s" + enable_cdi = false + enable_selinux = false + enable_tls_streaming = false + enable_unprivileged_icmp = false + enable_unprivileged_ports = true + ignore_deprecation_warnings = [] + ignore_image_defined_volumes = false + image_pull_progress_timeout = "5m0s" + image_pull_with_sync_fs = false + max_concurrent_downloads = 3 + max_container_log_line_size = 16384 + netns_mounts_under_state_dir = false + restrict_oom_score_adj = false + sandbox_image = "registry.k8s.io/pause:3.10.1" + selinux_category_range = 1024 + stats_collect_period = 10 + stream_idle_timeout = "4h0m0s" + stream_server_address = "" + stream_server_port = "10010" + systemd_cgroup = false + tolerate_missing_hugetlb_controller = true + unset_seccomp_profile = "" + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + ip_pref = "" + max_conf_num = 1 + setup_serially = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + default_runtime_name = "runc" + disable_snapshot_annotations = true + discard_unpacked_layers = true + ignore_blockio_not_enabled_errors = false + ignore_rdt_not_enabled_errors = false + no_pivot = false + snapshotter = "overlayfs" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "io.containerd.runc.v2" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".image_decryption] + key_model = "node" + + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.grpc.v1.cri".registry.auths] + + [plugins."io.containerd.grpc.v1.cri".registry.configs] + + 
[plugins."io.containerd.grpc.v1.cri".registry.headers] + + [plugins."io.containerd.grpc.v1.cri".registry.mirrors] + + [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming] + tls_cert_file = "" + tls_key_file = "" + + [plugins."io.containerd.internal.v1.opt"] + path = "/opt/containerd" + + [plugins."io.containerd.internal.v1.restart"] + interval = "10s" + + [plugins."io.containerd.internal.v1.tracing"] + + [plugins."io.containerd.metadata.v1.bolt"] + content_sharing_policy = "shared" + + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + + [plugins."io.containerd.nri.v1.nri"] + disable = true + disable_connections = false + plugin_config_path = "/etc/nri/conf.d" + plugin_path = "/opt/nri/plugins" + plugin_registration_timeout = "5s" + plugin_request_timeout = "2s" + socket_path = "/var/run/nri/nri.sock" + + [plugins."io.containerd.runtime.v1.linux"] + no_shim = false + runtime = "runc" + runtime_root = "" + shim = "containerd-shim" + shim_debug = false + + [plugins."io.containerd.runtime.v2.task"] + platforms = ["linux/amd64"] + sched_core = false + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + sync_fs = false + + [plugins."io.containerd.service.v1.tasks-service"] + blockio_config_file = "" + rdt_config_file = "" + + [plugins."io.containerd.snapshotter.v1.aufs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.blockfile"] + fs_type = "" + mount_options = [] + root_path = "" + scratch_file = "" + + [plugins."io.containerd.snapshotter.v1.btrfs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.devmapper"] + async_remove = false + base_image_size = "" + discard_blocks = false + fs_options = "" + fs_type = "" + pool_name = "" + root_path = "" + + [plugins."io.containerd.snapshotter.v1.native"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.overlayfs"] + mount_options = [] + root_path = "" + sync_remove = false + upperdir_label = false + + [plugins."io.containerd.snapshotter.v1.zfs"] + root_path = "" + + [plugins."io.containerd.tracing.processor.v1.otlp"] + + [plugins."io.containerd.transfer.v1.local"] + config_path = "" + max_concurrent_downloads = 3 + max_concurrent_uploaded_layers = 3 + + [[plugins."io.containerd.transfer.v1.local".unpack_config]] + differ = "walking" + platform = "linux/amd64" + snapshotter = "overlayfs" + + [proxy_plugins] + + [stream_processors] + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"] + accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"] + args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = "application/vnd.oci.image.layer.v1.tar" + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"] + accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"] + args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = "application/vnd.oci.image.layer.v1.tar+gzip" + + [timeouts] + "io.containerd.timeout.bolt.open" = "0s" + "io.containerd.timeout.metrics.shimstats" = "2s" + "io.containerd.timeout.shim.cleanup" = "5s" + "io.containerd.timeout.shim.load" = "5s" + "io.containerd.timeout.shim.shutdown" = "3s" + "io.containerd.timeout.task.state" = "2s" + + [ttrpc] + address = "" + gid = 0 + uid = 0 + + + >>> host: crio daemon status: + ○ crio.service - Container Runtime 
Interface for OCI (CRI-O) + Loaded: loaded (]8;;file://kubenet-999044/lib/systemd/system/crio.service/lib/systemd/system/crio.service]8;;; disabled; preset: enabled) + Active: inactive (dead) + Docs: ]8;;https://github.com/cri-o/cri-ohttps://github.com/cri-o/cri-o]8;; + ssh: Process exited with status 3 + + + >>> host: crio daemon config: + # ]8;;file://kubenet-999044/lib/systemd/system/crio.service/lib/systemd/system/crio.service]8;; + [Unit] + Description=Container Runtime Interface for OCI (CRI-O) + Documentation=https://github.com/cri-o/cri-o + Wants=network-online.target + Before=kubelet.service + After=network-online.target + + [Service] + Type=notify + EnvironmentFile=-/etc/default/crio + Environment=GOTRACEBACK=crash + ExecStart=/usr/bin/crio \ + $CRIO_CONFIG_OPTIONS \ + $CRIO_RUNTIME_OPTIONS \ + $CRIO_STORAGE_OPTIONS \ + $CRIO_NETWORK_OPTIONS \ + $CRIO_METRICS_OPTIONS + ExecReload=/bin/kill -s HUP $MAINPID + TasksMax=infinity + LimitNOFILE=1048576 + LimitNPROC=1048576 + LimitCORE=infinity + OOMScoreAdjust=-999 + TimeoutStartSec=0 + Restart=on-failure + RestartSec=10 + + [Install] + WantedBy=multi-user.target + Alias=cri-o.service + + + >>> host: /etc/crio: + /etc/crio/crio.conf.d/10-crio.conf + [crio.image] + signature_policy = "/etc/crio/policy.json" + + [crio.runtime] + default_runtime = "crun" + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_root = "/run/crun" + monitor_path = "/usr/libexec/crio/conmon" + allowed_annotations = [ + "io.containers.trace-syscall", + ] + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_root = "/run/runc" + monitor_path = "/usr/libexec/crio/conmon" + /etc/crio/crio.conf.d/02-crio.conf + [crio.image] + # pause_image = "" + + [crio.network] + # cni_default_network = "" + + [crio.runtime] + # cgroup_manager = "" + /etc/crio/policy.json + { "default": [{ "type": "insecureAcceptAnything" }] } + + + >>> host: crio config: + INFO[2025-11-02T23:26:17.806976212Z] Updating config from single file: /etc/crio/crio.conf + INFO[2025-11-02T23:26:17.806994394Z] Updating config from drop-in file: /etc/crio/crio.conf + INFO[2025-11-02T23:26:17.807021986Z] Skipping not-existing config file "/etc/crio/crio.conf" + INFO[2025-11-02T23:26:17.807036499Z] Updating config from path: /etc/crio/crio.conf.d + INFO[2025-11-02T23:26:17.807083058Z] Updating config from drop-in file: /etc/crio/crio.conf.d/02-crio.conf + INFO[2025-11-02T23:26:17.807167532Z] Updating config from drop-in file: /etc/crio/crio.conf.d/10-crio.conf + INFO Using default capabilities: CAP_CHOWN, CAP_DAC_OVERRIDE, CAP_FSETID, CAP_FOWNER, CAP_SETGID, CAP_SETUID, CAP_SETPCAP, CAP_NET_BIND_SERVICE, CAP_KILL + # The CRI-O configuration file specifies all of the available configuration + # options and command-line flags for the crio(8) OCI Kubernetes Container Runtime + # daemon, but in a TOML format that can be more easily modified and versioned. + # + # Please refer to crio.conf(5) for details of all configuration options. + + # CRI-O supports partial configuration reload during runtime, which can be + # done by sending SIGHUP to the running process. Currently supported options + # are explicitly mentioned with: 'This option supports live configuration + # reload'. + + # CRI-O reads its storage defaults from the containers-storage.conf(5) file + # located at /etc/containers/storage.conf. Modify this storage configuration if + # you want to change the system's defaults. 
If you want to modify storage just + # for CRI-O, you can change the storage configuration options here. + [crio] + + # Path to the "root directory". CRI-O stores all of its data, including + # containers images, in this directory. + # root = "/var/lib/containers/storage" + + # Path to the "run directory". CRI-O stores all of its state in this directory. + # runroot = "/run/containers/storage" + + # Path to the "imagestore". If CRI-O stores all of its images in this directory differently than Root. + # imagestore = "" + + # Storage driver used to manage the storage of images and containers. Please + # refer to containers-storage.conf(5) to see all available storage drivers. + # storage_driver = "" + + # List to pass options to the storage driver. Please refer to + # containers-storage.conf(5) to see all available storage options. + # storage_option = [ + # ] + + # The default log directory where all logs will go unless directly specified by + # the kubelet. The log directory specified must be an absolute directory. + # log_dir = "/var/log/crio/pods" + + # Location for CRI-O to lay down the temporary version file. + # It is used to check if crio wipe should wipe containers, which should + # always happen on a node reboot + # version_file = "/var/run/crio/version" + + # Location for CRI-O to lay down the persistent version file. + # It is used to check if crio wipe should wipe images, which should + # only happen when CRI-O has been upgraded + # version_file_persist = "" + + # InternalWipe is whether CRI-O should wipe containers and images after a reboot when the server starts. + # If set to false, one must use the external command 'crio wipe' to wipe the containers and images in these situations. + # internal_wipe = true + + # InternalRepair is whether CRI-O should check if the container and image storage was corrupted after a sudden restart. + # If it was, CRI-O also attempts to repair the storage. + # internal_repair = true + + # Location for CRI-O to lay down the clean shutdown file. + # It is used to check whether crio had time to sync before shutting down. + # If not found, crio wipe will clear the storage directory. + # clean_shutdown_file = "/var/lib/crio/clean.shutdown" + + # The crio.api table contains settings for the kubelet/gRPC interface. + [crio.api] + + # Path to AF_LOCAL socket on which CRI-O will listen. + # listen = "/var/run/crio/crio.sock" + + # IP address on which the stream server will listen. + # stream_address = "127.0.0.1" + + # The port on which the stream server will listen. If the port is set to "0", then + # CRI-O will allocate a random free port number. + # stream_port = "0" + + # Enable encrypted TLS transport of the stream server. + # stream_enable_tls = false + + # Length of time until open streams terminate due to lack of activity + # stream_idle_timeout = "" + + # Path to the x509 certificate file used to serve the encrypted stream. This + # file can change, and CRI-O will automatically pick up the changes. + # stream_tls_cert = "" + + # Path to the key file used to serve the encrypted stream. This file can + # change and CRI-O will automatically pick up the changes. + # stream_tls_key = "" + + # Path to the x509 CA(s) file used to verify and authenticate client + # communication with the encrypted stream. This file can change and CRI-O will + # automatically pick up the changes. + # stream_tls_ca = "" + + # Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 80 * 1024 * 1024. 
+ # grpc_max_send_msg_size = 83886080 + + # Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 80 * 1024 * 1024. + # grpc_max_recv_msg_size = 83886080 + + # The crio.runtime table contains settings pertaining to the OCI runtime used + # and options for how to set up and manage the OCI runtime. + [crio.runtime] + + # A list of ulimits to be set in containers by default, specified as + # "=:", for example: + # "nofile=1024:2048" + # If nothing is set here, settings will be inherited from the CRI-O daemon + # default_ulimits = [ + # ] + + # If true, the runtime will not use pivot_root, but instead use MS_MOVE. + # no_pivot = false + + # decryption_keys_path is the path where the keys required for + # image decryption are stored. This option supports live configuration reload. + # decryption_keys_path = "/etc/crio/keys/" + + # Path to the conmon binary, used for monitoring the OCI runtime. + # Will be searched for using $PATH if empty. + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon = "" + + # Cgroup setting for conmon + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorCgroup. + # conmon_cgroup = "" + + # Environment variable list for the conmon process, used for passing necessary + # environment variables to conmon or the runtime. + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon_env = [ + # ] + + # Additional environment variables to set for all the + # containers. These are overridden if set in the + # container image spec or in the container runtime configuration. + # default_env = [ + # ] + + # If true, SELinux will be used for pod separation on the host. + # This option is deprecated, and be interpreted from whether SELinux is enabled on the host in the future. + # selinux = false + + # Path to the seccomp.json profile which is used as the default seccomp profile + # for the runtime. If not specified or set to "", then the internal default seccomp profile will be used. + # This option supports live configuration reload. + # seccomp_profile = "" + + # Enable a seccomp profile for privileged containers from the local path. + # This option supports live configuration reload. + # privileged_seccomp_profile = "" + + # Used to change the name of the default AppArmor profile of CRI-O. The default + # profile name is "crio-default". This profile only takes effect if the user + # does not specify a profile via the Kubernetes Pod's metadata annotation. If + # the profile is set to "unconfined", then this equals to disabling AppArmor. + # This option supports live configuration reload. + # apparmor_profile = "crio-default" + + # Path to the blockio class configuration file for configuring + # the cgroup blockio controller. + # blockio_config_file = "" + + # Reload blockio-config-file and rescan blockio devices in the system before applying + # blockio parameters. + # blockio_reload = false + + # Used to change irqbalance service config file path which is used for configuring + # irqbalance daemon. + # irqbalance_config_file = "/etc/sysconfig/irqbalance" + + # irqbalance_config_restore_file allows to set a cpu mask CRI-O should + # restore as irqbalance config at startup. Set to empty string to disable this flow entirely. + # By default, CRI-O manages the irqbalance configuration to enable dynamic IRQ pinning. 
+ # irqbalance_config_restore_file = "/etc/sysconfig/orig_irq_banned_cpus" + + # Path to the RDT configuration file for configuring the resctrl pseudo-filesystem. + # This option supports live configuration reload. + # rdt_config_file = "" + + # Cgroup management implementation used for the runtime. + # cgroup_manager = "systemd" + + # Specify whether the image pull must be performed in a separate cgroup. + # separate_pull_cgroup = "" + + # List of default capabilities for containers. If it is empty or commented out, + # only the capabilities defined in the containers json file by the user/kube + # will be added. + # default_capabilities = [ + # "CHOWN", + # "DAC_OVERRIDE", + # "FSETID", + # "FOWNER", + # "SETGID", + # "SETUID", + # "SETPCAP", + # "NET_BIND_SERVICE", + # "KILL", + # ] + + # Add capabilities to the inheritable set, as well as the default group of permitted, bounding and effective. + # If capabilities are expected to work for non-root users, this option should be set. + # add_inheritable_capabilities = false + + # List of default sysctls. If it is empty or commented out, only the sysctls + # defined in the container json file by the user/kube will be added. + # default_sysctls = [ + # ] + + # List of devices on the host that a + # user can specify with the "io.kubernetes.cri-o.Devices" allowed annotation. + # allowed_devices = [ + # "/dev/fuse", + # "/dev/net/tun", + # ] + + # List of additional devices. specified as + # "::", for example: "--device=/dev/sdc:/dev/xvdc:rwm". + # If it is empty or commented out, only the devices + # defined in the container json file by the user/kube will be added. + # additional_devices = [ + # ] + + # List of directories to scan for CDI Spec files. + # cdi_spec_dirs = [ + # "/etc/cdi", + # "/var/run/cdi", + # ] + + # Change the default behavior of setting container devices uid/gid from CRI's + # SecurityContext (RunAsUser/RunAsGroup) instead of taking host's uid/gid. + # Defaults to false. + # device_ownership_from_security_context = false + + # Path to OCI hooks directories for automatically executed hooks. If one of the + # directories does not exist, then CRI-O will automatically skip them. + # hooks_dir = [ + # "/usr/share/containers/oci/hooks.d", + # ] + + # Path to the file specifying the defaults mounts for each container. The + # format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads + # its default mounts from the following two files: + # + # 1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the + # override file, where users can either add in their own default mounts, or + # override the default mounts shipped with the package. + # + # 2) /usr/share/containers/mounts.conf: This is the default file read for + # mounts. If you want CRI-O to read from a different, specific mounts file, + # you can change the default_mounts_file. Note, if this is done, CRI-O will + # only add mounts it finds in this file. + # + # default_mounts_file = "" + + # Maximum number of processes allowed in a container. + # This option is deprecated. The Kubelet flag '--pod-pids-limit' should be used instead. + # pids_limit = -1 + + # Maximum sized allowed for the container log file. Negative numbers indicate + # that no size limit is imposed. If it is positive, it must be >= 8192 to + # match/exceed conmon's read buffer. The file is truncated and re-opened so the + # limit is never exceeded. This option is deprecated. The Kubelet flag '--container-log-max-size' should be used instead. 
+ # log_size_max = -1 + + # Whether container output should be logged to journald in addition to the kubernetes log file + # log_to_journald = false + + # Path to directory in which container exit files are written to by conmon. + # container_exits_dir = "/var/run/crio/exits" + + # Path to directory for container attach sockets. + # container_attach_socket_dir = "/var/run/crio" + + # The prefix to use for the source of the bind mounts. + # bind_mount_prefix = "" + + # If set to true, all containers will run in read-only mode. + # read_only = false + + # Changes the verbosity of the logs based on the level it is set to. Options + # are fatal, panic, error, warn, info, debug and trace. This option supports + # live configuration reload. + # log_level = "info" + + # Filter the log messages by the provided regular expression. + # This option supports live configuration reload. + # log_filter = "" + + # The UID mappings for the user namespace of each container. A range is + # specified in the form containerUID:HostUID:Size. Multiple ranges must be + # separated by comma. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # uid_mappings = "" + + # The GID mappings for the user namespace of each container. A range is + # specified in the form containerGID:HostGID:Size. Multiple ranges must be + # separated by comma. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # gid_mappings = "" + + # If set, CRI-O will reject any attempt to map host UIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_uid = -1 + + # If set, CRI-O will reject any attempt to map host GIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_gid = -1 + + # The minimal amount of time in seconds to wait before issuing a timeout + # regarding the proper termination of the container. The lowest possible + # value is 30s, whereas lower values are not considered by CRI-O. + # ctr_stop_timeout = 30 + + # drop_infra_ctr determines whether CRI-O drops the infra container + # when a pod does not have a private PID namespace, and does not use + # a kernel separating runtime (like kata). + # It requires manage_ns_lifecycle to be true. + # drop_infra_ctr = true + + # infra_ctr_cpuset determines what CPUs will be used to run infra containers. + # You can use linux CPU list format to specify desired CPUs. + # To get better isolation for guaranteed pods, set this parameter to be equal to kubelet reserved-cpus. + # infra_ctr_cpuset = "" + + # shared_cpuset determines the CPU set which is allowed to be shared between guaranteed containers, + # regardless of, and in addition to, the exclusiveness of their CPUs. + # This field is optional and would not be used if not specified. + # You can specify CPUs in the Linux CPU list format. + # shared_cpuset = "" + + # The directory where the state of the managed namespaces gets tracked. + # Only used when manage_ns_lifecycle is true. 
+ # namespaces_dir = "/var/run" + + # pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle + # pinns_path = "" + + # Globally enable/disable CRIU support which is necessary to + # checkpoint and restore container or pods (even if CRIU is found in $PATH). + # enable_criu_support = true + + # Enable/disable the generation of the container, + # sandbox lifecycle events to be sent to the Kubelet to optimize the PLEG + # enable_pod_events = false + + # default_runtime is the _name_ of the OCI runtime to be used as the default. + # The name is matched against the runtimes map below. + # default_runtime = "crun" + + # A list of paths that, when absent from the host, + # will cause a container creation to fail (as opposed to the current behavior being created as a directory). + # This option is to protect from source locations whose existence as a directory could jeopardize the health of the node, and whose + # creation as a file is not desired either. + # An example is /etc/hostname, which will cause failures on reboot if it's created as a directory, but often doesn't exist because + # the hostname is being managed dynamically. + # absent_mount_sources_to_reject = [ + # ] + + # The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes. + # The runtime to use is picked based on the runtime handler provided by the CRI. + # If no runtime handler is provided, the "default_runtime" will be used. + # Each entry in the table should follow the format: + # + # [crio.runtime.runtimes.runtime-handler] + # runtime_path = "/path/to/the/executable" + # runtime_type = "oci" + # runtime_root = "/path/to/the/root" + # inherit_default_runtime = false + # monitor_path = "/path/to/container/monitor" + # monitor_cgroup = "/cgroup/path" + # monitor_exec_cgroup = "/cgroup/path" + # monitor_env = [] + # privileged_without_host_devices = false + # allowed_annotations = [] + # platform_runtime_paths = { "os/arch" = "/path/to/binary" } + # no_sync_log = false + # default_annotations = {} + # stream_websockets = false + # seccomp_profile = "" + # Where: + # - runtime-handler: Name used to identify the runtime. + # - runtime_path (optional, string): Absolute path to the runtime executable in + # the host filesystem. If omitted, the runtime-handler identifier should match + # the runtime executable name, and the runtime executable should be placed + # in $PATH. + # - runtime_type (optional, string): Type of runtime, one of: "oci", "vm". If + # omitted, an "oci" runtime is assumed. + # - runtime_root (optional, string): Root directory for storage of containers + # state. + # - runtime_config_path (optional, string): the path for the runtime configuration + # file. This can only be used with when using the VM runtime_type. + # - inherit_default_runtime (optional, bool): when true the runtime_path, + # runtime_type, runtime_root and runtime_config_path will be replaced by + # the values from the default runtime on load time. + # - privileged_without_host_devices (optional, bool): an option for restricting + # host devices from being passed to privileged containers. + # - allowed_annotations (optional, array of strings): an option for specifying + # a list of experimental annotations that this runtime handler is allowed to process. + # The currently recognized values are: + # "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod. + # "io.kubernetes.cri-o.cgroup2-mount-hierarchy-rw" for mounting cgroups writably when set to "true". 
+ # "io.kubernetes.cri-o.Devices" for configuring devices for the pod. + # "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm. + # "io.kubernetes.cri-o.UnifiedCgroup.$CTR_NAME" for configuring the cgroup v2 unified block for a container. + # "io.containers.trace-syscall" for tracing syscalls via the OCI seccomp BPF hook. + # "io.kubernetes.cri-o.seccompNotifierAction" for enabling the seccomp notifier feature. + # "io.kubernetes.cri-o.umask" for setting the umask for container init process. + # "io.kubernetes.cri.rdt-class" for setting the RDT class of a container + # "seccomp-profile.kubernetes.cri-o.io" for setting the seccomp profile for: + # - a specific container by using: "seccomp-profile.kubernetes.cri-o.io/" + # - a whole pod by using: "seccomp-profile.kubernetes.cri-o.io/POD" + # Note that the annotation works on containers as well as on images. + # For images, the plain annotation "seccomp-profile.kubernetes.cri-o.io" + # can be used without the required "/POD" suffix or a container name. + # "io.kubernetes.cri-o.DisableFIPS" for disabling FIPS mode in a Kubernetes pod within a FIPS-enabled cluster. + # - monitor_path (optional, string): The path of the monitor binary. Replaces + # deprecated option "conmon". + # - monitor_cgroup (optional, string): The cgroup the container monitor process will be put in. + # Replaces deprecated option "conmon_cgroup". + # - monitor_exec_cgroup (optional, string): If set to "container", indicates exec probes + # should be moved to the container's cgroup + # - monitor_env (optional, array of strings): Environment variables to pass to the monitor. + # Replaces deprecated option "conmon_env". + # When using the pod runtime and conmon-rs, then the monitor_env can be used to further configure + # conmon-rs by using: + # - LOG_DRIVER=[none,systemd,stdout] - Enable logging to the configured target, defaults to none. + # - HEAPTRACK_OUTPUT_PATH=/path/to/dir - Enable heaptrack profiling and save the files to the set directory. + # - HEAPTRACK_BINARY_PATH=/path/to/heaptrack - Enable heaptrack profiling and use set heaptrack binary. + # - platform_runtime_paths (optional, map): A mapping of platforms to the corresponding + # runtime executable paths for the runtime handler. + # - container_min_memory (optional, string): The minimum memory that must be set for a container. + # This value can be used to override the currently set global value for a specific runtime. If not set, + # a global default value of "12 MiB" will be used. + # - no_sync_log (optional, bool): If set to true, the runtime will not sync the log file on rotate or container exit. + # This option is only valid for the 'oci' runtime type. Setting this option to true can cause data loss, e.g. + # when a machine crash happens. + # - default_annotations (optional, map): Default annotations if not overridden by the pod spec. + # - stream_websockets (optional, bool): Enable the WebSocket protocol for container exec, attach and port forward. + # - seccomp_profile (optional, string): The absolute path of the seccomp.json profile which is used as the default + # seccomp profile for the runtime. + # If not specified or set to "", the runtime seccomp_profile will be used. + # If that is also not specified or set to "", the internal default seccomp profile will be applied. + # + # Using the seccomp notifier feature: + # + # This feature can help you to debug seccomp related issues, for example if + # blocked syscalls (permission denied errors) have negative impact on the workload. 
+ # + # To be able to use this feature, configure a runtime which has the annotation + # "io.kubernetes.cri-o.seccompNotifierAction" in the allowed_annotations array. + # + # It also requires at least runc 1.1.0 or crun 0.19 which support the notifier + # feature. + # + # If everything is setup, CRI-O will modify chosen seccomp profiles for + # containers if the annotation "io.kubernetes.cri-o.seccompNotifierAction" is + # set on the Pod sandbox. CRI-O will then get notified if a container is using + # a blocked syscall and then terminate the workload after a timeout of 5 + # seconds if the value of "io.kubernetes.cri-o.seccompNotifierAction=stop". + # + # This also means that multiple syscalls can be captured during that period, + # while the timeout will get reset once a new syscall has been discovered. + # + # This also means that the Pods "restartPolicy" has to be set to "Never", + # otherwise the kubelet will restart the container immediately. + # + # Please be aware that CRI-O is not able to get notified if a syscall gets + # blocked based on the seccomp defaultAction, which is a general runtime + # limitation. + + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_type = "" + runtime_root = "/run/crun" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + allowed_annotations = [ + "io.containers.trace-syscall", + ] + privileged_without_host_devices = false + + + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_type = "" + runtime_root = "/run/runc" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + + privileged_without_host_devices = false + + + + # The workloads table defines ways to customize containers with different resources + # that work based on annotations, rather than the CRI. + # Note, the behavior of this table is EXPERIMENTAL and may change at any time. + # Each workload, has a name, activation_annotation, annotation_prefix and set of resources it supports mutating. + # The currently supported resources are "cpuperiod" "cpuquota", "cpushares", "cpulimit" and "cpuset". The values for "cpuperiod" and "cpuquota" are denoted in microseconds. + # The value for "cpulimit" is denoted in millicores, this value is used to calculate the "cpuquota" with the supplied "cpuperiod" or the default "cpuperiod". + # Note that the "cpulimit" field overrides the "cpuquota" value supplied in this configuration. + # Each resource can have a default value specified, or be empty. + # For a container to opt-into this workload, the pod should be configured with the annotation $activation_annotation (key only, value is ignored). + # To customize per-container, an annotation of the form $annotation_prefix.$resource/$ctrName = "value" can be specified + # signifying for that resource type to override the default value. + # If the annotation_prefix is not present, every container in the pod will be given the default values. 
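As a sketch of how the runtime-handler fields documented above fit together, a drop-in file could register an extra handler and make it the default; the file name and handler name here are hypothetical, and the path assumes the usual /etc/crio/crio.conf.d drop-in directory:

  # /etc/crio/crio.conf.d/10-custom-runtime.conf (hypothetical)
  [crio.runtime]
  default_runtime = "myrunc"

  [crio.runtime.runtimes.myrunc]
  runtime_path = "/usr/local/bin/runc"      # absolute path to the runtime binary
  runtime_type = "oci"                      # "oci" (the default) or "vm"
  runtime_root = "/run/myrunc"
  monitor_path = "/usr/libexec/crio/conmon"
  monitor_cgroup = "system.slice"
  allowed_annotations = [
    "io.kubernetes.cri-o.Devices",          # one of the recognized values listed above
  ]

Later drop-ins should override earlier ones and the main crio.conf, so a file like this can switch default_runtime without editing the generated tables above.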
+ # Example: + # [crio.runtime.workloads.workload-type] + # activation_annotation = "io.crio/workload" + # annotation_prefix = "io.crio.workload-type" + # [crio.runtime.workloads.workload-type.resources] + # cpuset = "0-1" + # cpushares = "5" + # cpuquota = "1000" + # cpuperiod = "100000" + # cpulimit = "35" + # Where: + # The workload name is workload-type. + # To specify, the pod must have the "io.crio.workload" annotation (this is a precise string match). + # This workload supports setting cpuset and cpu resources. + # annotation_prefix is used to customize the different resources. + # To configure the cpu shares a container gets in the example above, the pod would have to have the following annotation: + # "io.crio.workload-type/$container_name = {"cpushares": "value"}" + + # hostnetwork_disable_selinux determines whether + # SELinux should be disabled within a pod when it is running in the host network namespace + # Default value is set to true + # hostnetwork_disable_selinux = true + + # disable_hostport_mapping determines whether to enable/disable + # the container hostport mapping in CRI-O. + # Default value is set to 'false' + # disable_hostport_mapping = false + + # timezone To set the timezone for a container in CRI-O. + # If an empty string is provided, CRI-O retains its default behavior. Use 'Local' to match the timezone of the host machine. + # timezone = "" + + # The crio.image table contains settings pertaining to the management of OCI images. + # + # CRI-O reads its configured registries defaults from the system wide + # containers-registries.conf(5) located in /etc/containers/registries.conf. + [crio.image] + + # Default transport for pulling images from a remote container storage. + # default_transport = "docker://" + + # The path to a file containing credentials necessary for pulling images from + # secure registries. The file is similar to that of /var/lib/kubelet/config.json + # global_auth_file = "" + + # The image used to instantiate infra containers. + # This option supports live configuration reload. + # pause_image = "registry.k8s.io/pause:3.10.1" + + # The path to a file containing credentials specific for pulling the pause_image from + # above. The file is similar to that of /var/lib/kubelet/config.json + # This option supports live configuration reload. + # pause_image_auth_file = "" + + # The command to run to have a container stay in the paused state. + # When explicitly set to "", it will fallback to the entrypoint and command + # specified in the pause image. When commented out, it will fallback to the + # default: "/pause". This option supports live configuration reload. + # pause_command = "/pause" + + # List of images to be excluded from the kubelet's garbage collection. + # It allows specifying image names using either exact, glob, or keyword + # patterns. Exact matches must match the entire name, glob matches can + # have a wildcard * at the end, and keyword matches can have wildcards + # on both ends. By default, this list includes the "pause" image if + # configured by the user, which is used as a placeholder in Kubernetes pods. + # pinned_images = [ + # ] + + # Path to the file which decides what sort of policy we use when deciding + # whether or not to trust an image that we've pulled. It is not recommended that + # this option be used, as the default behavior of using the system-wide default + # policy (i.e., /etc/containers/policy.json) is most often preferred. Please + # refer to containers-policy.json(5) for more details. 
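A minimal sketch of the image-pinning patterns described above (exact, glob, keyword), using placeholder image names alongside the pause image quoted earlier:

  [crio.image]
  pause_image = "registry.k8s.io/pause:3.10.1"
  pinned_images = [
    "registry.k8s.io/pause:3.10.1",       # exact: must match the entire name
    "registry.k8s.io/e2e-test-images/*",  # glob: wildcard only at the end
    "*coredns*",                          # keyword: wildcards on both ends
  ]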
+ signature_policy = "/etc/crio/policy.json" + + # Root path for pod namespace-separated signature policies. + # The final policy to be used on image pull will be /.json. + # If no pod namespace is being provided on image pull (via the sandbox config), + # or the concatenated path is non existent, then the signature_policy or system + # wide policy will be used as fallback. Must be an absolute path. + # signature_policy_dir = "/etc/crio/policies" + + # List of registries to skip TLS verification for pulling images. Please + # consider configuring the registries via /etc/containers/registries.conf before + # changing them here. + # This option is deprecated. Use registries.conf file instead. + # insecure_registries = [ + # ] + + # Controls how image volumes are handled. The valid values are mkdir, bind and + # ignore; the latter will ignore volumes entirely. + # image_volumes = "mkdir" + + # Temporary directory to use for storing big files + # big_files_temporary_dir = "" + + # If true, CRI-O will automatically reload the mirror registry when + # there is an update to the 'registries.conf.d' directory. Default value is set to 'false'. + # auto_reload_registries = false + + # The timeout for an image pull to make progress until the pull operation + # gets canceled. This value will be also used for calculating the pull progress interval to pull_progress_timeout / 10. + # Can be set to 0 to disable the timeout as well as the progress output. + # pull_progress_timeout = "0s" + + # The mode of short name resolution. + # The valid values are "enforcing" and "disabled", and the default is "enforcing". + # If "enforcing", an image pull will fail if a short name is used, but the results are ambiguous. + # If "disabled", the first result will be chosen. + # short_name_mode = "enforcing" + + # OCIArtifactMountSupport is whether CRI-O should support OCI artifacts. + # If set to false, mounting OCI Artifacts will result in an error. + # oci_artifact_mount_support = true + # The crio.network table containers settings pertaining to the management of + # CNI plugins. + [crio.network] + + # The default CNI network name to be selected. If not set or "", then + # CRI-O will pick-up the first one found in network_dir. + # cni_default_network = "" + + # Path to the directory where CNI configuration files are located. + # network_dir = "/etc/cni/net.d/" + + # Paths to directories where CNI plugin binaries are located. + # plugin_dirs = [ + # "/opt/cni/bin/", + # ] + + # List of included pod metrics. + # included_pod_metrics = [ + # ] + + # A necessary configuration for Prometheus based metrics retrieval + [crio.metrics] + + # Globally enable or disable metrics support. + # enable_metrics = false + + # Specify enabled metrics collectors. + # Per default all metrics are enabled. + # It is possible, to prefix the metrics with "container_runtime_" and "crio_". + # For example, the metrics collector "operations" would be treated in the same + # way as "crio_operations" and "container_runtime_crio_operations". 
+ # metrics_collectors = [ + # "image_pulls_layer_size", + # "containers_events_dropped_total", + # "containers_oom_total", + # "processes_defunct", + # "operations_total", + # "operations_latency_seconds", + # "operations_latency_seconds_total", + # "operations_errors_total", + # "image_pulls_bytes_total", + # "image_pulls_skipped_bytes_total", + # "image_pulls_failure_total", + # "image_pulls_success_total", + # "image_layer_reuse_total", + # "containers_oom_count_total", + # "containers_seccomp_notifier_count_total", + # "resources_stalled_at_stage", + # "containers_stopped_monitor_count", + # ] + # The IP address or hostname on which the metrics server will listen. + # metrics_host = "127.0.0.1" + + # The port on which the metrics server will listen. + # metrics_port = 9090 + + # Local socket path to bind the metrics server to + # metrics_socket = "" + + # The certificate for the secure metrics server. + # If the certificate is not available on disk, then CRI-O will generate a + # self-signed one. CRI-O also watches for changes of this path and reloads the + # certificate on any modification event. + # metrics_cert = "" + + # The certificate key for the secure metrics server. + # Behaves in the same way as the metrics_cert. + # metrics_key = "" + + # A necessary configuration for OpenTelemetry trace data exporting + [crio.tracing] + + # Globally enable or disable exporting OpenTelemetry traces. + # enable_tracing = false + + # Address on which the gRPC trace collector listens on. + # tracing_endpoint = "127.0.0.1:4317" + + # Number of samples to collect per million spans. Set to 1000000 to always sample. + # tracing_sampling_rate_per_million = 0 + + # CRI-O NRI configuration. + [crio.nri] + + # Globally enable or disable NRI. + # enable_nri = true + + # NRI socket to listen on. + # nri_listen = "/var/run/nri/nri.sock" + + # NRI plugin directory to use. + # nri_plugin_dir = "/opt/nri/plugins" + + # NRI plugin configuration directory to use. + # nri_plugin_config_dir = "/etc/nri/conf.d" + + # Disable connections from externally launched NRI plugins. + # nri_disable_connections = false + + # Timeout for a plugin to register itself with NRI. + # nri_plugin_registration_timeout = "5s" + + # Timeout for a plugin to handle an NRI request. + # nri_plugin_request_timeout = "2s" + + # NRI default validator configuration. + # If enabled, the builtin default validator can be used to reject a container if some + # NRI plugin requested a restricted adjustment. Currently the following adjustments + # can be restricted/rejected: + # - OCI hook injection + # - adjustment of runtime default seccomp profile + # - adjustment of unconfied seccomp profile + # - adjustment of a custom seccomp profile + # - adjustment of linux namespaces + # Additionally, the default validator can be used to reject container creation if any + # of a required set of plugins has not processed a container creation request, unless + # the container has been annotated to tolerate a missing plugin. 
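Combining the metrics and tracing switches above into one sketch, using only values quoted in the comments (port 9090, endpoint 127.0.0.1:4317, and 1000000 to always sample):

  [crio.metrics]
  enable_metrics = true
  metrics_host = "127.0.0.1"
  metrics_port = 9090

  [crio.tracing]
  enable_tracing = true
  tracing_endpoint = "127.0.0.1:4317"
  tracing_sampling_rate_per_million = 1000000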
+ # + # [crio.nri.default_validator] + # nri_enable_default_validator = false + # nri_validator_reject_oci_hook_adjustment = false + # nri_validator_reject_runtime_default_seccomp_adjustment = false + # nri_validator_reject_unconfined_seccomp_adjustment = false + # nri_validator_reject_custom_seccomp_adjustment = false + # nri_validator_reject_namespace_adjustment = false + # nri_validator_required_plugins = [ + # ] + # nri_validator_tolerate_missing_plugins_annotation = "" + + # Necessary information pertaining to container and pod stats reporting. + [crio.stats] + + # The number of seconds between collecting pod and container stats. + # If set to 0, the stats are collected on-demand instead. + # stats_collection_period = 0 + + # The number of seconds between collecting pod/container stats and pod + # sandbox metrics. If set to 0, the metrics/stats are collected on-demand instead. + # collection_period = 0 + + + ----------------------- debugLogs end: kubenet-999044 [took: 15.744109061s] -------------------------------- + helpers_test.go:175: Cleaning up "kubenet-999044" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p kubenet-999044 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p kubenet-999044: (2.61969942s) +=== CONT TestStartStop/group/default-k8s-diff-port +=== RUN TestStartStop/group/default-k8s-diff-port/serial +=== RUN TestStartStop/group/default-k8s-diff-port/serial/FirstStart + start_stop_delete_test.go:184: (dbg) Run: out/minikube-linux-amd64 start -p default-k8s-diff-port-150516 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker --container-runtime=docker --kubernetes-version=v1.34.1 + net_test.go:211: + ----------------------- debugLogs start: enable-default-cni-999044 [pass: true] -------------------------------- + >>> netcat: nslookup kubernetes.default: + Server: 10.96.0.10 + Address: 10.96.0.10#53 + + Name: kubernetes.default.svc.cluster.local + Address: 10.96.0.1 + + + + >>> netcat: nc 10.96.0.10 udp/53: + Connection to 10.96.0.10 53 port [udp/*] succeeded! + + + >>> netcat: nc 10.96.0.10 tcp/53: + Connection to 10.96.0.10 53 port [tcp/*] succeeded! + + + >>> netcat: /etc/nsswitch.conf: + cat: can't open '/etc/nsswitch.conf': No such file or directory + command terminated with exit code 1 + + + >>> netcat: /etc/hosts: + # Kubernetes-managed hosts file. + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + fe00::0 ip6-localnet + fe00::0 ip6-mcastprefix + fe00::1 ip6-allnodes + fe00::2 ip6-allrouters + 10.244.0.3 netcat-cd4db9dbf-h69w6 + + + >>> netcat: /etc/resolv.conf: + nameserver 10.96.0.10 + search default.svc.cluster.local svc.cluster.local cluster.local test-pods.svc.cluster.local c.k8s-infra-prow-build.internal google.internal + options ndots:5 + + + >>> host: /etc/nsswitch.conf: + # /etc/nsswitch.conf + # + # Example configuration of GNU Name Service Switch functionality. + # If you have the `glibc-doc-reference' and `info' packages installed, try: + # `info libc "Name Service Switch"' for information about this file. 
+ + passwd: files + group: files + shadow: files + gshadow: files + + hosts: files dns + networks: files + + protocols: db files + services: db files + ethers: db files + rpc: db files + + netgroup: nis + + + >>> host: /etc/hosts: + 127.0.0.1 localhost + ::1 localhost ip6-localhost ip6-loopback + fe00:: ip6-localnet + ff00:: ip6-mcastprefix + ff02::1 ip6-allnodes + ff02::2 ip6-allrouters + 192.168.76.2 enable-default-cni-999044 + 192.168.76.1 host.minikube.internal + 192.168.76.2 control-plane.minikube.internal + + + >>> host: /etc/resolv.conf: + # Generated by Docker Engine. + # This file can be edited; Docker Engine will not make further changes once it + # has been modified. + + nameserver 192.168.76.1 + search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal + options ndots:5 + + # Based on host file: '/etc/resolv.conf' (internal resolver) + # ExtServers: [host(10.35.240.10)] + # Overrides: [] + # Option ndots from: host + + + >>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, : + Warning: v1 Endpoints is deprecated in v1.33+; use discovery.k8s.io/v1 EndpointSlice + NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME + node/enable-default-cni-999044 Ready control-plane 58s v1.34.1 192.168.76.2 Debian GNU/Linux 12 (bookworm) 6.6.97+ docker://28.5.1 + + NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR + default service/kubernetes ClusterIP 10.96.0.1 443/TCP 57s + default service/netcat ClusterIP 10.108.236.102 8080/TCP 15s app=netcat + kube-system service/kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP,9153/TCP 56s k8s-app=kube-dns + + NAMESPACE NAME ENDPOINTS AGE + default endpoints/kubernetes 192.168.76.2:8443 57s + default endpoints/netcat 10.244.0.3:8080 15s + kube-system endpoints/k8s.io-minikube-hostpath 50s + kube-system endpoints/kube-dns 10.244.0.2:53,10.244.0.2:53,10.244.0.2:9153 50s + + NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR + kube-system daemonset.apps/kube-proxy 1 1 1 1 1 kubernetes.io/os=linux 56s kube-proxy registry.k8s.io/kube-proxy:v1.34.1 k8s-app=kube-proxy + + NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR + default deployment.apps/netcat 1/1 1 1 15s dnsutils registry.k8s.io/e2e-test-images/agnhost:2.40 app=netcat + kube-system deployment.apps/coredns 1/1 1 1 56s coredns registry.k8s.io/coredns/coredns:v1.12.1 k8s-app=kube-dns + + NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES + default pod/netcat-cd4db9dbf-h69w6 1/1 Running 0 15s 10.244.0.3 enable-default-cni-999044 + kube-system pod/coredns-66bc5c9577-6bblg 1/1 Running 0 50s 10.244.0.2 enable-default-cni-999044 + kube-system pod/etcd-enable-default-cni-999044 1/1 Running 0 56s 192.168.76.2 enable-default-cni-999044 + kube-system pod/kube-apiserver-enable-default-cni-999044 1/1 Running 0 56s 192.168.76.2 enable-default-cni-999044 + kube-system pod/kube-controller-manager-enable-default-cni-999044 1/1 Running 0 56s 192.168.76.2 enable-default-cni-999044 + kube-system pod/kube-proxy-nlt66 1/1 Running 0 50s 192.168.76.2 enable-default-cni-999044 + kube-system pod/kube-scheduler-enable-default-cni-999044 1/1 Running 0 57s 192.168.76.2 enable-default-cni-999044 + kube-system pod/storage-provisioner 1/1 Running 1 (19s ago) 50s 192.168.76.2 enable-default-cni-999044 + + + >>> host: crictl pods: + POD ID CREATED STATE NAME NAMESPACE ATTEMPT RUNTIME + cd4b0636d616e 16 
seconds ago Ready netcat-cd4db9dbf-h69w6 default 0 (default) + 1a71a9959cad6 50 seconds ago Ready storage-provisioner kube-system 0 (default) + ae92d332e0bd5 51 seconds ago Ready coredns-66bc5c9577-6bblg kube-system 0 (default) + 46b9bcc4a2b5b 51 seconds ago Ready kube-proxy-nlt66 kube-system 0 (default) + aa5787d6ac78f About a minute ago Ready etcd-enable-default-cni-999044 kube-system 0 (default) + 65191f66ec2e3 About a minute ago Ready kube-scheduler-enable-default-cni-999044 kube-system 0 (default) + f0d983a7afd43 About a minute ago Ready kube-controller-manager-enable-default-cni-999044 kube-system 0 (default) + 08a183f3081c9 About a minute ago Ready kube-apiserver-enable-default-cni-999044 kube-system 0 (default) + + + >>> host: crictl containers: + CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE + 3c4a0d7e1f985 registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 15 seconds ago Running dnsutils 0 cd4b0636d616e netcat-cd4db9dbf-h69w6 default + f6d6851014dc3 6e38f40d628db 20 seconds ago Running storage-provisioner 1 1a71a9959cad6 storage-provisioner kube-system + 6591222ccfe8c 6e38f40d628db 50 seconds ago Exited storage-provisioner 0 1a71a9959cad6 storage-provisioner kube-system + ee284adbbe6da 52546a367cc9e 50 seconds ago Running coredns 0 ae92d332e0bd5 coredns-66bc5c9577-6bblg kube-system + c5a1cf7eb5787 fc25172553d79 51 seconds ago Running kube-proxy 0 46b9bcc4a2b5b kube-proxy-nlt66 kube-system + a9b561b41c6b3 5f1f5298c888d About a minute ago Running etcd 0 aa5787d6ac78f etcd-enable-default-cni-999044 kube-system + 1a2c7edf6b6cb c3994bc696102 About a minute ago Running kube-apiserver 0 08a183f3081c9 kube-apiserver-enable-default-cni-999044 kube-system + 0627f9ec285b4 c80c8dbafe7dd About a minute ago Running kube-controller-manager 0 f0d983a7afd43 kube-controller-manager-enable-default-cni-999044 kube-system + 65dfdc5a33813 7dd6aaa1717ab About a minute ago Running kube-scheduler 0 65191f66ec2e3 kube-scheduler-enable-default-cni-999044 kube-system + + + >>> k8s: describe netcat deployment: + Name: netcat + Namespace: default + CreationTimestamp: Sun, 02 Nov 2025 23:26:06 +0000 + Labels: app=netcat + Annotations: deployment.kubernetes.io/revision: 1 + Selector: app=netcat + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 25% max unavailable, 25% max surge + Pod Template: + Labels: app=netcat + Containers: + dnsutils: + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + Environment: + Mounts: + Volumes: + Node-Selectors: + Tolerations: + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: netcat-cd4db9dbf (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 16s deployment-controller Scaled up replica set netcat-cd4db9dbf from 0 to 1 + + + >>> k8s: describe netcat pod(s): + Name: netcat-cd4db9dbf-h69w6 + Namespace: default + Priority: 0 + Service Account: default + Node: enable-default-cni-999044/192.168.76.2 + Start Time: Sun, 02 Nov 2025 23:26:06 +0000 + Labels: app=netcat + pod-template-hash=cd4db9dbf + Annotations: + Status: Running + IP: 10.244.0.3 + IPs: + IP: 10.244.0.3 + Controlled By: ReplicaSet/netcat-cd4db9dbf + 
Containers: + dnsutils: + Container ID: docker://3c4a0d7e1f985bd3755fb144cad41b52d34a0ee7332610f88fc1eb509b45d335 + Image: registry.k8s.io/e2e-test-images/agnhost:2.40 + Image ID: docker-pullable://registry.k8s.io/e2e-test-images/agnhost@sha256:af7e3857d87770ddb40f5ea4f89b5a2709504ab1ee31f9ea4ab5823c045f2146 + Port: + Host Port: + Command: + /bin/sh + -c + while true; do echo hello | nc -l -p 8080; done + State: Running + Started: Sun, 02 Nov 2025 23:26:08 +0000 + Ready: True + Restart Count: 0 + Environment: + Mounts: + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-knn6m (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-api-access-knn6m: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: + Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 16s default-scheduler Successfully assigned default/netcat-cd4db9dbf-h69w6 to enable-default-cni-999044 + Normal Pulling 16s kubelet Pulling image "registry.k8s.io/e2e-test-images/agnhost:2.40" + Normal Pulled 15s kubelet Successfully pulled image "registry.k8s.io/e2e-test-images/agnhost:2.40" in 999ms (999ms including waiting). Image size: 127004766 bytes. + Normal Created 14s kubelet Created container: dnsutils + Normal Started 14s kubelet Started container dnsutils + + + >>> k8s: netcat logs: + + + >>> k8s: describe coredns deployment: + Name: coredns + Namespace: kube-system + CreationTimestamp: Sun, 02 Nov 2025 23:25:25 +0000 + Labels: k8s-app=kube-dns + Annotations: deployment.kubernetes.io/revision: 1 + Selector: k8s-app=kube-dns + Replicas: 1 desired | 1 updated | 1 total | 1 available | 0 unavailable + StrategyType: RollingUpdate + MinReadySeconds: 0 + RollingUpdateStrategy: 1 max unavailable, 25% max surge + Pod Template: + Labels: k8s-app=kube-dns + Service Account: coredns + Containers: + coredns: + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Ports: 53/UDP (dns), 53/TCP (dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from config-volume (ro) + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + Priority Class Name: system-cluster-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable + OldReplicaSets: + NewReplicaSet: coredns-66bc5c9577 (1/1 replicas created) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 51s deployment-controller Scaled 
up replica set coredns-66bc5c9577 from 0 to 2 + Normal ScalingReplicaSet 51s deployment-controller Scaled down replica set coredns-66bc5c9577 from 2 to 1 + + + >>> k8s: describe coredns pods: + Name: coredns-66bc5c9577-6bblg + Namespace: kube-system + Priority: 2000000000 + Priority Class Name: system-cluster-critical + Service Account: coredns + Node: enable-default-cni-999044/192.168.76.2 + Start Time: Sun, 02 Nov 2025 23:25:31 +0000 + Labels: k8s-app=kube-dns + pod-template-hash=66bc5c9577 + Annotations: + Status: Running + IP: 10.244.0.2 + IPs: + IP: 10.244.0.2 + Controlled By: ReplicaSet/coredns-66bc5c9577 + Containers: + coredns: + Container ID: docker://ee284adbbe6dab5dcaac548d315d00279d7734876007c2412e281bb8461c1aac + Image: registry.k8s.io/coredns/coredns:v1.12.1 + Image ID: docker-pullable://registry.k8s.io/coredns/coredns@sha256:e8c262566636e6bc340ece6473b0eed193cad045384401529721ddbe6463d31c + Ports: 53/UDP (dns), 53/TCP (dns-tcp), 9153/TCP (metrics), 8080/TCP (liveness-probe), 8181/TCP (readiness-probe) + Host Ports: 0/UDP (dns), 0/TCP (dns-tcp), 0/TCP (metrics), 0/TCP (liveness-probe), 0/TCP (readiness-probe) + Args: + -conf + /etc/coredns/Corefile + State: Running + Started: Sun, 02 Nov 2025 23:25:32 +0000 + Ready: True + Restart Count: 0 + Limits: + memory: 170Mi + Requests: + cpu: 100m + memory: 70Mi + Liveness: http-get http://:liveness-probe/health delay=60s timeout=5s period=10s #success=1 #failure=5 + Readiness: http-get http://:readiness-probe/ready delay=0s timeout=1s period=10s #success=1 #failure=3 + Environment: + Mounts: + /etc/coredns from config-volume (ro) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-22pzv (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + config-volume: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: coredns + Optional: false + kube-api-access-22pzv: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: Burstable + Node-Selectors: kubernetes.io/os=linux + Tolerations: CriticalAddonsOnly op=Exists + node-role.kubernetes.io/control-plane:NoSchedule + node.kubernetes.io/not-ready:NoExecute op=Exists for 300s + node.kubernetes.io/unreachable:NoExecute op=Exists for 300s + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 51s default-scheduler Successfully assigned kube-system/coredns-66bc5c9577-6bblg to enable-default-cni-999044 + Normal Pulled 50s kubelet Container image "registry.k8s.io/coredns/coredns:v1.12.1" already present on machine + Normal Created 50s kubelet Created container: coredns + Normal Started 50s kubelet Started container coredns + Warning Unhealthy 28s (x4 over 48s) kubelet Readiness probe failed: HTTP probe failed with statuscode: 503 + + + >>> k8s: coredns logs: + maxprocs: Leaving GOMAXPROCS=8: CPU quota undefined + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/ready: Still waiting on: "kubernetes" + [INFO] plugin/ready: Still waiting on: "kubernetes" + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + 
[INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [INFO] plugin/kubernetes: waiting for Kubernetes API before starting server + [WARNING] plugin/kubernetes: starting server with unsynced Kubernetes API + .:53 + [INFO] plugin/reload: Running configuration SHA512 = 3e2243e8b9e7116f563b83b1933f477a68ba9ad4a829ed5d7e54629fb2ce53528b9bc6023030be20be434ad805fd246296dd428c64e9bbef3a70f22b8621f560 + CoreDNS-1.12.1 + linux/amd64, go1.24.1, 707c7c1 + [INFO] 127.0.0.1:59623 - 47384 "HINFO IN 8155857795771667456.1587578404596704812. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.028953701s + [INFO] plugin/ready: Still waiting on: "kubernetes" + [INFO] plugin/ready: Still waiting on: "kubernetes" + [INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Service: Get "https://10.96.0.1:443/api/v1/services?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout + [ERROR] plugin/kubernetes: Unhandled Error + [INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.EndpointSlice: Get "https://10.96.0.1:443/apis/discovery.k8s.io/v1/endpointslices?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout + [ERROR] plugin/kubernetes: Unhandled Error + [INFO] plugin/kubernetes: pkg/mod/k8s.io/client-go@v0.32.3/tools/cache/reflector.go:251: failed to list *v1.Namespace: Get "https://10.96.0.1:443/api/v1/namespaces?limit=500&resourceVersion=0": dial tcp 10.96.0.1:443: i/o timeout + [ERROR] plugin/kubernetes: Unhandled Error + [INFO] 10.244.0.3:34185 - 14785 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000216317s + [INFO] 10.244.0.3:43602 - 10824 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000097832s + [INFO] 10.244.0.3:49469 - 28551 "AAAA IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 147 0.000122743s + [INFO] 10.244.0.3:59329 - 16732 "AAAA IN netcat.default.svc.cluster.local. udp 50 false 512" NOERROR qr,aa,rd 143 0.00017091s + [INFO] 10.244.0.3:59329 - 16453 "A IN netcat.default.svc.cluster.local. udp 50 false 512" NOERROR qr,aa,rd 98 0.000194338s + [INFO] 10.244.0.3:59964 - 65401 "A IN kubernetes.default.default.svc.cluster.local. udp 62 false 512" NXDOMAIN qr,aa,rd 155 0.000123265s + [INFO] 10.244.0.3:57570 - 22197 "A IN kubernetes.default.svc.cluster.local. udp 54 false 512" NOERROR qr,aa,rd 106 0.000077202s + [INFO] 10.244.0.3:38853 - 16168 "AAAA IN kubernetes.default.svc.cluster.local. 
udp 54 false 512" NOERROR qr,aa,rd 147 0.000111113s + + + >>> k8s: describe api server pod(s): + Name: kube-apiserver-enable-default-cni-999044 + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Node: enable-default-cni-999044/192.168.76.2 + Start Time: Sun, 02 Nov 2025 23:25:25 +0000 + Labels: component=kube-apiserver + tier=control-plane + Annotations: kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 192.168.76.2:8443 + kubernetes.io/config.hash: 82310323aedbc6ef3df9fa46725030e8 + kubernetes.io/config.mirror: 82310323aedbc6ef3df9fa46725030e8 + kubernetes.io/config.seen: 2025-11-02T23:25:25.584185966Z + kubernetes.io/config.source: file + Status: Running + SeccompProfile: RuntimeDefault + IP: 192.168.76.2 + IPs: + IP: 192.168.76.2 + Controlled By: Node/enable-default-cni-999044 + Containers: + kube-apiserver: + Container ID: docker://1a2c7edf6b6cb32d23b45cdb0fa389b67703d116aabe2798c779533c2d310930 + Image: registry.k8s.io/kube-apiserver:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-apiserver@sha256:b9d7c117f8ac52bed4b13aeed973dc5198f9d93a926e6fe9e0b384f155baa902 + Port: 8443/TCP (probe-port) + Host Port: 8443/TCP (probe-port) + Command: + kube-apiserver + --advertise-address=192.168.76.2 + --allow-privileged=true + --authorization-mode=Node,RBAC + --client-ca-file=/var/lib/minikube/certs/ca.crt + --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + --enable-bootstrap-token-auth=true + --etcd-cafile=/var/lib/minikube/certs/etcd/ca.crt + --etcd-certfile=/var/lib/minikube/certs/apiserver-etcd-client.crt + --etcd-keyfile=/var/lib/minikube/certs/apiserver-etcd-client.key + --etcd-servers=https://127.0.0.1:2379 + --kubelet-client-certificate=/var/lib/minikube/certs/apiserver-kubelet-client.crt + --kubelet-client-key=/var/lib/minikube/certs/apiserver-kubelet-client.key + --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + --proxy-client-cert-file=/var/lib/minikube/certs/front-proxy-client.crt + --proxy-client-key-file=/var/lib/minikube/certs/front-proxy-client.key + --requestheader-allowed-names=front-proxy-client + --requestheader-client-ca-file=/var/lib/minikube/certs/front-proxy-ca.crt + --requestheader-extra-headers-prefix=X-Remote-Extra- + --requestheader-group-headers=X-Remote-Group + --requestheader-username-headers=X-Remote-User + --secure-port=8443 + --service-account-issuer=https://kubernetes.default.svc.cluster.local + --service-account-key-file=/var/lib/minikube/certs/sa.pub + --service-account-signing-key-file=/var/lib/minikube/certs/sa.key + --service-cluster-ip-range=10.96.0.0/12 + --tls-cert-file=/var/lib/minikube/certs/apiserver.crt + --tls-private-key-file=/var/lib/minikube/certs/apiserver.key + State: Running + Started: Sun, 02 Nov 2025 23:25:21 +0000 + Ready: True + Restart Count: 0 + Requests: + cpu: 250m + Liveness: http-get https://192.168.76.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=8 + Readiness: http-get https://192.168.76.2:probe-port/readyz delay=0s timeout=15s period=1s #success=1 #failure=3 + Startup: http-get https://192.168.76.2:probe-port/livez delay=10s timeout=15s period=10s #success=1 #failure=24 + Environment: + Mounts: + /etc/ca-certificates from etc-ca-certificates (ro) + /etc/ssl/certs from ca-certs (ro) + /usr/local/share/ca-certificates from usr-local-share-ca-certificates (ro) + 
/usr/share/ca-certificates from usr-share-ca-certificates (ro) + /var/lib/minikube/certs from k8s-certs (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + ca-certs: + Type: HostPath (bare host directory volume) + Path: /etc/ssl/certs + HostPathType: DirectoryOrCreate + etc-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /etc/ca-certificates + HostPathType: DirectoryOrCreate + k8s-certs: + Type: HostPath (bare host directory volume) + Path: /var/lib/minikube/certs + HostPathType: DirectoryOrCreate + usr-local-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/local/share/ca-certificates + HostPathType: DirectoryOrCreate + usr-share-ca-certificates: + Type: HostPath (bare host directory volume) + Path: /usr/share/ca-certificates + HostPathType: DirectoryOrCreate + QoS Class: Burstable + Node-Selectors: + Tolerations: :NoExecute op=Exists + Events: + + + >>> k8s: api server logs: + I1102 23:25:22.079243 1 options.go:263] external host was not specified, using 192.168.76.2 + I1102 23:25:22.080676 1 server.go:150] Version: v1.34.1 + I1102 23:25:22.080694 1 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + W1102 23:25:22.280020 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=rbac.authorization.k8s.io/v1alpha1 + W1102 23:25:22.280037 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=resource.k8s.io/v1alpha3 + W1102 23:25:22.280041 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=admissionregistration.k8s.io/v1alpha1 + W1102 23:25:22.280044 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=internal.apiserver.k8s.io/v1alpha1 + W1102 23:25:22.280047 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=scheduling.k8s.io/v1alpha1 + W1102 23:25:22.280050 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=storage.k8s.io/v1alpha1 + W1102 23:25:22.280052 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=coordination.k8s.io/v1alpha2 + W1102 23:25:22.280055 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=storagemigration.k8s.io/v1alpha1 + W1102 23:25:22.280057 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=authentication.k8s.io/v1alpha1 + W1102 23:25:22.280060 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=certificates.k8s.io/v1alpha1 + W1102 23:25:22.280063 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead 
of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=imagepolicy.k8s.io/v1alpha1 + W1102 23:25:22.280065 1 api_enablement.go:112] alpha api enabled with emulated version 1.34 instead of the binary's version 1.34.1, this is unsupported, proceed at your own risk: api=node.k8s.io/v1alpha1 + W1102 23:25:22.289880 1 logging.go:55] [core] [Channel #1 SubChannel #3]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.290311 1 logging.go:55] [core] [Channel #2 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:22.291207 1 shared_informer.go:349] "Waiting for caches to sync" controller="node_authorizer" + I1102 23:25:22.296359 1 shared_informer.go:349] "Waiting for caches to sync" controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:25:22.305126 1 plugins.go:157] Loaded 14 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,RuntimeClass,DefaultIngressClass,PodTopologyLabels,MutatingAdmissionPolicy,MutatingAdmissionWebhook. + I1102 23:25:22.305139 1 plugins.go:160] Loaded 13 validating admission controller(s) successfully in the following order: LimitRanger,ServiceAccount,PodSecurity,Priority,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,ClusterTrustBundleAttest,CertificateSubjectRestriction,ValidatingAdmissionPolicy,ValidatingAdmissionWebhook,ResourceQuota. + I1102 23:25:22.305279 1 instance.go:239] Using reconciler: lease + W1102 23:25:22.305905 1 logging.go:55] [core] [Channel #7 SubChannel #8]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:22.876959 1 logging.go:55] [core] [Channel #13 SubChannel #14]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.881763 1 logging.go:55] [core] [Channel #21 SubChannel #22]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:22.887152 1 handler.go:285] Adding GroupVersion apiextensions.k8s.io v1 to ResourceManager + W1102 23:25:22.887164 1 genericapiserver.go:784] Skipping API apiextensions.k8s.io/v1beta1 because it has no resources. + I1102 23:25:22.889228 1 cidrallocator.go:197] starting ServiceCIDR Allocator Controller + W1102 23:25:22.889566 1 logging.go:55] [core] [Channel #27 SubChannel #28]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.892820 1 logging.go:55] [core] [Channel #31 SubChannel #32]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.896131 1 logging.go:55] [core] [Channel #35 SubChannel #36]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:22.899279 1 logging.go:55] [core] [Channel #39 SubChannel #40]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.902527 1 logging.go:55] [core] [Channel #43 SubChannel #44]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.905891 1 logging.go:55] [core] [Channel #47 SubChannel #48]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.909325 1 logging.go:55] [core] [Channel #51 SubChannel #52]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:22.912534 1 logging.go:55] [core] [Channel #55 SubChannel #56]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:22.915734 1 logging.go:55] [core] [Channel #59 SubChannel #60]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:22.919589 1 logging.go:55] [core] [Channel #63 SubChannel #64]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.922843 1 logging.go:55] [core] [Channel #67 SubChannel #68]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.925982 1 logging.go:55] [core] [Channel #71 SubChannel #72]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.929283 1 logging.go:55] [core] [Channel #75 SubChannel #76]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.932856 1 logging.go:55] [core] [Channel #79 SubChannel #80]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.936878 1 logging.go:55] [core] [Channel #83 SubChannel #84]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.942497 1 logging.go:55] [core] [Channel #87 SubChannel #88]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.945744 1 logging.go:55] [core] [Channel #91 SubChannel #92]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:22.957440 1 handler.go:285] Adding GroupVersion v1 to ResourceManager + I1102 23:25:22.957574 1 apis.go:112] API group "internal.apiserver.k8s.io" is not enabled, skipping. + W1102 23:25:22.958206 1 logging.go:55] [core] [Channel #95 SubChannel #96]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.961525 1 logging.go:55] [core] [Channel #99 SubChannel #100]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.965218 1 logging.go:55] [core] [Channel #103 SubChannel #104]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.968556 1 logging.go:55] [core] [Channel #107 SubChannel #108]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.972338 1 logging.go:55] [core] [Channel #111 SubChannel #112]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:22.976156 1 logging.go:55] [core] [Channel #115 SubChannel #116]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:22.979584 1 logging.go:55] [core] [Channel #119 SubChannel #120]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:22.983302 1 logging.go:55] [core] [Channel #123 SubChannel #124]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.987069 1 logging.go:55] [core] [Channel #127 SubChannel #128]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.990537 1 logging.go:55] [core] [Channel #131 SubChannel #132]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.994587 1 logging.go:55] [core] [Channel #135 SubChannel #136]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:22.999069 1 logging.go:55] [core] [Channel #139 SubChannel #140]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:23.003942 1 logging.go:55] [core] [Channel #143 SubChannel #144]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.007248 1 logging.go:55] [core] [Channel #147 SubChannel #148]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:23.010482 1 logging.go:55] [core] [Channel #151 SubChannel #152]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.013633 1 logging.go:55] [core] [Channel #155 SubChannel #156]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.016767 1 logging.go:55] [core] [Channel #159 SubChannel #160]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.020829 1 logging.go:55] [core] [Channel #163 SubChannel #164]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.024197 1 logging.go:55] [core] [Channel #167 SubChannel #168]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.027302 1 logging.go:55] [core] [Channel #171 SubChannel #172]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.030459 1 logging.go:55] [core] [Channel #175 SubChannel #176]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.033787 1 logging.go:55] [core] [Channel #179 SubChannel #180]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.037227 1 logging.go:55] [core] [Channel #183 SubChannel #184]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.040382 1 logging.go:55] [core] [Channel #187 SubChannel #188]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:23.043141 1 apis.go:112] API group "storagemigration.k8s.io" is not enabled, skipping. + W1102 23:25:23.043684 1 logging.go:55] [core] [Channel #191 SubChannel #192]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.046767 1 logging.go:55] [core] [Channel #195 SubChannel #196]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.054099 1 logging.go:55] [core] [Channel #199 SubChannel #200]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.057359 1 logging.go:55] [core] [Channel #203 SubChannel #204]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.060575 1 logging.go:55] [core] [Channel #207 SubChannel #208]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.064971 1 logging.go:55] [core] [Channel #211 SubChannel #212]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.068531 1 logging.go:55] [core] [Channel #215 SubChannel #216]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.072039 1 logging.go:55] [core] [Channel #219 SubChannel #220]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled" + W1102 23:25:23.075328 1 logging.go:55] [core] [Channel #223 SubChannel #224]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.078600 1 logging.go:55] [core] [Channel #227 SubChannel #228]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. 
Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.081894 1 logging.go:55] [core] [Channel #231 SubChannel #232]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.085854 1 logging.go:55] [core] [Channel #235 SubChannel #236]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.089127 1 logging.go:55] [core] [Channel #239 SubChannel #240]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.101242 1 logging.go:55] [core] [Channel #243 SubChannel #244]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.104574 1 logging.go:55] [core] [Channel #247 SubChannel #248]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + W1102 23:25:23.108205 1 logging.go:55] [core] [Channel #251 SubChannel #252]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:23.116671 1 handler.go:285] Adding GroupVersion authentication.k8s.io v1 to ResourceManager + W1102 23:25:23.116681 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1beta1 because it has no resources. + W1102 23:25:23.116684 1 genericapiserver.go:784] Skipping API authentication.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:23.116890 1 handler.go:285] Adding GroupVersion authorization.k8s.io v1 to ResourceManager + W1102 23:25:23.116894 1 genericapiserver.go:784] Skipping API authorization.k8s.io/v1beta1 because it has no resources. + I1102 23:25:23.117341 1 handler.go:285] Adding GroupVersion autoscaling v2 to ResourceManager + I1102 23:25:23.117705 1 handler.go:285] Adding GroupVersion autoscaling v1 to ResourceManager + W1102 23:25:23.117710 1 genericapiserver.go:784] Skipping API autoscaling/v2beta1 because it has no resources. + W1102 23:25:23.117712 1 genericapiserver.go:784] Skipping API autoscaling/v2beta2 because it has no resources. 
+ I1102 23:25:23.118386 1 handler.go:285] Adding GroupVersion batch v1 to ResourceManager + W1102 23:25:23.118405 1 genericapiserver.go:784] Skipping API batch/v1beta1 because it has no resources. + I1102 23:25:23.118836 1 handler.go:285] Adding GroupVersion certificates.k8s.io v1 to ResourceManager + W1102 23:25:23.118841 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1beta1 because it has no resources. + W1102 23:25:23.118843 1 genericapiserver.go:784] Skipping API certificates.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:23.119602 1 handler.go:285] Adding GroupVersion coordination.k8s.io v1 to ResourceManager + W1102 23:25:23.119616 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1beta1 because it has no resources. + W1102 23:25:23.119621 1 genericapiserver.go:784] Skipping API coordination.k8s.io/v1alpha2 because it has no resources. + I1102 23:25:23.120658 1 handler.go:285] Adding GroupVersion discovery.k8s.io v1 to ResourceManager + W1102 23:25:23.120668 1 genericapiserver.go:784] Skipping API discovery.k8s.io/v1beta1 because it has no resources. + I1102 23:25:23.123897 1 handler.go:285] Adding GroupVersion networking.k8s.io v1 to ResourceManager + W1102 23:25:23.123927 1 genericapiserver.go:784] Skipping API networking.k8s.io/v1beta1 because it has no resources. + I1102 23:25:23.124835 1 handler.go:285] Adding GroupVersion node.k8s.io v1 to ResourceManager + W1102 23:25:23.124844 1 genericapiserver.go:784] Skipping API node.k8s.io/v1beta1 because it has no resources. + W1102 23:25:23.124847 1 genericapiserver.go:784] Skipping API node.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:23.125384 1 handler.go:285] Adding GroupVersion policy v1 to ResourceManager + W1102 23:25:23.125391 1 genericapiserver.go:784] Skipping API policy/v1beta1 because it has no resources. + I1102 23:25:23.126227 1 handler.go:285] Adding GroupVersion rbac.authorization.k8s.io v1 to ResourceManager + W1102 23:25:23.126234 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1beta1 because it has no resources. + W1102 23:25:23.126236 1 genericapiserver.go:784] Skipping API rbac.authorization.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:23.126474 1 handler.go:285] Adding GroupVersion scheduling.k8s.io v1 to ResourceManager + W1102 23:25:23.126479 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1beta1 because it has no resources. + W1102 23:25:23.126481 1 genericapiserver.go:784] Skipping API scheduling.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:23.127594 1 handler.go:285] Adding GroupVersion storage.k8s.io v1 to ResourceManager + W1102 23:25:23.127604 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1beta1 because it has no resources. + W1102 23:25:23.127608 1 genericapiserver.go:784] Skipping API storage.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:23.128146 1 handler.go:285] Adding GroupVersion flowcontrol.apiserver.k8s.io v1 to ResourceManager + W1102 23:25:23.128151 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta3 because it has no resources. + W1102 23:25:23.128153 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta2 because it has no resources. + W1102 23:25:23.128155 1 genericapiserver.go:784] Skipping API flowcontrol.apiserver.k8s.io/v1beta1 because it has no resources. 
+ I1102 23:25:23.129727 1 handler.go:285] Adding GroupVersion apps v1 to ResourceManager + W1102 23:25:23.129734 1 genericapiserver.go:784] Skipping API apps/v1beta2 because it has no resources. + W1102 23:25:23.129737 1 genericapiserver.go:784] Skipping API apps/v1beta1 because it has no resources. + I1102 23:25:23.130548 1 handler.go:285] Adding GroupVersion admissionregistration.k8s.io v1 to ResourceManager + W1102 23:25:23.130555 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1beta1 because it has no resources. + W1102 23:25:23.130558 1 genericapiserver.go:784] Skipping API admissionregistration.k8s.io/v1alpha1 because it has no resources. + I1102 23:25:23.130812 1 handler.go:285] Adding GroupVersion events.k8s.io v1 to ResourceManager + W1102 23:25:23.130816 1 genericapiserver.go:784] Skipping API events.k8s.io/v1beta1 because it has no resources. + W1102 23:25:23.130840 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta2 because it has no resources. + I1102 23:25:23.131693 1 handler.go:285] Adding GroupVersion resource.k8s.io v1 to ResourceManager + W1102 23:25:23.131700 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1beta1 because it has no resources. + W1102 23:25:23.131703 1 genericapiserver.go:784] Skipping API resource.k8s.io/v1alpha3 because it has no resources. + W1102 23:25:23.133175 1 logging.go:55] [core] [Channel #255 SubChannel #256]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled" + I1102 23:25:23.136216 1 handler.go:285] Adding GroupVersion apiregistration.k8s.io v1 to ResourceManager + W1102 23:25:23.136224 1 genericapiserver.go:784] Skipping API apiregistration.k8s.io/v1beta1 because it has no resources. + I1102 23:25:23.321712 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:25:23.321737 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:25:23.321900 1 dynamic_serving_content.go:135] "Starting controller" name="serving-cert::/var/lib/minikube/certs/apiserver.crt::/var/lib/minikube/certs/apiserver.key" + I1102 23:25:23.322086 1 secure_serving.go:211] Serving securely on [::]:8443 + I1102 23:25:23.322126 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" + I1102 23:25:23.322164 1 local_available_controller.go:156] Starting LocalAvailability controller + I1102 23:25:23.322173 1 cache.go:32] Waiting for caches to sync for LocalAvailability controller + I1102 23:25:23.322177 1 aggregator.go:169] waiting for initial CRD sync... 
+ I1102 23:25:23.322189 1 controller.go:78] Starting OpenAPI AggregationController + I1102 23:25:23.322231 1 system_namespaces_controller.go:66] Starting system namespaces controller + I1102 23:25:23.322242 1 remote_available_controller.go:425] Starting RemoteAvailability controller + I1102 23:25:23.322247 1 cache.go:32] Waiting for caches to sync for RemoteAvailability controller + I1102 23:25:23.322262 1 apf_controller.go:377] Starting API Priority and Fairness config controller + I1102 23:25:23.322264 1 controller.go:119] Starting legacy_token_tracking_controller + I1102 23:25:23.322269 1 shared_informer.go:349] "Waiting for caches to sync" controller="configmaps" + I1102 23:25:23.322278 1 gc_controller.go:78] Starting apiserver lease garbage collector + I1102 23:25:23.322283 1 dynamic_serving_content.go:135] "Starting controller" name="aggregator-proxy-cert::/var/lib/minikube/certs/front-proxy-client.crt::/var/lib/minikube/certs/front-proxy-client.key" + I1102 23:25:23.322331 1 repairip.go:210] Starting ipallocator-repair-controller + I1102 23:25:23.322336 1 shared_informer.go:349] "Waiting for caches to sync" controller="ipallocator-repair-controller" + I1102 23:25:23.322427 1 apiservice_controller.go:100] Starting APIServiceRegistrationController + I1102 23:25:23.322432 1 cache.go:32] Waiting for caches to sync for APIServiceRegistrationController controller + I1102 23:25:23.322443 1 controller.go:80] Starting OpenAPI V3 AggregationController + I1102 23:25:23.323559 1 customresource_discovery_controller.go:294] Starting DiscoveryController + I1102 23:25:23.323653 1 default_servicecidr_controller.go:111] Starting kubernetes-service-cidr-controller + I1102 23:25:23.323717 1 shared_informer.go:349] "Waiting for caches to sync" controller="kubernetes-service-cidr-controller" + I1102 23:25:23.330085 1 cluster_authentication_trust_controller.go:459] Starting cluster_authentication_trust_controller controller + I1102 23:25:23.330111 1 shared_informer.go:349] "Waiting for caches to sync" controller="cluster_authentication_trust_controller" + I1102 23:25:23.330129 1 crdregistration_controller.go:114] Starting crd-autoregister controller + I1102 23:25:23.330132 1 shared_informer.go:349] "Waiting for caches to sync" controller="crd-autoregister" + I1102 23:25:23.330177 1 controller.go:142] Starting OpenAPI controller + I1102 23:25:23.330193 1 controller.go:90] Starting OpenAPI V3 controller + I1102 23:25:23.330208 1 naming_controller.go:299] Starting NamingConditionController + I1102 23:25:23.330217 1 establishing_controller.go:81] Starting EstablishingController + I1102 23:25:23.330287 1 nonstructuralschema_controller.go:195] Starting NonStructuralSchemaConditionController + I1102 23:25:23.330297 1 apiapproval_controller.go:189] Starting KubernetesAPIApprovalPolicyConformantConditionController + I1102 23:25:23.330305 1 crd_finalizer.go:269] Starting CRDFinalizer + I1102 23:25:23.330347 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + I1102 23:25:23.330399 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/var/lib/minikube/certs/front-proxy-ca.crt" + I1102 23:25:23.391283 1 shared_informer.go:356] "Caches are synced" controller="node_authorizer" + I1102 23:25:23.396456 1 shared_informer.go:356] "Caches are synced" 
controller="*generic.policySource[*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicy,*k8s.io/api/admissionregistration/v1.ValidatingAdmissionPolicyBinding,k8s.io/apiserver/pkg/admission/plugin/policy/validating.Validator]" + I1102 23:25:23.396466 1 policy_source.go:240] refreshing policies + I1102 23:25:23.423149 1 shared_informer.go:356] "Caches are synced" controller="ipallocator-repair-controller" + I1102 23:25:23.423158 1 cache.go:39] Caches are synced for RemoteAvailability controller + I1102 23:25:23.423170 1 apf_controller.go:382] Running API Priority and Fairness config worker + I1102 23:25:23.423181 1 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process + I1102 23:25:23.423186 1 cache.go:39] Caches are synced for APIServiceRegistrationController controller + I1102 23:25:23.423187 1 cache.go:39] Caches are synced for LocalAvailability controller + I1102 23:25:23.423207 1 handler_discovery.go:451] Starting ResourceDiscoveryManager + I1102 23:25:23.423255 1 shared_informer.go:356] "Caches are synced" controller="configmaps" + I1102 23:25:23.423811 1 shared_informer.go:356] "Caches are synced" controller="kubernetes-service-cidr-controller" + I1102 23:25:23.423830 1 default_servicecidr_controller.go:166] Creating default ServiceCIDR with CIDRs: [10.96.0.0/12] + I1102 23:25:23.423935 1 controller.go:667] quota admission added evaluator for: namespaces + I1102 23:25:23.425198 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:25:23.425230 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True + I1102 23:25:23.427279 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:25:23.427410 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller + I1102 23:25:23.430215 1 shared_informer.go:356] "Caches are synced" controller="crd-autoregister" + I1102 23:25:23.430215 1 shared_informer.go:356] "Caches are synced" controller="cluster_authentication_trust_controller" + I1102 23:25:23.430247 1 aggregator.go:171] initial CRD sync complete... + I1102 23:25:23.430256 1 autoregister_controller.go:144] Starting autoregister controller + I1102 23:25:23.430260 1 cache.go:32] Waiting for caches to sync for autoregister controller + I1102 23:25:23.430264 1 cache.go:39] Caches are synced for autoregister controller + I1102 23:25:23.444973 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io + I1102 23:25:24.324625 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000 + I1102 23:25:24.326768 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000 + I1102 23:25:24.326777 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist. 
+ I1102 23:25:24.554233 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io + I1102 23:25:24.572440 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io + I1102 23:25:24.627154 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"} + W1102 23:25:24.629972 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.76.2] + I1102 23:25:24.630504 1 controller.go:667] quota admission added evaluator for: endpoints + I1102 23:25:24.632825 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io + I1102 23:25:25.347653 1 controller.go:667] quota admission added evaluator for: serviceaccounts + I1102 23:25:25.851467 1 controller.go:667] quota admission added evaluator for: deployments.apps + I1102 23:25:25.856002 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"} + I1102 23:25:25.859208 1 controller.go:667] quota admission added evaluator for: daemonsets.apps + I1102 23:25:30.999351 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:25:31.001310 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:25:31.198382 1 controller.go:667] quota admission added evaluator for: replicasets.apps + I1102 23:25:31.448343 1 controller.go:667] quota admission added evaluator for: controllerrevisions.apps + I1102 23:26:06.269699 1 alloc.go:328] "allocated clusterIPs" service="default/netcat" clusterIPs={"IPv4":"10.108.236.102"} + E1102 23:26:15.372739 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:44694: use of closed network connection + E1102 23:26:15.445401 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:44716: use of closed network connection + E1102 23:26:15.604969 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:44732: use of closed network connection + E1102 23:26:20.675029 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:44750: use of closed network connection + E1102 23:26:20.759567 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:59786: use of closed network connection + E1102 23:26:20.838964 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:59810: use of closed network connection + E1102 23:26:20.912059 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:59830: use of closed network connection + E1102 23:26:20.984362 1 conn.go:339] Error on socket receive: read tcp 192.168.76.2:8443->192.168.76.1:59836: use of closed network connection + + + >>> host: /etc/cni: + /etc/cni/net.d/cni.lock + /etc/cni/net.d/87-podman-bridge.conflist.mk_disabled + { + "cniVersion": "0.4.0", + "name": "podman", + "plugins": [ + { + "type": "bridge", + "bridge": "cni-podman0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [{ "dst": "0.0.0.0/0" }], + "ranges": [ + [ + { + "subnet": "10.88.0.0/16", + "gateway": "10.88.0.1" + } + ] + ] + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type": "firewall" + }, + { + "type": "tuning" + } + ] + } + /etc/cni/net.d/10-crio-bridge.conflist.disabled.mk_disabled + { + "cniVersion": "1.0.0", + "name": "crio", + "plugins": [ + { + "type": "bridge", + 
"bridge": "cni0", + "isGateway": true, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "routes": [ + { "dst": "0.0.0.0/0" }, + { "dst": "::/0" } + ], + "ranges": [ + [{ "subnet": "10.85.0.0/16" }], + [{ "subnet": "1100:200::/24" }] + ] + } + } + ] + } + /etc/cni/net.d/1-k8s.conflist + + { + "cniVersion": "0.4.0", + "name": "bridge", + "plugins": [ + { + "type": "bridge", + "bridge": "bridge", + "addIf": "true", + "isDefaultGateway": true, + "forceAddress": false, + "ipMasq": true, + "hairpinMode": true, + "ipam": { + "type": "host-local", + "subnet": "10.244.0.0/16" + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type": "firewall" + } + ] + } + + + >>> host: ip a s: + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever + 2: tunl0@NONE: mtu 1480 qdisc noop state DOWN group default qlen 1000 + link/ipip 0.0.0.0 brd 0.0.0.0 + 3: eth0@if375: mtu 1500 qdisc noqueue state UP group default + link/ether 72:c8:43:eb:da:b3 brd ff:ff:ff:ff:ff:ff link-netnsid 0 + inet 192.168.76.2/24 brd 192.168.76.255 scope global eth0 + valid_lft forever preferred_lft forever + 4: docker0: mtu 1500 qdisc noqueue state DOWN group default + link/ether b6:26:e0:7b:4c:71 brd ff:ff:ff:ff:ff:ff + inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0 + valid_lft forever preferred_lft forever + 5: bridge: mtu 1500 qdisc noqueue state UP group default qlen 1000 + link/ether 5a:90:d0:1c:b4:59 brd ff:ff:ff:ff:ff:ff + inet 10.244.0.1/16 brd 10.244.255.255 scope global bridge + valid_lft forever preferred_lft forever + inet6 fe80::5890:d0ff:fe1c:b459/64 scope link + valid_lft forever preferred_lft forever + 6: veth69117a61@if3: mtu 1500 qdisc noqueue master bridge state UP group default + link/ether d6:7d:42:21:87:a0 brd ff:ff:ff:ff:ff:ff link-netnsid 1 + inet6 fe80::d47d:42ff:fe21:87a0/64 scope link + valid_lft forever preferred_lft forever + 7: veth2fd7cd0d@if3: mtu 1500 qdisc noqueue master bridge state UP group default + link/ether 42:78:e3:cf:ca:b8 brd ff:ff:ff:ff:ff:ff link-netnsid 2 + inet6 fe80::4078:e3ff:fecf:cab8/64 scope link + valid_lft forever preferred_lft forever + + + >>> host: ip r s: + default via 192.168.76.1 dev eth0 + 10.244.0.0/16 dev bridge proto kernel scope link src 10.244.0.1 + 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown + 192.168.76.0/24 dev eth0 proto kernel scope link src 192.168.76.2 + + + >>> host: iptables-save: + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:26:24 2025 + *mangle + :PREROUTING ACCEPT [24778:56404049] + :INPUT ACCEPT [24719:56399811] + :FORWARD ACCEPT [59:4238] + :OUTPUT ACCEPT [18345:5782982] + :POSTROUTING ACCEPT [18404:5787220] + :KUBE-IPTABLES-HINT - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-PROXY-CANARY - [0:0] + COMMIT + # Completed on Sun Nov 2 23:26:24 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:26:24 2025 + *filter + :INPUT ACCEPT [4834:1084242] + :FORWARD ACCEPT [0:0] + :OUTPUT ACCEPT [4781:1388440] + :CNI-ADMIN - [0:0] + :CNI-FORWARD - [0:0] + :DOCKER - [0:0] + :DOCKER-BRIDGE - [0:0] + :DOCKER-CT - [0:0] + :DOCKER-FORWARD - [0:0] + :DOCKER-ISOLATION-STAGE-1 - [0:0] + :DOCKER-ISOLATION-STAGE-2 - [0:0] + :DOCKER-USER - [0:0] + :KUBE-EXTERNAL-SERVICES - [0:0] + :KUBE-FIREWALL - [0:0] + :KUBE-FORWARD - [0:0] + 
:KUBE-KUBELET-CANARY - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-PROXY-FIREWALL - [0:0] + :KUBE-SERVICES - [0:0] + -A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A INPUT -m comment --comment "kubernetes health check service ports" -j KUBE-NODEPORTS + -A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A INPUT -j KUBE-FIREWALL + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A FORWARD -m comment --comment "kubernetes forwarding rules" -j KUBE-FORWARD + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A FORWARD -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES + -A FORWARD -m comment --comment "CNI firewall plugin rules" -j CNI-FORWARD + -A FORWARD -j DOCKER-USER + -A FORWARD -j DOCKER-FORWARD + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes load balancer firewall" -j KUBE-PROXY-FIREWALL + -A OUTPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -j KUBE-FIREWALL + -A CNI-FORWARD -m comment --comment "CNI firewall plugin admin overrides" -j CNI-ADMIN + -A CNI-FORWARD -d 10.244.0.2/32 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A CNI-FORWARD -s 10.244.0.2/32 -j ACCEPT + -A CNI-FORWARD -d 10.244.0.3/32 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A CNI-FORWARD -s 10.244.0.3/32 -j ACCEPT + -A DOCKER ! -i docker0 -o docker0 -j DROP + -A DOCKER-BRIDGE -o docker0 -j DOCKER + -A DOCKER-CT -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + -A DOCKER-FORWARD -j DOCKER-CT + -A DOCKER-FORWARD -j DOCKER-ISOLATION-STAGE-1 + -A DOCKER-FORWARD -j DOCKER-BRIDGE + -A DOCKER-FORWARD -i docker0 -j ACCEPT + -A DOCKER-ISOLATION-STAGE-1 -i docker0 ! -o docker0 -j DOCKER-ISOLATION-STAGE-2 + -A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP + -A KUBE-FIREWALL ! -s 127.0.0.0/8 -d 127.0.0.0/8 -m comment --comment "block incoming localnet connections" -m conntrack ! 
--ctstate RELATED,ESTABLISHED,DNAT -j DROP + -A KUBE-FORWARD -m conntrack --ctstate INVALID -m nfacct --nfacct-name ct_state_invalid_dropped_pkts -j DROP + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT + -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT + COMMIT + # Completed on Sun Nov 2 23:26:24 2025 + # Generated by iptables-save v1.8.9 on Sun Nov 2 23:26:24 2025 + *nat + :PREROUTING ACCEPT [36:2160] + :INPUT ACCEPT [36:2160] + :OUTPUT ACCEPT [67:4020] + :POSTROUTING ACCEPT [67:4020] + :CNI-2fef722eec9a55f58fde521a - [0:0] + :CNI-364d344cd05106382925c025 - [0:0] + :DOCKER - [0:0] + :DOCKER_OUTPUT - [0:0] + :DOCKER_POSTROUTING - [0:0] + :KUBE-KUBELET-CANARY - [0:0] + :KUBE-MARK-MASQ - [0:0] + :KUBE-NODEPORTS - [0:0] + :KUBE-POSTROUTING - [0:0] + :KUBE-PROXY-CANARY - [0:0] + :KUBE-SEP-IT2ZTR26TO4XFPTO - [0:0] + :KUBE-SEP-M66F6TD25XSFZOMV - [0:0] + :KUBE-SEP-N4G2XR5TDX7PQE7P - [0:0] + :KUBE-SEP-UTWFOSUDHOCXYA2F - [0:0] + :KUBE-SEP-YIL6JZP7A3QYXJU2 - [0:0] + :KUBE-SERVICES - [0:0] + :KUBE-SVC-ERIFXISQEP7F7OF4 - [0:0] + :KUBE-SVC-JD5MR3NA4I4DYORP - [0:0] + :KUBE-SVC-NPX46M4PTMTKRN6Y - [0:0] + :KUBE-SVC-TCOU7JCQXEZGVUNU - [0:0] + :KUBE-SVC-WDP22YZC5S6MZWYX - [0:0] + -A PREROUTING -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A PREROUTING -d 192.168.76.1/32 -j DOCKER_OUTPUT + -A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER + -A OUTPUT -m comment --comment "kubernetes service portals" -j KUBE-SERVICES + -A OUTPUT -d 192.168.76.1/32 -j DOCKER_OUTPUT + -A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER + -A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING + -A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE + -A POSTROUTING -d 192.168.76.1/32 -j DOCKER_POSTROUTING + -A POSTROUTING -s 10.244.0.2/32 -m comment --comment "name: \"bridge\" id: \"ae92d332e0bd53b97b038ad6618378464802b339c47f0192d62684e40095ce8b\"" -j CNI-364d344cd05106382925c025 + -A POSTROUTING -s 10.244.0.3/32 -m comment --comment "name: \"bridge\" id: \"cd4b0636d616ead7c17063b9a02818b878ccfdf2bdb7afa4c67898e6125f9a2e\"" -j CNI-2fef722eec9a55f58fde521a + -A CNI-2fef722eec9a55f58fde521a -d 10.244.0.0/16 -m comment --comment "name: \"bridge\" id: \"cd4b0636d616ead7c17063b9a02818b878ccfdf2bdb7afa4c67898e6125f9a2e\"" -j ACCEPT + -A CNI-2fef722eec9a55f58fde521a ! -d 224.0.0.0/4 -m comment --comment "name: \"bridge\" id: \"cd4b0636d616ead7c17063b9a02818b878ccfdf2bdb7afa4c67898e6125f9a2e\"" -j MASQUERADE + -A CNI-364d344cd05106382925c025 -d 10.244.0.0/16 -m comment --comment "name: \"bridge\" id: \"ae92d332e0bd53b97b038ad6618378464802b339c47f0192d62684e40095ce8b\"" -j ACCEPT + -A CNI-364d344cd05106382925c025 ! -d 224.0.0.0/4 -m comment --comment "name: \"bridge\" id: \"ae92d332e0bd53b97b038ad6618378464802b339c47f0192d62684e40095ce8b\"" -j MASQUERADE + -A DOCKER -i docker0 -j RETURN + -A DOCKER_OUTPUT -d 192.168.76.1/32 -p tcp -m tcp --dport 53 -j DNAT --to-destination 127.0.0.11:34309 + -A DOCKER_OUTPUT -d 192.168.76.1/32 -p udp -m udp --dport 53 -j DNAT --to-destination 127.0.0.11:34134 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p tcp -m tcp --sport 34309 -j SNAT --to-source 192.168.76.1:53 + -A DOCKER_POSTROUTING -s 127.0.0.11/32 -p udp -m udp --sport 34134 -j SNAT --to-source 192.168.76.1:53 + -A KUBE-MARK-MASQ -j MARK --set-xmark 0x4000/0x4000 + -A KUBE-POSTROUTING -m mark ! 
--mark 0x4000/0x4000 -j RETURN + -A KUBE-POSTROUTING -j MARK --set-xmark 0x4000/0x0 + -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE --random-fully + -A KUBE-SEP-IT2ZTR26TO4XFPTO -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-MARK-MASQ + -A KUBE-SEP-IT2ZTR26TO4XFPTO -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m tcp -j DNAT --to-destination 10.244.0.2:53 + -A KUBE-SEP-M66F6TD25XSFZOMV -s 192.168.76.2/32 -m comment --comment "default/kubernetes:https" -j KUBE-MARK-MASQ + -A KUBE-SEP-M66F6TD25XSFZOMV -p tcp -m comment --comment "default/kubernetes:https" -m tcp -j DNAT --to-destination 192.168.76.2:8443 + -A KUBE-SEP-N4G2XR5TDX7PQE7P -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:metrics" -j KUBE-MARK-MASQ + -A KUBE-SEP-N4G2XR5TDX7PQE7P -p tcp -m comment --comment "kube-system/kube-dns:metrics" -m tcp -j DNAT --to-destination 10.244.0.2:9153 + -A KUBE-SEP-UTWFOSUDHOCXYA2F -s 10.244.0.3/32 -m comment --comment "default/netcat" -j KUBE-MARK-MASQ + -A KUBE-SEP-UTWFOSUDHOCXYA2F -p tcp -m comment --comment "default/netcat" -m tcp -j DNAT --to-destination 10.244.0.3:8080 + -A KUBE-SEP-YIL6JZP7A3QYXJU2 -s 10.244.0.2/32 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-MARK-MASQ + -A KUBE-SEP-YIL6JZP7A3QYXJU2 -p udp -m comment --comment "kube-system/kube-dns:dns" -m udp -j DNAT --to-destination 10.244.0.2:53 + -A KUBE-SERVICES -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-SVC-NPX46M4PTMTKRN6Y + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-SVC-JD5MR3NA4I4DYORP + -A KUBE-SERVICES -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-SVC-TCOU7JCQXEZGVUNU + -A KUBE-SERVICES -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-SVC-ERIFXISQEP7F7OF4 + -A KUBE-SERVICES -d 10.108.236.102/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-SVC-WDP22YZC5S6MZWYX + -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS + -A KUBE-SVC-ERIFXISQEP7F7OF4 ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp cluster IP" -m tcp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-ERIFXISQEP7F7OF4 -m comment --comment "kube-system/kube-dns:dns-tcp -> 10.244.0.2:53" -j KUBE-SEP-IT2ZTR26TO4XFPTO + -A KUBE-SVC-JD5MR3NA4I4DYORP ! -s 10.244.0.0/16 -d 10.96.0.10/32 -p tcp -m comment --comment "kube-system/kube-dns:metrics cluster IP" -m tcp --dport 9153 -j KUBE-MARK-MASQ + -A KUBE-SVC-JD5MR3NA4I4DYORP -m comment --comment "kube-system/kube-dns:metrics -> 10.244.0.2:9153" -j KUBE-SEP-N4G2XR5TDX7PQE7P + -A KUBE-SVC-NPX46M4PTMTKRN6Y ! -s 10.244.0.0/16 -d 10.96.0.1/32 -p tcp -m comment --comment "default/kubernetes:https cluster IP" -m tcp --dport 443 -j KUBE-MARK-MASQ + -A KUBE-SVC-NPX46M4PTMTKRN6Y -m comment --comment "default/kubernetes:https -> 192.168.76.2:8443" -j KUBE-SEP-M66F6TD25XSFZOMV + -A KUBE-SVC-TCOU7JCQXEZGVUNU ! 
-s 10.244.0.0/16 -d 10.96.0.10/32 -p udp -m comment --comment "kube-system/kube-dns:dns cluster IP" -m udp --dport 53 -j KUBE-MARK-MASQ + -A KUBE-SVC-TCOU7JCQXEZGVUNU -m comment --comment "kube-system/kube-dns:dns -> 10.244.0.2:53" -j KUBE-SEP-YIL6JZP7A3QYXJU2 + -A KUBE-SVC-WDP22YZC5S6MZWYX ! -s 10.244.0.0/16 -d 10.108.236.102/32 -p tcp -m comment --comment "default/netcat cluster IP" -m tcp --dport 8080 -j KUBE-MARK-MASQ + -A KUBE-SVC-WDP22YZC5S6MZWYX -m comment --comment "default/netcat -> 10.244.0.3:8080" -j KUBE-SEP-UTWFOSUDHOCXYA2F + COMMIT + # Completed on Sun Nov 2 23:26:24 2025 + + + >>> host: iptables table nat: + Chain PREROUTING (policy ACCEPT 37 packets, 2220 bytes) + pkts bytes target prot opt in out source destination + 52 3280 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 1 85 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.76.1 + 45 2700 DOCKER 0 -- * * 0.0.0.0/0 0.0.0.0/0 ADDRTYPE match dst-type LOCAL + + Chain INPUT (policy ACCEPT 37 packets, 2220 bytes) + pkts bytes target prot opt in out source destination + + Chain OUTPUT (policy ACCEPT 68 packets, 4080 bytes) + pkts bytes target prot opt in out source destination + 578 47765 KUBE-SERVICES 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service portals */ + 363 34979 DOCKER_OUTPUT 0 -- * * 0.0.0.0/0 192.168.76.1 + 101 6060 DOCKER 0 -- * * 0.0.0.0/0 !127.0.0.0/8 ADDRTYPE match dst-type LOCAL + + Chain POSTROUTING (policy ACCEPT 68 packets, 4080 bytes) + pkts bytes target prot opt in out source destination + 588 48500 KUBE-POSTROUTING 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes postrouting rules */ + 0 0 MASQUERADE 0 -- * !docker0 172.17.0.0/16 0.0.0.0/0 + 0 0 DOCKER_POSTROUTING 0 -- * * 0.0.0.0/0 192.168.76.1 + 3 180 CNI-364d344cd05106382925c025 0 -- * * 10.244.0.2 0.0.0.0/0 /* name: "bridge" id: "ae92d332e0bd53b97b038ad6618378464802b339c47f0192d62684e40095ce8b" */ + 9 675 CNI-2fef722eec9a55f58fde521a 0 -- * * 10.244.0.3 0.0.0.0/0 /* name: "bridge" id: "cd4b0636d616ead7c17063b9a02818b878ccfdf2bdb7afa4c67898e6125f9a2e" */ + + Chain CNI-2fef722eec9a55f58fde521a (1 references) + pkts bytes target prot opt in out source destination + 9 675 ACCEPT 0 -- * * 0.0.0.0/0 10.244.0.0/16 /* name: "bridge" id: "cd4b0636d616ead7c17063b9a02818b878ccfdf2bdb7afa4c67898e6125f9a2e" */ + 0 0 MASQUERADE 0 -- * * 0.0.0.0/0 !224.0.0.0/4 /* name: "bridge" id: "cd4b0636d616ead7c17063b9a02818b878ccfdf2bdb7afa4c67898e6125f9a2e" */ + + Chain CNI-364d344cd05106382925c025 (1 references) + pkts bytes target prot opt in out source destination + 0 0 ACCEPT 0 -- * * 0.0.0.0/0 10.244.0.0/16 /* name: "bridge" id: "ae92d332e0bd53b97b038ad6618378464802b339c47f0192d62684e40095ce8b" */ + 3 180 MASQUERADE 0 -- * * 0.0.0.0/0 !224.0.0.0/4 /* name: "bridge" id: "ae92d332e0bd53b97b038ad6618378464802b339c47f0192d62684e40095ce8b" */ + + Chain DOCKER (2 references) + pkts bytes target prot opt in out source destination + 0 0 RETURN 0 -- docker0 * 0.0.0.0/0 0.0.0.0/0 + + Chain DOCKER_OUTPUT (2 references) + pkts bytes target prot opt in out source destination + 0 0 DNAT 6 -- * * 0.0.0.0/0 192.168.76.1 tcp dpt:53 to:127.0.0.11:34309 + 364 35064 DNAT 17 -- * * 0.0.0.0/0 192.168.76.1 udp dpt:53 to:127.0.0.11:34134 + + Chain DOCKER_POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 0 0 SNAT 6 -- * * 127.0.0.11 0.0.0.0/0 tcp spt:34309 to:192.168.76.1:53 + 0 0 SNAT 17 -- * * 127.0.0.11 0.0.0.0/0 udp spt:34134 to:192.168.76.1:53 + + Chain KUBE-KUBELET-CANARY (0 references) + pkts bytes target prot opt in out source 
destination + + Chain KUBE-MARK-MASQ (10 references) + pkts bytes target prot opt in out source destination + 1 60 MARK 0 -- * * 0.0.0.0/0 0.0.0.0/0 MARK or 0x4000 + + Chain KUBE-NODEPORTS (1 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-POSTROUTING (1 references) + pkts bytes target prot opt in out source destination + 77 4755 RETURN 0 -- * * 0.0.0.0/0 0.0.0.0/0 mark match ! 0x4000/0x4000 + 1 60 MARK 0 -- * * 0.0.0.0/0 0.0.0.0/0 MARK xor 0x4000 + 1 60 MASQUERADE 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service traffic requiring SNAT */ random-fully + + Chain KUBE-PROXY-CANARY (0 references) + pkts bytes target prot opt in out source destination + + Chain KUBE-SEP-IT2ZTR26TO4XFPTO (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp */ tcp to:10.244.0.2:53 + + Chain KUBE-SEP-M66F6TD25XSFZOMV (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 0 -- * * 192.168.76.2 0.0.0.0/0 /* default/kubernetes:https */ + 2 120 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https */ tcp to:192.168.76.2:8443 + + Chain KUBE-SEP-N4G2XR5TDX7PQE7P (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:metrics */ + 0 0 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:metrics */ tcp to:10.244.0.2:9153 + + Chain KUBE-SEP-UTWFOSUDHOCXYA2F (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 0 -- * * 10.244.0.3 0.0.0.0/0 /* default/netcat */ + 1 60 DNAT 6 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat */ tcp to:10.244.0.3:8080 + + Chain KUBE-SEP-YIL6JZP7A3QYXJU2 (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 0 -- * * 10.244.0.2 0.0.0.0/0 /* kube-system/kube-dns:dns */ + 8 615 DNAT 17 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns */ udp to:10.244.0.2:53 + + Chain KUBE-SERVICES (2 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-SVC-NPX46M4PTMTKRN6Y 6 -- * * 0.0.0.0/0 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 0 0 KUBE-SVC-JD5MR3NA4I4DYORP 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 8 615 KUBE-SVC-TCOU7JCQXEZGVUNU 17 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 1 60 KUBE-SVC-ERIFXISQEP7F7OF4 6 -- * * 0.0.0.0/0 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 1 60 KUBE-SVC-WDP22YZC5S6MZWYX 6 -- * * 0.0.0.0/0 10.108.236.102 /* default/netcat cluster IP */ tcp dpt:8080 + 103 6180 KUBE-NODEPORTS 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL + + Chain KUBE-SVC-ERIFXISQEP7F7OF4 (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:53 + 1 60 KUBE-SEP-IT2ZTR26TO4XFPTO 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns-tcp -> 10.244.0.2:53 */ + + Chain KUBE-SVC-JD5MR3NA4I4DYORP (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:metrics cluster IP */ tcp dpt:9153 + 0 0 KUBE-SEP-N4G2XR5TDX7PQE7P 0 -- * * 
0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:metrics -> 10.244.0.2:9153 */ + + Chain KUBE-SVC-NPX46M4PTMTKRN6Y (1 references) + pkts bytes target prot opt in out source destination + 1 60 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.96.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:443 + 2 120 KUBE-SEP-M66F6TD25XSFZOMV 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/kubernetes:https -> 192.168.76.2:8443 */ + + Chain KUBE-SVC-TCOU7JCQXEZGVUNU (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 17 -- * * !10.244.0.0/16 10.96.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:53 + 8 615 KUBE-SEP-YIL6JZP7A3QYXJU2 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* kube-system/kube-dns:dns -> 10.244.0.2:53 */ + + Chain KUBE-SVC-WDP22YZC5S6MZWYX (1 references) + pkts bytes target prot opt in out source destination + 0 0 KUBE-MARK-MASQ 6 -- * * !10.244.0.0/16 10.108.236.102 /* default/netcat cluster IP */ tcp dpt:8080 + 1 60 KUBE-SEP-UTWFOSUDHOCXYA2F 0 -- * * 0.0.0.0/0 0.0.0.0/0 /* default/netcat -> 10.244.0.3:8080 */ + + + >>> k8s: describe kube-proxy daemon set: + Name: kube-proxy + Namespace: kube-system + Selector: k8s-app=kube-proxy + Node-Selector: kubernetes.io/os=linux + Labels: k8s-app=kube-proxy + Annotations: deprecated.daemonset.template.generation: 1 + Desired Number of Nodes Scheduled: 1 + Current Number of Nodes Scheduled: 1 + Number of Nodes Scheduled with Up-to-date Pods: 1 + Number of Nodes Scheduled with Available Pods: 1 + Number of Nodes Misscheduled: 0 + Pods Status: 1 Running / 0 Waiting / 0 Succeeded / 0 Failed + Pod Template: + Labels: k8s-app=kube-proxy + Service Account: kube-proxy + Containers: + kube-proxy: + Image: registry.k8s.io/kube-proxy:v1.34.1 + Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + --hostname-override=$(NODE_NAME) + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + Priority Class Name: system-node-critical + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SuccessfulCreate 55s daemonset-controller Created pod: kube-proxy-nlt66 + + + >>> k8s: describe kube-proxy pod(s): + Name: kube-proxy-nlt66 + Namespace: kube-system + Priority: 2000001000 + Priority Class Name: system-node-critical + Service Account: kube-proxy + Node: enable-default-cni-999044/192.168.76.2 + Start Time: Sun, 02 Nov 2025 23:25:31 +0000 + Labels: controller-revision-hash=66486579fc + k8s-app=kube-proxy + pod-template-generation=1 + Annotations: + Status: Running + IP: 192.168.76.2 + IPs: + IP: 192.168.76.2 + Controlled By: DaemonSet/kube-proxy + Containers: + kube-proxy: + Container ID: docker://c5a1cf7eb578794961c439f44eb9beff4e42c3fab2e7cde0e4cf30ee30d38363 + Image: registry.k8s.io/kube-proxy:v1.34.1 + Image ID: docker-pullable://registry.k8s.io/kube-proxy@sha256:913cc83ca0b5588a81d86ce8eedeb3ed1e9c1326e81852a1ea4f622b74ff749a + Port: + Host Port: + Command: + /usr/local/bin/kube-proxy + --config=/var/lib/kube-proxy/config.conf + --hostname-override=$(NODE_NAME) 
+ State: Running + Started: Sun, 02 Nov 2025 23:25:31 +0000 + Ready: True + Restart Count: 0 + Environment: + NODE_NAME: (v1:spec.nodeName) + Mounts: + /lib/modules from lib-modules (ro) + /run/xtables.lock from xtables-lock (rw) + /var/lib/kube-proxy from kube-proxy (rw) + /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-zxhpj (ro) + Conditions: + Type Status + PodReadyToStartContainers True + Initialized True + Ready True + ContainersReady True + PodScheduled True + Volumes: + kube-proxy: + Type: ConfigMap (a volume populated by a ConfigMap) + Name: kube-proxy + Optional: false + xtables-lock: + Type: HostPath (bare host directory volume) + Path: /run/xtables.lock + HostPathType: FileOrCreate + lib-modules: + Type: HostPath (bare host directory volume) + Path: /lib/modules + HostPathType: + kube-api-access-zxhpj: + Type: Projected (a volume that contains injected data from multiple sources) + TokenExpirationSeconds: 3607 + ConfigMapName: kube-root-ca.crt + Optional: false + DownwardAPI: true + QoS Class: BestEffort + Node-Selectors: kubernetes.io/os=linux + Tolerations: op=Exists + node.kubernetes.io/disk-pressure:NoSchedule op=Exists + node.kubernetes.io/memory-pressure:NoSchedule op=Exists + node.kubernetes.io/network-unavailable:NoSchedule op=Exists + node.kubernetes.io/not-ready:NoExecute op=Exists + node.kubernetes.io/pid-pressure:NoSchedule op=Exists + node.kubernetes.io/unreachable:NoExecute op=Exists + node.kubernetes.io/unschedulable:NoSchedule op=Exists + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Scheduled 55s default-scheduler Successfully assigned kube-system/kube-proxy-nlt66 to enable-default-cni-999044 + Normal Pulled 55s kubelet Container image "registry.k8s.io/kube-proxy:v1.34.1" already present on machine + Normal Created 55s kubelet Created container: kube-proxy + Normal Started 55s kubelet Started container kube-proxy + + + >>> k8s: kube-proxy logs: + I1102 23:25:32.016342 1 server_linux.go:53] "Using iptables proxy" + I1102 23:25:32.052773 1 shared_informer.go:349] "Waiting for caches to sync" controller="node informer cache" + I1102 23:25:32.152929 1 shared_informer.go:356] "Caches are synced" controller="node informer cache" + I1102 23:25:32.152948 1 server.go:219] "Successfully retrieved NodeIPs" NodeIPs=["192.168.76.2"] + E1102 23:25:32.153006 1 server.go:256] "Kube-proxy configuration may be incomplete or incorrect" err="nodePortAddresses is unset; NodePort connections will be accepted on all local IPs. 
Consider using `--nodeport-addresses primary`" + I1102 23:25:32.170387 1 server.go:265] "kube-proxy running in dual-stack mode" primary ipFamily="IPv4" + I1102 23:25:32.170416 1 server_linux.go:132] "Using iptables Proxier" + I1102 23:25:32.174484 1 proxier.go:242] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" ipFamily="IPv4" + I1102 23:25:32.174763 1 server.go:527] "Version info" version="v1.34.1" + I1102 23:25:32.174778 1 server.go:529] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + I1102 23:25:32.175494 1 config.go:309] "Starting node config controller" + I1102 23:25:32.175503 1 shared_informer.go:349] "Waiting for caches to sync" controller="node config" + I1102 23:25:32.175508 1 shared_informer.go:356] "Caches are synced" controller="node config" + I1102 23:25:32.175602 1 config.go:403] "Starting serviceCIDR config controller" + I1102 23:25:32.175605 1 shared_informer.go:349] "Waiting for caches to sync" controller="serviceCIDR config" + I1102 23:25:32.175728 1 config.go:200] "Starting service config controller" + I1102 23:25:32.175754 1 shared_informer.go:349] "Waiting for caches to sync" controller="service config" + I1102 23:25:32.175737 1 config.go:106] "Starting endpoint slice config controller" + I1102 23:25:32.175805 1 shared_informer.go:349] "Waiting for caches to sync" controller="endpoint slice config" + I1102 23:25:32.276283 1 shared_informer.go:356] "Caches are synced" controller="endpoint slice config" + I1102 23:25:32.276305 1 shared_informer.go:356] "Caches are synced" controller="service config" + I1102 23:25:32.276315 1 shared_informer.go:356] "Caches are synced" controller="serviceCIDR config" + + + >>> host: kubelet daemon status: + ● kubelet.service - kubelet: The Kubernetes Node Agent + Loaded: loaded (/lib/systemd/system/kubelet.service; disabled; preset: enabled) + Drop-In: /etc/systemd/system/kubelet.service.d + └─10-kubeadm.conf + Active: active (running) since Sun 2025-11-02 23:25:25 UTC; 1min 0s ago + Docs: http://kubernetes.io/docs/ + Main PID: 2227 (kubelet) + Tasks: 15 (limit: 629145) + Memory: 32.9M + CPU: 1.073s + CGroup: /system.slice/kubelet.service + └─2227 /var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=enable-default-cni-999044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2 + + Nov 02 23:25:32 enable-default-cni-999044 kubelet[2227]: I1102 23:25:32.820307 2227 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fd7b2317-e3fe-43db-94b2-9750284d99e6-config-volume\") on node \"enable-default-cni-999044\" DevicePath \"\"" + Nov 02 23:25:34 enable-default-cni-999044 kubelet[2227]: I1102 23:25:34.125503 2227 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" + Nov 02 23:25:35 enable-default-cni-999044 kubelet[2227]: I1102 23:25:35.596115 2227 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd7b2317-e3fe-43db-94b2-9750284d99e6" path="/var/lib/kubelet/pods/fd7b2317-e3fe-43db-94b2-9750284d99e6/volumes" + Nov 02 23:25:36 
enable-default-cni-999044 kubelet[2227]: I1102 23:25:36.056614 2227 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24" + Nov 02 23:25:36 enable-default-cni-999044 kubelet[2227]: I1102 23:25:36.057122 2227 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24" + Nov 02 23:26:02 enable-default-cni-999044 kubelet[2227]: I1102 23:26:02.735851 2227 scope.go:117] "RemoveContainer" containerID="6591222ccfe8c2a08672c63541f176c814736970da9c70afa258dde83f641d2e" + Nov 02 23:26:06 enable-default-cni-999044 kubelet[2227]: I1102 23:26:06.281779 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knn6m\" (UniqueName: \"kubernetes.io/projected/89714356-9111-4f71-953e-c8949c1c52ec-kube-api-access-knn6m\") pod \"netcat-cd4db9dbf-h69w6\" (UID: \"89714356-9111-4f71-953e-c8949c1c52ec\") " pod="default/netcat-cd4db9dbf-h69w6" + Nov 02 23:26:08 enable-default-cni-999044 kubelet[2227]: I1102 23:26:08.781417 2227 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/netcat-cd4db9dbf-h69w6" podStartSLOduration=1.7814128710000001 podStartE2EDuration="2.781389708s" podCreationTimestamp="2025-11-02 23:26:06 +0000 UTC" firstStartedPulling="2025-11-02 23:26:06.682134258 +0000 UTC m=+41.138707713" lastFinishedPulling="2025-11-02 23:26:07.682111106 +0000 UTC m=+42.138684550" observedRunningTime="2025-11-02 23:26:08.781171695 +0000 UTC m=+43.237745153" watchObservedRunningTime="2025-11-02 23:26:08.781389708 +0000 UTC m=+43.237963166" + Nov 02 23:26:20 enable-default-cni-999044 kubelet[2227]: E1102 23:26:20.674891 2227 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp [::1]:45992->[::1]:39843: write tcp [::1]:45992->[::1]:39843: write: broken pipe + Nov 02 23:26:20 enable-default-cni-999044 kubelet[2227]: E1102 23:26:20.912010 2227 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp [::1]:51050->[::1]:39843: write tcp [::1]:51050->[::1]:39843: write: broken pipe + + + >>> host: kubelet daemon config: + # ]8;;file://enable-default-cni-999044/lib/systemd/system/kubelet.service/lib/systemd/system/kubelet.service]8;; + [Unit] + Description=kubelet: The Kubernetes Node Agent + Documentation=http://kubernetes.io/docs/ + StartLimitIntervalSec=0 + + [Service] + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet + Restart=always + # Tuned for local dev: faster than upstream default (10s), but slower than systemd default (100ms) + RestartSec=600ms + + [Install] + WantedBy=multi-user.target + + # ]8;;file://enable-default-cni-999044/etc/systemd/system/kubelet.service.d/10-kubeadm.conf/etc/systemd/system/kubelet.service.d/10-kubeadm.conf]8;; + [Unit] + Wants=docker.socket + + [Service] + ExecStart= + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=enable-default-cni-999044 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2 + + [Install] + + + >>> k8s: kubelet logs: + Nov 02 23:25:17 enable-default-cni-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 404. 
+ Nov 02 23:25:17 enable-default-cni-999044 kubelet[1540]: E1102 23:25:17.436218 1540 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:17 enable-default-cni-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:17 enable-default-cni-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:18 enable-default-cni-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 1. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:18 enable-default-cni-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 457 and the job result is done. + Nov 02 23:25:18 enable-default-cni-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 457. + Nov 02 23:25:18 enable-default-cni-999044 kubelet[1680]: E1102 23:25:18.274403 1680 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:18 enable-default-cni-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:18 enable-default-cni-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:18 enable-default-cni-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 2. 
+ ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:18 enable-default-cni-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 510 and the job result is done. + Nov 02 23:25:19 enable-default-cni-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 510. + Nov 02 23:25:19 enable-default-cni-999044 kubelet[1710]: E1102 23:25:19.026434 1710 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:19 enable-default-cni-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:19 enable-default-cni-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:19 enable-default-cni-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 3. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:19 enable-default-cni-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 563 and the job result is done. + Nov 02 23:25:19 enable-default-cni-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 563. 
+ Nov 02 23:25:19 enable-default-cni-999044 kubelet[1721]: E1102 23:25:19.772577 1721 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:19 enable-default-cni-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:19 enable-default-cni-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:20 enable-default-cni-999044 systemd[1]: kubelet.service: Scheduled restart job, restart counter is at 4. + ░░ Subject: Automatic restarting of a unit has been scheduled + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ Automatic restarting of the unit kubelet.service has been scheduled, as the result for + ░░ the configured Restart= setting for the unit. + Nov 02 23:25:20 enable-default-cni-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 616 and the job result is done. + Nov 02 23:25:20 enable-default-cni-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 616. + Nov 02 23:25:20 enable-default-cni-999044 kubelet[1732]: E1102 23:25:20.529082 1732 run.go:72] "command failed" err="failed to load kubelet config file, path: /var/lib/kubelet/config.yaml, error: failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file \"/var/lib/kubelet/config.yaml\", error: open /var/lib/kubelet/config.yaml: no such file or directory" + Nov 02 23:25:20 enable-default-cni-999044 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE + ░░ Subject: Unit process exited + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ An ExecStart= process belonging to unit kubelet.service has exited. + ░░  + ░░ The process' exit code is 'exited' and its exit status is 1. + Nov 02 23:25:20 enable-default-cni-999044 systemd[1]: kubelet.service: Failed with result 'exit-code'. + ░░ Subject: Unit failed + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has entered the 'failed' state with result 'exit-code'. + Nov 02 23:25:20 enable-default-cni-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. 
+ ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 669 and the job result is done. + Nov 02 23:25:21 enable-default-cni-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 670. + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.422299 1763 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.422370 1763 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.422398 1763 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.422409 1763 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.422623 1763 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.428761 1763 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.430909 1763 certificate_manager.go:596] "Failed while requesting a signed certificate from the control plane" err="cannot create certificate signing request: Post \"https://192.168.76.2:8443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 192.168.76.2:8443: connect: connection refused" logger="kubernetes.io/kube-apiserver-client-kubelet.UnhandledError" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.437478 1763 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.441876 1763 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. 
Defaulting to /" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.441901 1763 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.442129 1763 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.442144 1763 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"enable-default-cni-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.442240 1763 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.442247 1763 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.442302 1763 container_manager_linux.go:315] "Creating Dynamic Resource Allocation (DRA) manager" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.443326 1763 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.444324 1763 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: Get \"https://192.168.76.2:8443/api/v1/nodes?fieldSelector=metadata.name%3Denable-default-cni-999044&limit=500&resourceVersion=0\": dial tcp 192.168.76.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.444386 1763 kubelet.go:475] "Attempting to sync node with API server" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.444408 1763 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.444435 1763 kubelet.go:387] "Adding apiserver pod source" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.444451 1763 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.444816 1763 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: Get \"https://192.168.76.2:8443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 192.168.76.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" + Nov 02 23:25:21 
enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.445054 1763 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.445416 1763 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.445440 1763 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: W1102 23:25:21.445476 1763 probe.go:272] Flexvolume plugin directory at /usr/libexec/kubernetes/kubelet-plugins/volume/exec/ does not exist. Recreating. + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.446108 1763 server.go:1262] "Started kubelet" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.446160 1763 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.446466 1763 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://192.168.76.2:8443/api/v1/namespaces/default/events\": dial tcp 192.168.76.2:8443: connect: connection refused" event="&Event{ObjectMeta:{enable-default-cni-999044.187454307d4d4fdb default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:enable-default-cni-999044,UID:enable-default-cni-999044,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:enable-default-cni-999044,},FirstTimestamp:2025-11-02 23:25:21.446088667 +0000 UTC m=+0.402099299,LastTimestamp:2025-11-02 23:25:21.446088667 +0000 UTC m=+0.402099299,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:enable-default-cni-999044,}" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.446180 1763 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.446637 1763 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.446814 1763 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.447075 1763 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.447812 1763 volume_manager.go:313] "Starting Kubelet Volume Manager" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.447887 1763 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.447963 1763 server.go:310] "Adding debug handlers to kubelet server" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.450431 1763 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: Get \"https://192.168.76.2:8443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 
192.168.76.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.450501 1763 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.76.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/enable-default-cni-999044?timeout=10s\": dial tcp 192.168.76.2:8443: connect: connection refused" interval="200ms" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.450524 1763 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.450692 1763 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"enable-default-cni-999044\" not found" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.450873 1763 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.451461 1763 factory.go:223] Registration of the systemd container factory successfully + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.451534 1763 factory.go:221] Registration of the crio container factory failed: Get "http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.452942 1763 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.462457 1763 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.462533 1763 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.462547 1763 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.462968 1763 policy_none.go:49] "None policy: Start" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.462985 1763 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.462998 1763 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.463305 1763 policy_none.go:47] "Start" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.464037 1763 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.464703 1763 kubelet_network_linux.go:54] "Initialized iptables rules." 
protocol="IPv6" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.464721 1763 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.464734 1763 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.464764 1763 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.465113 1763 reflector.go:205] "Failed to watch" err="failed to list *v1.RuntimeClass: Get \"https://192.168.76.2:8443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 192.168.76.2:8443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.482500 1763 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.482567 1763 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.482579 1763 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.482713 1763 plugin_manager.go:118] "Starting Kubelet Plugin Manager" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.483147 1763 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." 
err="no imagefs label for configured runtime" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.483190 1763 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"enable-default-cni-999044\" not found" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.577506 1763 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"enable-default-cni-999044\" not found" node="enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.580999 1763 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"enable-default-cni-999044\" not found" node="enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.582994 1763 kubelet_node_status.go:75] "Attempting to register node" node="enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.583221 1763 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.76.2:8443/api/v1/nodes\": dial tcp 192.168.76.2:8443: connect: connection refused" node="enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.591789 1763 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"enable-default-cni-999044\" not found" node="enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.595431 1763 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"enable-default-cni-999044\" not found" node="enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.650838 1763 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.76.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/enable-default-cni-999044?timeout=10s\": dial tcp 192.168.76.2:8443: connect: connection refused" interval="400ms" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.652989 1763 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/d244be932e6a9f01cd727e293e0c9d59-ca-certs\") pod \"kube-controller-manager-enable-default-cni-999044\" (UID: \"d244be932e6a9f01cd727e293e0c9d59\") " pod="kube-system/kube-controller-manager-enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.653010 1763 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/d244be932e6a9f01cd727e293e0c9d59-flexvolume-dir\") pod \"kube-controller-manager-enable-default-cni-999044\" (UID: \"d244be932e6a9f01cd727e293e0c9d59\") " pod="kube-system/kube-controller-manager-enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.653023 1763 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d244be932e6a9f01cd727e293e0c9d59-k8s-certs\") pod \"kube-controller-manager-enable-default-cni-999044\" (UID: \"d244be932e6a9f01cd727e293e0c9d59\") " 
pod="kube-system/kube-controller-manager-enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.653032 1763 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/a1c5b18f7c6a5d58f0bc737993beed22-etcd-certs\") pod \"etcd-enable-default-cni-999044\" (UID: \"a1c5b18f7c6a5d58f0bc737993beed22\") " pod="kube-system/etcd-enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.653041 1763 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/82310323aedbc6ef3df9fa46725030e8-ca-certs\") pod \"kube-apiserver-enable-default-cni-999044\" (UID: \"82310323aedbc6ef3df9fa46725030e8\") " pod="kube-system/kube-apiserver-enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.653049 1763 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/82310323aedbc6ef3df9fa46725030e8-etc-ca-certificates\") pod \"kube-apiserver-enable-default-cni-999044\" (UID: \"82310323aedbc6ef3df9fa46725030e8\") " pod="kube-system/kube-apiserver-enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.653057 1763 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/82310323aedbc6ef3df9fa46725030e8-usr-share-ca-certificates\") pod \"kube-apiserver-enable-default-cni-999044\" (UID: \"82310323aedbc6ef3df9fa46725030e8\") " pod="kube-system/kube-apiserver-enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.653066 1763 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/03b4e55f02e8323f734f8f6b5901761a-kubeconfig\") pod \"kube-scheduler-enable-default-cni-999044\" (UID: \"03b4e55f02e8323f734f8f6b5901761a\") " pod="kube-system/kube-scheduler-enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.653074 1763 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/d244be932e6a9f01cd727e293e0c9d59-kubeconfig\") pod \"kube-controller-manager-enable-default-cni-999044\" (UID: \"d244be932e6a9f01cd727e293e0c9d59\") " pod="kube-system/kube-controller-manager-enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.653086 1763 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d244be932e6a9f01cd727e293e0c9d59-usr-share-ca-certificates\") pod \"kube-controller-manager-enable-default-cni-999044\" (UID: \"d244be932e6a9f01cd727e293e0c9d59\") " pod="kube-system/kube-controller-manager-enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.653148 1763 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/a1c5b18f7c6a5d58f0bc737993beed22-etcd-data\") pod \"etcd-enable-default-cni-999044\" (UID: 
\"a1c5b18f7c6a5d58f0bc737993beed22\") " pod="kube-system/etcd-enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.653167 1763 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/82310323aedbc6ef3df9fa46725030e8-k8s-certs\") pod \"kube-apiserver-enable-default-cni-999044\" (UID: \"82310323aedbc6ef3df9fa46725030e8\") " pod="kube-system/kube-apiserver-enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.653179 1763 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d244be932e6a9f01cd727e293e0c9d59-etc-ca-certificates\") pod \"kube-controller-manager-enable-default-cni-999044\" (UID: \"d244be932e6a9f01cd727e293e0c9d59\") " pod="kube-system/kube-controller-manager-enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.653202 1763 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d244be932e6a9f01cd727e293e0c9d59-usr-local-share-ca-certificates\") pod \"kube-controller-manager-enable-default-cni-999044\" (UID: \"d244be932e6a9f01cd727e293e0c9d59\") " pod="kube-system/kube-controller-manager-enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.653214 1763 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/82310323aedbc6ef3df9fa46725030e8-usr-local-share-ca-certificates\") pod \"kube-apiserver-enable-default-cni-999044\" (UID: \"82310323aedbc6ef3df9fa46725030e8\") " pod="kube-system/kube-apiserver-enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: I1102 23:25:21.783996 1763 kubelet_node_status.go:75] "Attempting to register node" node="enable-default-cni-999044" + Nov 02 23:25:21 enable-default-cni-999044 kubelet[1763]: E1102 23:25:21.784181 1763 kubelet_node_status.go:107] "Unable to register node with API server" err="Post \"https://192.168.76.2:8443/api/v1/nodes\": dial tcp 192.168.76.2:8443: connect: connection refused" node="enable-default-cni-999044" + Nov 02 23:25:22 enable-default-cni-999044 kubelet[1763]: E1102 23:25:22.051300 1763 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://192.168.76.2:8443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/enable-default-cni-999044?timeout=10s\": dial tcp 192.168.76.2:8443: connect: connection refused" interval="800ms" + Nov 02 23:25:22 enable-default-cni-999044 kubelet[1763]: I1102 23:25:22.184766 1763 kubelet_node_status.go:75] "Attempting to register node" node="enable-default-cni-999044" + Nov 02 23:25:22 enable-default-cni-999044 kubelet[1763]: E1102 23:25:22.473086 1763 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"enable-default-cni-999044\" not found" node="enable-default-cni-999044" + Nov 02 23:25:22 enable-default-cni-999044 kubelet[1763]: E1102 23:25:22.477343 1763 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"enable-default-cni-999044\" not found" node="enable-default-cni-999044" + Nov 02 23:25:22 
enable-default-cni-999044 kubelet[1763]: E1102 23:25:22.480208 1763 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"enable-default-cni-999044\" not found" node="enable-default-cni-999044" + Nov 02 23:25:22 enable-default-cni-999044 kubelet[1763]: E1102 23:25:22.483085 1763 kubelet.go:3215] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"enable-default-cni-999044\" not found" node="enable-default-cni-999044" + Nov 02 23:25:23 enable-default-cni-999044 kubelet[1763]: E1102 23:25:23.347880 1763 nodelease.go:49] "Failed to get node when trying to set owner ref to the node lease" err="nodes \"enable-default-cni-999044\" not found" node="enable-default-cni-999044" + Nov 02 23:25:23 enable-default-cni-999044 kubelet[1763]: I1102 23:25:23.440549 1763 kubelet_node_status.go:78] "Successfully registered node" node="enable-default-cni-999044" + Nov 02 23:25:23 enable-default-cni-999044 kubelet[1763]: I1102 23:25:23.444792 1763 apiserver.go:52] "Watching apiserver" + Nov 02 23:25:23 enable-default-cni-999044 kubelet[1763]: I1102 23:25:23.449978 1763 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:25:23 enable-default-cni-999044 kubelet[1763]: I1102 23:25:23.450033 1763 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-enable-default-cni-999044" + Nov 02 23:25:23 enable-default-cni-999044 kubelet[1763]: E1102 23:25:23.456056 1763 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-enable-default-cni-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-apiserver-enable-default-cni-999044" + Nov 02 23:25:23 enable-default-cni-999044 kubelet[1763]: I1102 23:25:23.456079 1763 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-enable-default-cni-999044" + Nov 02 23:25:23 enable-default-cni-999044 kubelet[1763]: E1102 23:25:23.459077 1763 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-enable-default-cni-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-controller-manager-enable-default-cni-999044" + Nov 02 23:25:23 enable-default-cni-999044 kubelet[1763]: I1102 23:25:23.459102 1763 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-enable-default-cni-999044" + Nov 02 23:25:23 enable-default-cni-999044 kubelet[1763]: E1102 23:25:23.460491 1763 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-enable-default-cni-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-scheduler-enable-default-cni-999044" + Nov 02 23:25:23 enable-default-cni-999044 kubelet[1763]: I1102 23:25:23.460505 1763 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-enable-default-cni-999044" + Nov 02 23:25:23 enable-default-cni-999044 kubelet[1763]: E1102 23:25:23.461419 1763 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-enable-default-cni-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/etcd-enable-default-cni-999044" + Nov 02 23:25:23 enable-default-cni-999044 kubelet[1763]: I1102 23:25:23.485381 1763 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-enable-default-cni-999044" + Nov 02 
23:25:23 enable-default-cni-999044 kubelet[1763]: I1102 23:25:23.485430 1763 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-enable-default-cni-999044" + Nov 02 23:25:23 enable-default-cni-999044 kubelet[1763]: I1102 23:25:23.485463 1763 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-enable-default-cni-999044" + Nov 02 23:25:23 enable-default-cni-999044 kubelet[1763]: E1102 23:25:23.486467 1763 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-enable-default-cni-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-apiserver-enable-default-cni-999044" + Nov 02 23:25:23 enable-default-cni-999044 kubelet[1763]: E1102 23:25:23.486489 1763 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-enable-default-cni-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/kube-scheduler-enable-default-cni-999044" + Nov 02 23:25:23 enable-default-cni-999044 kubelet[1763]: E1102 23:25:23.486592 1763 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-enable-default-cni-999044\" is forbidden: no PriorityClass with name system-node-critical was found" pod="kube-system/etcd-enable-default-cni-999044" + Nov 02 23:25:24 enable-default-cni-999044 kubelet[1763]: I1102 23:25:24.488210 1763 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 systemd[1]: Stopping kubelet.service - kubelet: The Kubernetes Node Agent... + ░░ Subject: A stop job for unit kubelet.service has begun execution + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has begun execution. + ░░  + ░░ The job identifier is 802. + Nov 02 23:25:25 enable-default-cni-999044 kubelet[1763]: I1102 23:25:25.522597 1763 dynamic_cafile_content.go:175] "Shutting down controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:25:25 enable-default-cni-999044 systemd[1]: kubelet.service: Deactivated successfully. + ░░ Subject: Unit succeeded + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ The unit kubelet.service has successfully entered the 'dead' state. + Nov 02 23:25:25 enable-default-cni-999044 systemd[1]: Stopped kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A stop job for unit kubelet.service has finished + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A stop job for unit kubelet.service has finished. + ░░  + ░░ The job identifier is 802 and the job result is done. + Nov 02 23:25:25 enable-default-cni-999044 systemd[1]: Started kubelet.service - kubelet: The Kubernetes Node Agent. + ░░ Subject: A start job for unit kubelet.service has finished successfully + ░░ Defined-By: systemd + ░░ Support: https://www.debian.org/support + ░░  + ░░ A start job for unit kubelet.service has finished successfully. + ░░  + ░░ The job identifier is 802. 
+ Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.566810 2227 server.go:529] "Kubelet version" kubeletVersion="v1.34.1" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.566854 2227 server.go:531] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.566870 2227 watchdog_linux.go:95] "Systemd watchdog is not enabled" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.566874 2227 watchdog_linux.go:137] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.567001 2227 server.go:956] "Client rotation is on, will bootstrap in background" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.567656 2227 certificate_store.go:147] "Loading cert/key pair from a file" filePath="/var/lib/kubelet/pki/kubelet-client-current.pem" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.568773 2227 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/var/lib/minikube/certs/ca.crt" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.571179 2227 server.go:1423] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.574208 2227 server.go:781] "--cgroups-per-qos enabled, but --cgroup-root was not specified. Defaulting to /" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.574222 2227 server.go:842] "NoSwap is set due to memorySwapBehavior not specified" memorySwapBehavior="" FailSwapOn=false + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.574338 2227 container_manager_linux.go:270] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.574354 2227 container_manager_linux.go:275] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"enable-default-cni-999044","RuntimeCgroupsName":"","SystemCgroupsName":"","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":false,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":null,"HardEvictionThresholds":[],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"MemoryManagerPolicy":"None","MemoryManagerReservedMemory":null,"PodPidsLimit":-1,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.574440 2227 topology_manager.go:138] "Creating topology manager with none policy" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.574445 2227 container_manager_linux.go:306] "Creating device plugin manager" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.574463 2227 container_manager_linux.go:315] "Creating Dynamic Resource Allocation (DRA) manager" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 
23:25:25.574804 2227 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.574908 2227 kubelet.go:475] "Attempting to sync node with API server" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.574941 2227 kubelet.go:376] "Adding static pod path" path="/etc/kubernetes/manifests" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.574963 2227 kubelet.go:387] "Adding apiserver pod source" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.574980 2227 apiserver.go:42] "Waiting for node sync before watching apiserver pods" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.575524 2227 kuberuntime_manager.go:291] "Container runtime initialized" containerRuntime="docker" version="28.5.1" apiVersion="v1" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.575824 2227 kubelet.go:940] "Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.575846 2227 kubelet.go:964] "Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateProjection feature gate is disabled" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.576387 2227 server.go:1262] "Started kubelet" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.576424 2227 server.go:180] "Starting to listen" address="0.0.0.0" port=10250 + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.576492 2227 ratelimit.go:56] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.576537 2227 server_v1.go:49] "podresources" method="list" useActivePods=true + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.576784 2227 server.go:249] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.579320 2227 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.579588 2227 dynamic_serving_content.go:135] "Starting controller" name="kubelet-server-cert-files::/var/lib/kubelet/pki/kubelet.crt::/var/lib/kubelet/pki/kubelet.key" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.579818 2227 volume_manager.go:313] "Starting Kubelet Volume Manager" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: E1102 23:25:25.579995 2227 kubelet_node_status.go:404] "Error getting the current node from lister" err="node \"enable-default-cni-999044\" not found" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.580033 2227 desired_state_of_world_populator.go:146] "Desired state populator starts to run" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.580223 2227 reconciler.go:29] "Reconciler: start to sync state" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.580476 2227 factory.go:223] Registration of the systemd container factory successfully + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.580566 2227 factory.go:221] Registration of the crio container factory failed: Get 
"http://%2Fvar%2Frun%2Fcrio%2Fcrio.sock/info": dial unix /var/run/crio/crio.sock: connect: no such file or directory + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.583344 2227 server.go:310] "Adding debug handlers to kubelet server" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.585040 2227 factory.go:223] Registration of the containerd container factory successfully + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.591496 2227 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv4" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.592514 2227 kubelet_network_linux.go:54] "Initialized iptables rules." protocol="IPv6" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.592526 2227 status_manager.go:244] "Starting to sync pod status with apiserver" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.592556 2227 kubelet.go:2427] "Starting kubelet main sync loop" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: E1102 23:25:25.592597 2227 kubelet.go:2451] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.600714 2227 cpu_manager.go:221] "Starting CPU manager" policy="none" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.600722 2227 cpu_manager.go:222] "Reconciling" reconcilePeriod="10s" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.600740 2227 state_mem.go:36] "Initialized new in-memory state store" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.600843 2227 state_mem.go:88] "Updated default CPUSet" cpuSet="" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.600849 2227 state_mem.go:96] "Updated CPUSet assignments" assignments={} + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.600861 2227 policy_none.go:49] "None policy: Start" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.600867 2227 memory_manager.go:187] "Starting memorymanager" policy="None" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.600875 2227 state_mem.go:36] "Initializing new in-memory state store" logger="Memory Manager state checkpoint" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.600970 2227 state_mem.go:77] "Updated machine memory state" logger="Memory Manager state checkpoint" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.600982 2227 policy_none.go:47] "Start" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: E1102 23:25:25.603406 2227 manager.go:513] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.603508 2227 eviction_manager.go:189] "Eviction manager: starting control loop" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.603517 2227 container_log_manager.go:146] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.603647 2227 plugin_manager.go:118] "Starting Kubelet Plugin Manager" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: E1102 23:25:25.604196 2227 
eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="no imagefs label for configured runtime" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.693401 2227 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.693456 2227 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.693480 2227 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.693513 2227 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: E1102 23:25:25.696192 2227 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-enable-default-cni-999044\" already exists" pod="kube-system/kube-scheduler-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.704605 2227 kubelet_node_status.go:75] "Attempting to register node" node="enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.708069 2227 kubelet_node_status.go:124] "Node was previously registered" node="enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.708124 2227 kubelet_node_status.go:78] "Successfully registered node" node="enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.780855 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/d244be932e6a9f01cd727e293e0c9d59-ca-certs\") pod \"kube-controller-manager-enable-default-cni-999044\" (UID: \"d244be932e6a9f01cd727e293e0c9d59\") " pod="kube-system/kube-controller-manager-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.780875 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d244be932e6a9f01cd727e293e0c9d59-etc-ca-certificates\") pod \"kube-controller-manager-enable-default-cni-999044\" (UID: \"d244be932e6a9f01cd727e293e0c9d59\") " pod="kube-system/kube-controller-manager-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.780905 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/d244be932e6a9f01cd727e293e0c9d59-flexvolume-dir\") pod \"kube-controller-manager-enable-default-cni-999044\" (UID: \"d244be932e6a9f01cd727e293e0c9d59\") " pod="kube-system/kube-controller-manager-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.780926 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/d244be932e6a9f01cd727e293e0c9d59-k8s-certs\") pod \"kube-controller-manager-enable-default-cni-999044\" (UID: \"d244be932e6a9f01cd727e293e0c9d59\") " 
pod="kube-system/kube-controller-manager-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.780934 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/d244be932e6a9f01cd727e293e0c9d59-kubeconfig\") pod \"kube-controller-manager-enable-default-cni-999044\" (UID: \"d244be932e6a9f01cd727e293e0c9d59\") " pod="kube-system/kube-controller-manager-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.780948 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d244be932e6a9f01cd727e293e0c9d59-usr-local-share-ca-certificates\") pod \"kube-controller-manager-enable-default-cni-999044\" (UID: \"d244be932e6a9f01cd727e293e0c9d59\") " pod="kube-system/kube-controller-manager-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.780987 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/d244be932e6a9f01cd727e293e0c9d59-usr-share-ca-certificates\") pod \"kube-controller-manager-enable-default-cni-999044\" (UID: \"d244be932e6a9f01cd727e293e0c9d59\") " pod="kube-system/kube-controller-manager-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.881356 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-data\" (UniqueName: \"kubernetes.io/host-path/a1c5b18f7c6a5d58f0bc737993beed22-etcd-data\") pod \"etcd-enable-default-cni-999044\" (UID: \"a1c5b18f7c6a5d58f0bc737993beed22\") " pod="kube-system/etcd-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.881374 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/82310323aedbc6ef3df9fa46725030e8-k8s-certs\") pod \"kube-apiserver-enable-default-cni-999044\" (UID: \"82310323aedbc6ef3df9fa46725030e8\") " pod="kube-system/kube-apiserver-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.881408 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/82310323aedbc6ef3df9fa46725030e8-etc-ca-certificates\") pod \"kube-apiserver-enable-default-cni-999044\" (UID: \"82310323aedbc6ef3df9fa46725030e8\") " pod="kube-system/kube-apiserver-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.881512 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/a1c5b18f7c6a5d58f0bc737993beed22-etcd-certs\") pod \"etcd-enable-default-cni-999044\" (UID: \"a1c5b18f7c6a5d58f0bc737993beed22\") " pod="kube-system/etcd-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.881546 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/03b4e55f02e8323f734f8f6b5901761a-kubeconfig\") pod \"kube-scheduler-enable-default-cni-999044\" 
(UID: \"03b4e55f02e8323f734f8f6b5901761a\") " pod="kube-system/kube-scheduler-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.881559 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/82310323aedbc6ef3df9fa46725030e8-ca-certs\") pod \"kube-apiserver-enable-default-cni-999044\" (UID: \"82310323aedbc6ef3df9fa46725030e8\") " pod="kube-system/kube-apiserver-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.881571 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/82310323aedbc6ef3df9fa46725030e8-usr-local-share-ca-certificates\") pod \"kube-apiserver-enable-default-cni-999044\" (UID: \"82310323aedbc6ef3df9fa46725030e8\") " pod="kube-system/kube-apiserver-enable-default-cni-999044" + Nov 02 23:25:25 enable-default-cni-999044 kubelet[2227]: I1102 23:25:25.881581 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/82310323aedbc6ef3df9fa46725030e8-usr-share-ca-certificates\") pod \"kube-apiserver-enable-default-cni-999044\" (UID: \"82310323aedbc6ef3df9fa46725030e8\") " pod="kube-system/kube-apiserver-enable-default-cni-999044" + Nov 02 23:25:26 enable-default-cni-999044 kubelet[2227]: I1102 23:25:26.575337 2227 apiserver.go:52] "Watching apiserver" + Nov 02 23:25:26 enable-default-cni-999044 kubelet[2227]: I1102 23:25:26.580264 2227 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:25:26 enable-default-cni-999044 kubelet[2227]: I1102 23:25:26.596113 2227 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-scheduler-enable-default-cni-999044" podStartSLOduration=2.596101241 podStartE2EDuration="2.596101241s" podCreationTimestamp="2025-11-02 23:25:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:26.5916378 +0000 UTC m=+1.048211255" watchObservedRunningTime="2025-11-02 23:25:26.596101241 +0000 UTC m=+1.052674691" + Nov 02 23:25:26 enable-default-cni-999044 kubelet[2227]: I1102 23:25:26.600756 2227 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-enable-default-cni-999044" podStartSLOduration=1.600747087 podStartE2EDuration="1.600747087s" podCreationTimestamp="2025-11-02 23:25:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:26.596204258 +0000 UTC m=+1.052777697" watchObservedRunningTime="2025-11-02 23:25:26.600747087 +0000 UTC m=+1.057320540" + Nov 02 23:25:26 enable-default-cni-999044 kubelet[2227]: I1102 23:25:26.600826 2227 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-enable-default-cni-999044" podStartSLOduration=1.6008226479999998 podStartE2EDuration="1.600822648s" podCreationTimestamp="2025-11-02 23:25:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:26.600647246 +0000 UTC m=+1.057220704" watchObservedRunningTime="2025-11-02 23:25:26.600822648 
+0000 UTC m=+1.057396111" + Nov 02 23:25:26 enable-default-cni-999044 kubelet[2227]: I1102 23:25:26.605019 2227 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-apiserver-enable-default-cni-999044" podStartSLOduration=1.605009412 podStartE2EDuration="1.605009412s" podCreationTimestamp="2025-11-02 23:25:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:26.604968953 +0000 UTC m=+1.061542408" watchObservedRunningTime="2025-11-02 23:25:26.605009412 +0000 UTC m=+1.061582870" + Nov 02 23:25:26 enable-default-cni-999044 kubelet[2227]: I1102 23:25:26.614233 2227 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-enable-default-cni-999044" + Nov 02 23:25:26 enable-default-cni-999044 kubelet[2227]: I1102 23:25:26.614322 2227 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-enable-default-cni-999044" + Nov 02 23:25:26 enable-default-cni-999044 kubelet[2227]: I1102 23:25:26.614330 2227 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-enable-default-cni-999044" + Nov 02 23:25:26 enable-default-cni-999044 kubelet[2227]: E1102 23:25:26.638323 2227 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-enable-default-cni-999044\" already exists" pod="kube-system/kube-scheduler-enable-default-cni-999044" + Nov 02 23:25:26 enable-default-cni-999044 kubelet[2227]: E1102 23:25:26.638770 2227 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-enable-default-cni-999044\" already exists" pod="kube-system/etcd-enable-default-cni-999044" + Nov 02 23:25:26 enable-default-cni-999044 kubelet[2227]: E1102 23:25:26.638889 2227 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-enable-default-cni-999044\" already exists" pod="kube-system/kube-apiserver-enable-default-cni-999044" + Nov 02 23:25:27 enable-default-cni-999044 kubelet[2227]: I1102 23:25:27.229493 2227 kubelet_node_status.go:439] "Fast updating node status as it just became ready" + Nov 02 23:25:31 enable-default-cni-999044 kubelet[2227]: I1102 23:25:31.512692 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/7d5b920b-3327-459b-9608-03c4d53da52a-kube-proxy\") pod \"kube-proxy-nlt66\" (UID: \"7d5b920b-3327-459b-9608-03c4d53da52a\") " pod="kube-system/kube-proxy-nlt66" + Nov 02 23:25:31 enable-default-cni-999044 kubelet[2227]: I1102 23:25:31.512718 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxhpj\" (UniqueName: \"kubernetes.io/projected/7d5b920b-3327-459b-9608-03c4d53da52a-kube-api-access-zxhpj\") pod \"kube-proxy-nlt66\" (UID: \"7d5b920b-3327-459b-9608-03c4d53da52a\") " pod="kube-system/kube-proxy-nlt66" + Nov 02 23:25:31 enable-default-cni-999044 kubelet[2227]: I1102 23:25:31.512735 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/7d5b920b-3327-459b-9608-03c4d53da52a-xtables-lock\") pod \"kube-proxy-nlt66\" (UID: \"7d5b920b-3327-459b-9608-03c4d53da52a\") " pod="kube-system/kube-proxy-nlt66" + Nov 02 23:25:31 enable-default-cni-999044 kubelet[2227]: I1102 23:25:31.512747 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"lib-modules\" (UniqueName: \"kubernetes.io/host-path/7d5b920b-3327-459b-9608-03c4d53da52a-lib-modules\") pod \"kube-proxy-nlt66\" (UID: \"7d5b920b-3327-459b-9608-03c4d53da52a\") " pod="kube-system/kube-proxy-nlt66" + Nov 02 23:25:31 enable-default-cni-999044 kubelet[2227]: I1102 23:25:31.613930 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/af60533a-a9f5-41d9-ad50-d968b014d79e-config-volume\") pod \"coredns-66bc5c9577-6bblg\" (UID: \"af60533a-a9f5-41d9-ad50-d968b014d79e\") " pod="kube-system/coredns-66bc5c9577-6bblg" + Nov 02 23:25:31 enable-default-cni-999044 kubelet[2227]: I1102 23:25:31.613962 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fd7b2317-e3fe-43db-94b2-9750284d99e6-config-volume\") pod \"coredns-66bc5c9577-z2vbd\" (UID: \"fd7b2317-e3fe-43db-94b2-9750284d99e6\") " pod="kube-system/coredns-66bc5c9577-z2vbd" + Nov 02 23:25:31 enable-default-cni-999044 kubelet[2227]: I1102 23:25:31.614014 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22pzv\" (UniqueName: \"kubernetes.io/projected/af60533a-a9f5-41d9-ad50-d968b014d79e-kube-api-access-22pzv\") pod \"coredns-66bc5c9577-6bblg\" (UID: \"af60533a-a9f5-41d9-ad50-d968b014d79e\") " pod="kube-system/coredns-66bc5c9577-6bblg" + Nov 02 23:25:31 enable-default-cni-999044 kubelet[2227]: I1102 23:25:31.614044 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pft2\" (UniqueName: \"kubernetes.io/projected/fd7b2317-e3fe-43db-94b2-9750284d99e6-kube-api-access-6pft2\") pod \"coredns-66bc5c9577-z2vbd\" (UID: \"fd7b2317-e3fe-43db-94b2-9750284d99e6\") " pod="kube-system/coredns-66bc5c9577-z2vbd" + Nov 02 23:25:31 enable-default-cni-999044 kubelet[2227]: E1102 23:25:31.707795 2227 pod_workers.go:1324] "Error syncing pod, skipping" err="unmounted volumes=[config-volume kube-api-access-6pft2], unattached volumes=[], failed to process volumes=[]: context canceled" pod="kube-system/coredns-66bc5c9577-z2vbd" podUID="fd7b2317-e3fe-43db-94b2-9750284d99e6" + Nov 02 23:25:31 enable-default-cni-999044 kubelet[2227]: I1102 23:25:31.915407 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/host-path/6bcc61b3-38b2-420b-8cda-69d3f1d6d67a-tmp\") pod \"storage-provisioner\" (UID: \"6bcc61b3-38b2-420b-8cda-69d3f1d6d67a\") " pod="kube-system/storage-provisioner" + Nov 02 23:25:31 enable-default-cni-999044 kubelet[2227]: I1102 23:25:31.915427 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dz84t\" (UniqueName: \"kubernetes.io/projected/6bcc61b3-38b2-420b-8cda-69d3f1d6d67a-kube-api-access-dz84t\") pod \"storage-provisioner\" (UID: \"6bcc61b3-38b2-420b-8cda-69d3f1d6d67a\") " pod="kube-system/storage-provisioner" + Nov 02 23:25:32 enable-default-cni-999044 kubelet[2227]: I1102 23:25:32.644254 2227 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-proxy-nlt66" podStartSLOduration=1.644239129 podStartE2EDuration="1.644239129s" podCreationTimestamp="2025-11-02 23:25:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:32.63877577 
+0000 UTC m=+7.095349224" watchObservedRunningTime="2025-11-02 23:25:32.644239129 +0000 UTC m=+7.100812623" + Nov 02 23:25:32 enable-default-cni-999044 kubelet[2227]: I1102 23:25:32.644804 2227 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/coredns-66bc5c9577-6bblg" podStartSLOduration=1.644788372 podStartE2EDuration="1.644788372s" podCreationTimestamp="2025-11-02 23:25:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:32.64460871 +0000 UTC m=+7.101182172" watchObservedRunningTime="2025-11-02 23:25:32.644788372 +0000 UTC m=+7.101361833" + Nov 02 23:25:32 enable-default-cni-999044 kubelet[2227]: I1102 23:25:32.654500 2227 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=1.654489609 podStartE2EDuration="1.654489609s" podCreationTimestamp="2025-11-02 23:25:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:25:32.65444439 +0000 UTC m=+7.111017851" watchObservedRunningTime="2025-11-02 23:25:32.654489609 +0000 UTC m=+7.111063066" + Nov 02 23:25:32 enable-default-cni-999044 kubelet[2227]: I1102 23:25:32.719794 2227 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6pft2\" (UniqueName: \"kubernetes.io/projected/fd7b2317-e3fe-43db-94b2-9750284d99e6-kube-api-access-6pft2\") pod \"fd7b2317-e3fe-43db-94b2-9750284d99e6\" (UID: \"fd7b2317-e3fe-43db-94b2-9750284d99e6\") " + Nov 02 23:25:32 enable-default-cni-999044 kubelet[2227]: I1102 23:25:32.719816 2227 reconciler_common.go:163] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fd7b2317-e3fe-43db-94b2-9750284d99e6-config-volume\") pod \"fd7b2317-e3fe-43db-94b2-9750284d99e6\" (UID: \"fd7b2317-e3fe-43db-94b2-9750284d99e6\") " + Nov 02 23:25:32 enable-default-cni-999044 kubelet[2227]: I1102 23:25:32.720190 2227 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fd7b2317-e3fe-43db-94b2-9750284d99e6-config-volume" (OuterVolumeSpecName: "config-volume") pod "fd7b2317-e3fe-43db-94b2-9750284d99e6" (UID: "fd7b2317-e3fe-43db-94b2-9750284d99e6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGIDValue "" + Nov 02 23:25:32 enable-default-cni-999044 kubelet[2227]: I1102 23:25:32.721207 2227 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd7b2317-e3fe-43db-94b2-9750284d99e6-kube-api-access-6pft2" (OuterVolumeSpecName: "kube-api-access-6pft2") pod "fd7b2317-e3fe-43db-94b2-9750284d99e6" (UID: "fd7b2317-e3fe-43db-94b2-9750284d99e6"). InnerVolumeSpecName "kube-api-access-6pft2". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" + Nov 02 23:25:32 enable-default-cni-999044 kubelet[2227]: I1102 23:25:32.820292 2227 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-6pft2\" (UniqueName: \"kubernetes.io/projected/fd7b2317-e3fe-43db-94b2-9750284d99e6-kube-api-access-6pft2\") on node \"enable-default-cni-999044\" DevicePath \"\"" + Nov 02 23:25:32 enable-default-cni-999044 kubelet[2227]: I1102 23:25:32.820307 2227 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fd7b2317-e3fe-43db-94b2-9750284d99e6-config-volume\") on node \"enable-default-cni-999044\" DevicePath \"\"" + Nov 02 23:25:34 enable-default-cni-999044 kubelet[2227]: I1102 23:25:34.125503 2227 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" + Nov 02 23:25:35 enable-default-cni-999044 kubelet[2227]: I1102 23:25:35.596115 2227 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd7b2317-e3fe-43db-94b2-9750284d99e6" path="/var/lib/kubelet/pods/fd7b2317-e3fe-43db-94b2-9750284d99e6/volumes" + Nov 02 23:25:36 enable-default-cni-999044 kubelet[2227]: I1102 23:25:36.056614 2227 kuberuntime_manager.go:1828] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24" + Nov 02 23:25:36 enable-default-cni-999044 kubelet[2227]: I1102 23:25:36.057122 2227 kubelet_network.go:47] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24" + Nov 02 23:26:02 enable-default-cni-999044 kubelet[2227]: I1102 23:26:02.735851 2227 scope.go:117] "RemoveContainer" containerID="6591222ccfe8c2a08672c63541f176c814736970da9c70afa258dde83f641d2e" + Nov 02 23:26:06 enable-default-cni-999044 kubelet[2227]: I1102 23:26:06.281779 2227 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knn6m\" (UniqueName: \"kubernetes.io/projected/89714356-9111-4f71-953e-c8949c1c52ec-kube-api-access-knn6m\") pod \"netcat-cd4db9dbf-h69w6\" (UID: \"89714356-9111-4f71-953e-c8949c1c52ec\") " pod="default/netcat-cd4db9dbf-h69w6" + Nov 02 23:26:08 enable-default-cni-999044 kubelet[2227]: I1102 23:26:08.781417 2227 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="default/netcat-cd4db9dbf-h69w6" podStartSLOduration=1.7814128710000001 podStartE2EDuration="2.781389708s" podCreationTimestamp="2025-11-02 23:26:06 +0000 UTC" firstStartedPulling="2025-11-02 23:26:06.682134258 +0000 UTC m=+41.138707713" lastFinishedPulling="2025-11-02 23:26:07.682111106 +0000 UTC m=+42.138684550" observedRunningTime="2025-11-02 23:26:08.781171695 +0000 UTC m=+43.237745153" watchObservedRunningTime="2025-11-02 23:26:08.781389708 +0000 UTC m=+43.237963166" + Nov 02 23:26:20 enable-default-cni-999044 kubelet[2227]: E1102 23:26:20.674891 2227 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp [::1]:45992->[::1]:39843: write tcp [::1]:45992->[::1]:39843: write: broken pipe + Nov 02 23:26:20 enable-default-cni-999044 kubelet[2227]: E1102 23:26:20.912010 2227 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp [::1]:51050->[::1]:39843: write tcp [::1]:51050->[::1]:39843: write: broken pipe + + + >>> host: /etc/kubernetes/kubelet.conf: + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://192.168.76.2:8443 + name: mk + contexts: + - context: + cluster: mk + user: system:node:enable-default-cni-999044 + name: system:node:enable-default-cni-999044@mk + current-context: system:node:enable-default-cni-999044@mk + kind: Config + users: + - name: system:node:enable-default-cni-999044 + user: + client-certificate: /var/lib/kubelet/pki/kubelet-client-current.pem + client-key: /var/lib/kubelet/pki/kubelet-client-current.pem + + + >>> host: /var/lib/kubelet/config.yaml: + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + >>> k8s: kubectl config: + apiVersion: v1 + clusters: + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 
Nov 2025 23:25:31 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.76.2:8443 + name: enable-default-cni-999044 + contexts: + - context: + cluster: enable-default-cni-999044 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:25:31 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: enable-default-cni-999044 + name: enable-default-cni-999044 + current-context: enable-default-cni-999044 + kind: Config + users: + - name: enable-default-cni-999044 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/enable-default-cni-999044/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/enable-default-cni-999044/client.key + + + >>> k8s: cms: + apiVersion: v1 + items: + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. 
+ creationTimestamp: "2025-11-02T23:25:30Z" + name: kube-root-ca.crt + namespace: default + resourceVersion: "299" + uid: 6b124f70-4b84-471c-ab66-85a9fe6fbe28 + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:25:30Z" + name: kube-root-ca.crt + namespace: kube-node-lease + resourceVersion: "300" + uid: 5633d681-7163-4d07-a0ed-91868f4faa77 + - apiVersion: v1 + data: + jws-kubeconfig-9ty5n3: eyJhbGciOiJIUzI1NiIsImtpZCI6Ijl0eTVuMyJ9..FiSRvHx5Zp8iBZHApYzK9Z5ea6nOZEhHuif0CHWnncg + kubeconfig: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCakNDQWU2Z0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwdGFXNXAKYTNWaVpVTkJNQjRYRFRJMU1URXdNVEl5TkRjd01sb1hEVE0xTVRBek1USXlORGN3TWxvd0ZURVRNQkVHQTFVRQpBeE1LYldsdWFXdDFZbVZEUVRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTHhOCkJ1alFrRFJsbWNMV2JtaHhzcEFaZUF5WU5waFBBQmtVTnlLYnhuUnpCTzlxa1dGMllKUkRZUFBDdTZpMmRDOVkKM3VOK2ZHS04wUVFsNlFDWmlPOFY0cWhDdW96N25VaFdyTTRtSmlubVc4ckxuOU0rdXN1dCs2dEZHYUF1NzBNKwprbjNnWmVQZlpRK0ZCSEZSbVA2YkJQQll6QWw3WkM0cUNlMjhQSkdWRHFlczZiZG0xdkxSVzNXQ1NYdXg1eEhoCkRKdVdTekhCeTBOOStTMVh6VDFBVlJsU3ZZM05wajRvNTBkZHBWWERPZ0drWVMxUHZtY3NSVDJQQzZXWHZqaWYKcUg2dnJxUzlXdUZoVitEWnFsNVo3Zk1BVUdKMk1uTjArL2FScmYzZ3RGZVlhUjFkRkt6ckN0QnNzdzA4UFQvdgpqTmRmOER2Y0ZleU9CeUxDbTZVQ0F3RUFBYU5oTUY4d0RnWURWUjBQQVFIL0JBUURBZ0trTUIwR0ExVWRKUVFXCk1CUUdDQ3NHQVFVRkJ3TUNCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVcKQkJTd3o4WSttMnhEVXpiT1dJL25lR0R5OThDNHFEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFMQlNnL2w0dwovVWllZklKUEc4VXVSV2dqbUpXRjM4NFQwSjF3MTYzVk1MRTZYK3gvRllIdlo5VWJmNG0zZksyQkdCQkRXZmZOCkM4dVliL1dkcEY2NG9nT1E1bnd2ekhnZWNoYmpoSVMrUEk2Qkpxb2NhZXZyeE5VMTFUL01wdmR0dHpvUjBuK1YKY0NMblFVL2I2VE0weXVZODFjRHlrYTk0YUwvNVZTK2NlMzVSVkhFcEhPbHc0UjZsSnYyQ09ab1puUjdjbitaZQpZTVpvd3h1N2V2amM2aVFXb1ZselNRcG04M3hJVUtzNUdKODBKeTJYUjA2Zmw3Y2RNUGlPb1JFZzIyd0k0VUdyCmVlRWdRZkpST3pQRnpBMFhvaHVTc1BIOFR2R3orNnF3OExPR1FZV0VaY1pUZUlZYkd3N0lVVkFiaW1JSmRKT3QKUUVZNnlRdWNxSFZXV0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: 
https://control-plane.minikube.internal:8443 + name: "" + contexts: null + current-context: "" + kind: Config + users: null + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:25Z" + name: cluster-info + namespace: kube-public + resourceVersion: "321" + uid: 04917037-b124-492f-88cb-1be6cab06080 + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. + creationTimestamp: "2025-11-02T23:25:30Z" + name: kube-root-ca.crt + namespace: kube-public + resourceVersion: "301" + uid: f63ee5b2-64df-46d2-8766-c2dbae39367d + - apiVersion: v1 + data: + Corefile: | + .:53 { + log + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + hosts { + 192.168.76.1 host.minikube.internal + fallthrough + } + forward . 
/etc/resolv.conf { + max_concurrent 1000 + } + cache 30 { + disable success cluster.local + disable denial cluster.local + } + loop + reload + loadbalance + } + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:25Z" + name: coredns + namespace: kube-system + resourceVersion: "345" + uid: 55c6d27e-2b1d-4922-8b57-f08f19a9ceda + - apiVersion: v1 + data: + client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + requestheader-allowed-names: '["front-proxy-client"]' + requestheader-client-ca-file: | + -----BEGIN CERTIFICATE----- + MIIDETCCAfmgAwIBAgIISxBzoWC7/gcwDQYJKoZIhvcNAQELBQAwGTEXMBUGA1UE + AxMOZnJvbnQtcHJveHktY2EwHhcNMjUxMTAyMjMyMDE4WhcNMzUxMDMxMjMyNTE4 + WjAZMRcwFQYDVQQDEw5mcm9udC1wcm94eS1jYTCCASIwDQYJKoZIhvcNAQEBBQAD + ggEPADCCAQoCggEBAMkcERYPkQ2jL7fHHLeNBrvSXWNE3Q67baw2pV7ppxkbQ0xQ + Bv9P6Kyr0QaZ6sCUXsJgRkF+NSxP9EBlgm/M/JX2SKzLAi3ruGHRW0RW4sFgVAKx + XBupqtEPZgNlaqax/K742HzQ1oY8rIOmH1sptyy/xxeLh2GtZ2llO/LVLHY9xwRw + f4oaIgiQXON6w/QTIRpzEspZyK5yS2PbBBmW3VVWRfa/mNGQO5OUE6bXZOgBVMO3 + AZO1u22Xs80QVX6LMACRQDD+magN9nmuAJi0ifCmtTjOGVZCfdRRmK3StXXM03b3 + VGWnyDPYZ+xiSw7ioxD8OAcjj69nyND+K0wMX2ECAwEAAaNdMFswDgYDVR0PAQH/ + BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFA5vxfvvPjCCvh4+zinG + dW7fCcS6MBkGA1UdEQQSMBCCDmZyb250LXByb3h5LWNhMA0GCSqGSIb3DQEBCwUA + A4IBAQAP2Rm0z+nc1sOKy45jfqWz0+Ps7gRBJ/YWokFzq3R+37/V6Kr6isqCB6rF + 5bTmcYqsbk9myG4Dy0bXT4EMlIOplGPZ/UAyxWIj8J41RFMIbK/Os4LxSw2zPh5M + thMenF+R2rZU4/TDdCkr+GMUwJCi5mfhYakMLj5MTU7YeWWdAVpMYYbrXg7L8lay + fU/nUQzEkF7xS97MyzasRmCIQQRgLdAvG4cxh9so+UEj3QSGplF0ExI40IAI+V8b + OQwF8Ml57qV1yxeQNZq0mDWOiByQWD5O8BHQKTcym3BgwguoMVRSpbL9RGpjU5Sg + 7FXybKHi2NBX+pKRFpDaQ79+eTAe + -----END CERTIFICATE----- + requestheader-extra-headers-prefix: '["X-Remote-Extra-"]' + requestheader-group-headers: '["X-Remote-Group"]' + requestheader-username-headers: '["X-Remote-User"]' + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:23Z" + name: extension-apiserver-authentication + namespace: kube-system + resourceVersion: "26" + uid: 8cb5728b-9df7-4c0c-9d96-eb87657e02a8 + - apiVersion: v1 + data: + since: "2025-11-02" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:23Z" + name: kube-apiserver-legacy-service-account-token-tracking + namespace: kube-system + resourceVersion: "65" + uid: 465bbf96-4beb-409b-8f5a-bfa022492ef3 + - apiVersion: v1 + data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + bindAddress: 
0.0.0.0 + bindAddressHardFail: false + clientConnection: + acceptContentTypes: "" + burst: 0 + contentType: "" + kubeconfig: /var/lib/kube-proxy/kubeconfig.conf + qps: 0 + clusterCIDR: 10.244.0.0/16 + configSyncPeriod: 0s + conntrack: + maxPerCore: 0 + min: null + tcpBeLiberal: false + tcpCloseWaitTimeout: 0s + tcpEstablishedTimeout: 0s + udpStreamTimeout: 0s + udpTimeout: 0s + detectLocal: + bridgeInterface: "" + interfaceNamePrefix: "" + detectLocalMode: "" + enableProfiling: false + healthzBindAddress: "" + hostnameOverride: "" + iptables: + localhostNodePorts: null + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + ipvs: + excludeCIDRs: null + minSyncPeriod: 0s + scheduler: "" + strictARP: false + syncPeriod: 0s + tcpFinTimeout: 0s + tcpTimeout: 0s + udpTimeout: 0s + kind: KubeProxyConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + metricsBindAddress: 0.0.0.0:10249 + mode: "" + nftables: + masqueradeAll: false + masqueradeBit: null + minSyncPeriod: 0s + syncPeriod: 0s + nodePortAddresses: null + oomScoreAdj: null + portRange: "" + showHiddenMetricsForVersion: "" + winkernel: + enableDSR: false + forwardHealthCheckVip: false + networkName: "" + rootHnsEndpointName: "" + sourceVip: "" + kubeconfig.conf: |- + apiVersion: v1 + kind: Config + clusters: + - cluster: + certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + server: https://control-plane.minikube.internal:8443 + name: default + contexts: + - context: + cluster: default + namespace: default + user: default + name: default + current-context: default + users: + - name: default + user: + tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:25Z" + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system + resourceVersion: "253" + uid: ef6b95f6-585a-41b4-a8e2-28001e0edb37 + - apiVersion: v1 + data: + ca.crt: | + -----BEGIN CERTIFICATE----- + MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p + a3ViZUNBMB4XDTI1MTEwMTIyNDcwMloXDTM1MTAzMTIyNDcwMlowFTETMBEGA1UE + AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALxN + BujQkDRlmcLWbmhxspAZeAyYNphPABkUNyKbxnRzBO9qkWF2YJRDYPPCu6i2dC9Y + 3uN+fGKN0QQl6QCZiO8V4qhCuoz7nUhWrM4mJinmW8rLn9M+usut+6tFGaAu70M+ + kn3gZePfZQ+FBHFRmP6bBPBYzAl7ZC4qCe28PJGVDqes6bdm1vLRW3WCSXux5xHh + DJuWSzHBy0N9+S1XzT1AVRlSvY3Npj4o50ddpVXDOgGkYS1PvmcsRT2PC6WXvjif + qH6vrqS9WuFhV+DZql5Z7fMAUGJ2MnN0+/aRrf3gtFeYaR1dFKzrCtBssw08PT/v + jNdf8DvcFeyOByLCm6UCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW + MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW + BBSwz8Y+m2xDUzbOWI/neGDy98C4qDANBgkqhkiG9w0BAQsFAAOCAQEALBSg/l4w + /UiefIJPG8UuRWgjmJWF384T0J1w163VMLE6X+x/FYHvZ9Ubf4m3fK2BGBBDWffN + C8uYb/WdpF64ogOQ5nwvzHgechbjhIS+PI6BJqocaevrxNU11T/MpvdttzoR0n+V + cCLnQU/b6TM0yuY81cDyka94aL/5VS+ce35RVHEpHOlw4R6lJv2COZoZnR7cn+Ze + YMZowxu7evjc6iQWoVlzSQpm83xIUKs5GJ80Jy2XR06fl7cdMPiOoREg22wI4UGr + eeEgQfJROzPFzA0XohuSsPH8TvGz+6qw8LOGQYWEZcZTeIYbGw7IUVAbimIJdJOt + QEY6yQucqHVWWA== + -----END CERTIFICATE----- + kind: ConfigMap + metadata: + annotations: + kubernetes.io/description: Contains a CA bundle that can be used to verify the + kube-apiserver when using internal endpoints such as the internal service + IP or kubernetes.default.svc. No other usage is guaranteed across distributions + of Kubernetes clusters. 
+ creationTimestamp: "2025-11-02T23:25:30Z" + name: kube-root-ca.crt + namespace: kube-system + resourceVersion: "302" + uid: b7301df6-173a-4c11-999f-f8b89e67707a + - apiVersion: v1 + data: + ClusterConfiguration: | + apiServer: + certSANs: + - 127.0.0.1 + - localhost + - 192.168.76.2 + extraArgs: + - name: enable-admission-plugins + value: NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota + apiVersion: kubeadm.k8s.io/v1beta4 + caCertificateValidityPeriod: 87600h0m0s + certificateValidityPeriod: 8760h0m0s + certificatesDir: /var/lib/minikube/certs + clusterName: mk + controlPlaneEndpoint: control-plane.minikube.internal:8443 + controllerManager: + extraArgs: + - name: allocate-node-cidrs + value: "true" + - name: leader-elect + value: "false" + dns: {} + encryptionAlgorithm: RSA-2048 + etcd: + local: + dataDir: /var/lib/minikube/etcd + imageRepository: registry.k8s.io + kind: ClusterConfiguration + kubernetesVersion: v1.34.1 + networking: + dnsDomain: cluster.local + podSubnet: 10.244.0.0/16 + serviceSubnet: 10.96.0.0/12 + proxy: {} + scheduler: + extraArgs: + - name: leader-elect + value: "false" + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:25Z" + name: kubeadm-config + namespace: kube-system + resourceVersion: "208" + uid: 646ca8cf-7b2a-48c8-be06-601729b00910 + - apiVersion: v1 + data: + kubelet: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.96.0.10 + clusterDomain: cluster.local + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + cpuManagerReconcilePeriod: 0s + crashLoopBackOff: {} + evictionHard: + imagefs.available: 0% + nodefs.available: 0% + nodefs.inodesFree: 0% + evictionPressureTransitionPeriod: 0s + failSwapOn: false + fileCheckFrequency: 0s + hairpinMode: hairpin-veth + healthzBindAddress: 127.0.0.1 + healthzPort: 10248 + httpCheckFrequency: 0s + imageGCHighThresholdPercent: 100 + imageMaximumGCAge: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + text: + infoBufferSize: "0" + verbosity: 0 + memorySwap: {} + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + rotateCertificates: true + runtimeRequestTimeout: 15m0s + shutdownGracePeriod: 0s + shutdownGracePeriodCriticalPods: 0s + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + kind: ConfigMap + metadata: + creationTimestamp: "2025-11-02T23:25:25Z" + name: kubelet-config + namespace: kube-system + resourceVersion: "211" + uid: aa62e326-97d0-46d4-95c8-e4b298945f4e + kind: List + metadata: + resourceVersion: "" + + + >>> host: docker daemon status: + ● docker.service - Docker Application Container Engine + Loaded: loaded (]8;;file://enable-default-cni-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;;; enabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:25:16 UTC; 1min 11s ago + TriggeredBy: ● docker.socket + Docs: ]8;;https://docs.docker.comhttps://docs.docker.com]8;; + Main PID: 1049 (dockerd) + Tasks: 14 + Memory: 170.8M + CPU: 2.479s + CGroup: /system.slice/docker.service 
+ └─1049 /usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + + Nov 02 23:25:16 enable-default-cni-999044 dockerd[1049]: time="2025-11-02T23:25:16.795065011Z" level=info msg="Loading containers: done." + Nov 02 23:25:16 enable-default-cni-999044 dockerd[1049]: time="2025-11-02T23:25:16.801702495Z" level=info msg="Docker daemon" commit=f8215cc containerd-snapshotter=false storage-driver=overlay2 version=28.5.1 + Nov 02 23:25:16 enable-default-cni-999044 dockerd[1049]: time="2025-11-02T23:25:16.801740798Z" level=info msg="Initializing buildkit" + Nov 02 23:25:16 enable-default-cni-999044 dockerd[1049]: time="2025-11-02T23:25:16.813318066Z" level=info msg="Completed buildkit initialization" + Nov 02 23:25:16 enable-default-cni-999044 dockerd[1049]: time="2025-11-02T23:25:16.815543270Z" level=info msg="Daemon has completed initialization" + Nov 02 23:25:16 enable-default-cni-999044 dockerd[1049]: time="2025-11-02T23:25:16.815604869Z" level=info msg="API listen on /var/run/docker.sock" + Nov 02 23:25:16 enable-default-cni-999044 dockerd[1049]: time="2025-11-02T23:25:16.815628036Z" level=info msg="API listen on /run/docker.sock" + Nov 02 23:25:16 enable-default-cni-999044 dockerd[1049]: time="2025-11-02T23:25:16.815666257Z" level=info msg="API listen on [::]:2376" + Nov 02 23:25:16 enable-default-cni-999044 systemd[1]: Started docker.service - Docker Application Container Engine. + Nov 02 23:26:02 enable-default-cni-999044 dockerd[1049]: time="2025-11-02T23:26:02.269740073Z" level=info msg="ignoring event" container=6591222ccfe8c2a08672c63541f176c814736970da9c70afa258dde83f641d2e module=libcontainerd namespace=moby topic=/tasks/delete type="*events.TaskDelete" + + + >>> host: docker daemon config: + # ]8;;file://enable-default-cni-999044/lib/systemd/system/docker.service/lib/systemd/system/docker.service]8;; + [Unit] + Description=Docker Application Container Engine + Documentation=https://docs.docker.com + After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target + Wants=network-online.target containerd.service + Requires=docker.socket + StartLimitBurst=3 + StartLimitIntervalSec=60 + + [Service] + Type=notify + Restart=always + + + + # This file is a systemd drop-in unit that inherits from the base dockerd configuration. + # The base configuration already specifies an 'ExecStart=...' command. The first directive + # here is to clear out that command inherited from the base configuration. Without this, + # the command from the base configuration and the command specified here are treated as + # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd + # will catch this invalid input and refuse to start the service with an error like: + # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. + + # NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other + # container runtimes. If left unlimited, it may result in OOM issues with MySQL. 
+ ExecStart= + ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + ExecReload=/bin/kill -s HUP $MAINPID + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Uncomment TasksMax if your systemd version supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + TimeoutStartSec=0 + + # set delegate yes so that systemd does not reset the cgroups of docker containers + Delegate=yes + + # kill only the docker process, not all processes in the cgroup + KillMode=process + OOMScoreAdjust=-500 + + [Install] + WantedBy=multi-user.target + + + >>> host: /etc/docker/daemon.json: + {"exec-opts":["native.cgroupdriver=systemd"],"log-driver":"json-file","log-opts":{"max-size":"100m"},"storage-driver":"overlay2"} + + >>> host: docker system info: + Client: Docker Engine - Community + Version: 28.5.1 + Context: default + Debug Mode: false + Plugins: + buildx: Docker Buildx (Docker Inc.) + Version: v0.29.1 + Path: /usr/libexec/docker/cli-plugins/docker-buildx + + Server: + Containers: 17 + Running: 16 + Paused: 0 + Stopped: 1 + Images: 9 + Server Version: 28.5.1 + Storage Driver: overlay2 + Backing Filesystem: extfs + Supports d_type: true + Using metacopy: false + Native Overlay Diff: true + userxattr: false + Logging Driver: json-file + Cgroup Driver: systemd + Cgroup Version: 2 + Plugins: + Volume: local + Network: bridge host ipvlan macvlan null overlay + Log: awslogs fluentd gcplogs gelf journald json-file local splunk syslog + CDI spec directories: + /etc/cdi + /var/run/cdi + Swarm: inactive + Runtimes: io.containerd.runc.v2 runc + Default Runtime: runc + Init Binary: docker-init + containerd version: b98a3aace656320842a23f4a392a33f46af97866 + runc version: v1.3.0-0-g4ca628d1 + init version: de40ad0 + Security Options: + seccomp + Profile: builtin + cgroupns + Kernel Version: 6.6.97+ + Operating System: Debian GNU/Linux 12 (bookworm) + OSType: linux + Architecture: x86_64 + CPUs: 8 + Total Memory: 60.83GiB + Name: enable-default-cni-999044 + ID: 6000d75f-4f8b-40d0-96d4-f6e3e4a38412 + Docker Root Dir: /var/lib/docker + Debug Mode: false + No Proxy: control-plane.minikube.internal + Labels: + provider=docker + Experimental: false + Insecure Registries: + 10.96.0.0/12 + ::1/128 + 127.0.0.0/8 + Live Restore Enabled: false + + + + >>> host: cri-docker daemon status: + ● cri-docker.service - CRI Interface for Docker Application Container Engine + Loaded: loaded (]8;;file://enable-default-cni-999044/lib/systemd/system/cri-docker.service/lib/systemd/system/cri-docker.service]8;;; disabled; preset: enabled) + Drop-In: /etc/systemd/system/cri-docker.service.d + └─]8;;file://enable-default-cni-999044/etc/systemd/system/cri-docker.service.d/10-cni.conf10-cni.conf]8;; + Active: active (running) since Sun 2025-11-02 23:25:17 UTC; 1min 11s ago + TriggeredBy: ● cri-docker.socket + Docs: ]8;;https://docs.mirantis.comhttps://docs.mirantis.com]8;; + Main PID: 1359 (cri-dockerd) + Tasks: 13 + Memory: 17.1M + CPU: 679ms + CGroup: /system.slice/cri-docker.service + └─1359 /usr/bin/cri-dockerd --container-runtime-endpoint fd:// 
--pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + Nov 02 23:25:21 enable-default-cni-999044 cri-dockerd[1359]: time="2025-11-02T23:25:21Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/f0d983a7afd43e5d39c5d46a92934baa03ab58d2681a9b4444e4ea317d7ade4b/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:21 enable-default-cni-999044 cri-dockerd[1359]: time="2025-11-02T23:25:21Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/65191f66ec2e3cb6c2c12e5e37868ab037436ebdbb3f0383aed925f47333bbf7/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:21 enable-default-cni-999044 cri-dockerd[1359]: time="2025-11-02T23:25:21Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/08a183f3081c99bf3b5a7783bbb730ab6c314339ddc336043b2feeda24c6205a/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:21 enable-default-cni-999044 cri-dockerd[1359]: time="2025-11-02T23:25:21Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/aa5787d6ac78f4e8cc4743758acf3ddac2fbeeb3777771d54b614a02837b9dbf/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:31 enable-default-cni-999044 cri-dockerd[1359]: time="2025-11-02T23:25:31Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/46b9bcc4a2b5b2431d52206a602670e0931e9b75a2c0e9735a11f61b5e4a233e/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:32 enable-default-cni-999044 cri-dockerd[1359]: time="2025-11-02T23:25:32Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/ae92d332e0bd53b97b038ad6618378464802b339c47f0192d62684e40095ce8b/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:32 enable-default-cni-999044 cri-dockerd[1359]: time="2025-11-02T23:25:32Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/1a71a9959cad64a3639d7620e72d3f60a41c245b11de7a4b12e14ec8548d5d0d/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:25:36 enable-default-cni-999044 cri-dockerd[1359]: time="2025-11-02T23:25:36Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:10.244.0.0/24,},}" + Nov 02 23:26:06 enable-default-cni-999044 cri-dockerd[1359]: time="2025-11-02T23:26:06Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/cd4b0636d616ead7c17063b9a02818b878ccfdf2bdb7afa4c67898e6125f9a2e/resolv.conf as [nameserver 10.96.0.10 search default.svc.cluster.local svc.cluster.local cluster.local test-pods.svc.cluster.local 
c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:26:07 enable-default-cni-999044 cri-dockerd[1359]: time="2025-11-02T23:26:07Z" level=info msg="Stop pulling image registry.k8s.io/e2e-test-images/agnhost:2.40: Status: Downloaded newer image for registry.k8s.io/e2e-test-images/agnhost:2.40" + + + >>> host: cri-docker daemon config: + # ]8;;file://enable-default-cni-999044/lib/systemd/system/cri-docker.service/lib/systemd/system/cri-docker.service]8;; + [Unit] + Description=CRI Interface for Docker Application Container Engine + Documentation=https://docs.mirantis.com + After=network-online.target firewalld.service docker.service + Wants=network-online.target + Requires=cri-docker.socket + + [Service] + Type=notify + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// + ExecReload=/bin/kill -s HUP $MAINPID + TimeoutSec=0 + RestartSec=2 + Restart=always + + # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. + # Both the old, and new location are accepted by systemd 229 and up, so using the old location + # to make them work for either version of systemd. + StartLimitBurst=3 + + # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. + # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make + # this option work for either version of systemd. + StartLimitInterval=60s + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Comment TasksMax if your systemd version does not support it. + # Only systemd 226 and above support this option. + TasksMax=infinity + Delegate=yes + KillMode=process + + [Install] + WantedBy=multi-user.target + + # ]8;;file://enable-default-cni-999044/etc/systemd/system/cri-docker.service.d/10-cni.conf/etc/systemd/system/cri-docker.service.d/10-cni.conf]8;; + [Service] + ExecStart= + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + + >>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf: + [Service] + ExecStart= + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.k8s.io/pause:3.10.1 --network-plugin=cni --hairpin-mode=hairpin-veth + + >>> host: /usr/lib/systemd/system/cri-docker.service: + [Unit] + Description=CRI Interface for Docker Application Container Engine + Documentation=https://docs.mirantis.com + After=network-online.target firewalld.service docker.service + Wants=network-online.target + Requires=cri-docker.socket + + [Service] + Type=notify + ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// + ExecReload=/bin/kill -s HUP $MAINPID + TimeoutSec=0 + RestartSec=2 + Restart=always + + # Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229. + # Both the old, and new location are accepted by systemd 229 and up, so using the old location + # to make them work for either version of systemd. + StartLimitBurst=3 + + # Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230. + # Both the old, and new name are accepted by systemd 230 and up, so using the old name to make + # this option work for either version of systemd. 
+ StartLimitInterval=60s + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Comment TasksMax if your systemd version does not support it. + # Only systemd 226 and above support this option. + TasksMax=infinity + Delegate=yes + KillMode=process + + [Install] + WantedBy=multi-user.target + + + >>> host: cri-dockerd version: + cri-dockerd dev (HEAD) + + + >>> host: containerd daemon status: + ● containerd.service - containerd container runtime + Loaded: loaded (]8;;file://enable-default-cni-999044/lib/systemd/system/containerd.service/lib/systemd/system/containerd.service]8;;; disabled; preset: enabled) + Active: active (running) since Sun 2025-11-02 23:25:16 UTC; 1min 13s ago + Docs: ]8;;https://containerd.iohttps://containerd.io]8;; + Main PID: 1036 (containerd) + Tasks: 189 + Memory: 94.1M + CPU: 1.025s + CGroup: /system.slice/containerd.service + ├─1036 /usr/bin/containerd + ├─1814 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 08a183f3081c99bf3b5a7783bbb730ab6c314339ddc336043b2feeda24c6205a -address /run/containerd/containerd.sock + ├─1817 /usr/bin/containerd-shim-runc-v2 -namespace moby -id f0d983a7afd43e5d39c5d46a92934baa03ab58d2681a9b4444e4ea317d7ade4b -address /run/containerd/containerd.sock + ├─1877 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 65191f66ec2e3cb6c2c12e5e37868ab037436ebdbb3f0383aed925f47333bbf7 -address /run/containerd/containerd.sock + ├─1889 /usr/bin/containerd-shim-runc-v2 -namespace moby -id aa5787d6ac78f4e8cc4743758acf3ddac2fbeeb3777771d54b614a02837b9dbf -address /run/containerd/containerd.sock + ├─2001 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 65dfdc5a33813dbd6b17a68a47225643fdab92c9d0aedff5458d004493054bd5 -address /run/containerd/containerd.sock + ├─2003 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 0627f9ec285b4e10280a3cfd7d495c018cd9f55437b9cbf2fc99c40fb6d69ad5 -address /run/containerd/containerd.sock + ├─2015 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 1a2c7edf6b6cb32d23b45cdb0fa389b67703d116aabe2798c779533c2d310930 -address /run/containerd/containerd.sock + ├─2084 /usr/bin/containerd-shim-runc-v2 -namespace moby -id a9b561b41c6b3d2078040ab791b729842ea17f7807291b2c1c3efe9aa166cb72 -address /run/containerd/containerd.sock + ├─2541 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 46b9bcc4a2b5b2431d52206a602670e0931e9b75a2c0e9735a11f61b5e4a233e -address /run/containerd/containerd.sock + ├─2585 /usr/bin/containerd-shim-runc-v2 -namespace moby -id c5a1cf7eb578794961c439f44eb9beff4e42c3fab2e7cde0e4cf30ee30d38363 -address /run/containerd/containerd.sock + ├─2621 /usr/bin/containerd-shim-runc-v2 -namespace moby -id ae92d332e0bd53b97b038ad6618378464802b339c47f0192d62684e40095ce8b -address /run/containerd/containerd.sock + ├─2744 /usr/bin/containerd-shim-runc-v2 -namespace moby -id ee284adbbe6dab5dcaac548d315d00279d7734876007c2412e281bb8461c1aac -address /run/containerd/containerd.sock + ├─2816 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 1a71a9959cad64a3639d7620e72d3f60a41c245b11de7a4b12e14ec8548d5d0d -address /run/containerd/containerd.sock + ├─3168 /usr/bin/containerd-shim-runc-v2 -namespace moby -id f6d6851014dc319d8d03d108f419520f2962c40811b7ff7ef839b735a3a12ba1 -address /run/containerd/containerd.sock + ├─3235 /usr/bin/containerd-shim-runc-v2 -namespace moby -id cd4b0636d616ead7c17063b9a02818b878ccfdf2bdb7afa4c67898e6125f9a2e 
-address /run/containerd/containerd.sock + └─3386 /usr/bin/containerd-shim-runc-v2 -namespace moby -id 3c4a0d7e1f985bd3755fb144cad41b52d34a0ee7332610f88fc1eb509b45d335 -address /run/containerd/containerd.sock + + Nov 02 23:25:16 enable-default-cni-999044 containerd[1036]: time="2025-11-02T23:25:16.597611673Z" level=info msg=serving... address=/run/containerd/containerd.sock + Nov 02 23:25:16 enable-default-cni-999044 containerd[1036]: time="2025-11-02T23:25:16.597637811Z" level=info msg="Start event monitor" + Nov 02 23:25:16 enable-default-cni-999044 containerd[1036]: time="2025-11-02T23:25:16.597647425Z" level=info msg="Start snapshots syncer" + Nov 02 23:25:16 enable-default-cni-999044 containerd[1036]: time="2025-11-02T23:25:16.597653548Z" level=info msg="Start cni network conf syncer for default" + Nov 02 23:25:16 enable-default-cni-999044 containerd[1036]: time="2025-11-02T23:25:16.597658412Z" level=info msg="Start streaming server" + Nov 02 23:25:16 enable-default-cni-999044 systemd[1]: Started containerd.service - containerd container runtime. + Nov 02 23:25:16 enable-default-cni-999044 containerd[1036]: time="2025-11-02T23:25:16.597770608Z" level=info msg="containerd successfully booted in 0.020543s" + Nov 02 23:26:02 enable-default-cni-999044 containerd[1036]: time="2025-11-02T23:26:02.269709193Z" level=info msg="shim disconnected" id=6591222ccfe8c2a08672c63541f176c814736970da9c70afa258dde83f641d2e namespace=moby + Nov 02 23:26:02 enable-default-cni-999044 containerd[1036]: time="2025-11-02T23:26:02.269747277Z" level=warning msg="cleaning up after shim disconnected" id=6591222ccfe8c2a08672c63541f176c814736970da9c70afa258dde83f641d2e namespace=moby + Nov 02 23:26:02 enable-default-cni-999044 containerd[1036]: time="2025-11-02T23:26:02.269754275Z" level=info msg="cleaning up dead shim" namespace=moby + + + >>> host: containerd daemon config: + # ]8;;file://enable-default-cni-999044/lib/systemd/system/containerd.service/lib/systemd/system/containerd.service]8;; + # Copyright The containerd Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it. + # Only systemd 226 and above support this version. 
+ TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /lib/systemd/system/containerd.service: + # Copyright The containerd Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + [Unit] + Description=containerd container runtime + Documentation=https://containerd.io + After=network.target local-fs.target dbus.service + + [Service] + #uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration + #Environment="ENABLE_CRI_SANDBOXES=sandboxed" + ExecStartPre=-/sbin/modprobe overlay + ExecStart=/usr/bin/containerd + + Type=notify + Delegate=yes + KillMode=process + Restart=always + RestartSec=5 + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNPROC=infinity + LimitCORE=infinity + LimitNOFILE=infinity + # Comment TasksMax if your systemd version does not supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + OOMScoreAdjust=-999 + + [Install] + WantedBy=multi-user.target + + + >>> host: /etc/containerd/config.toml: + version = 2 + root = "/var/lib/containerd" + state = "/run/containerd" + oom_score = 0 + # imports + + [grpc] + address = "/run/containerd/containerd.sock" + uid = 0 + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + + [debug] + address = "" + uid = 0 + gid = 0 + level = "" + + [metrics] + address = "" + grpc_histogram = false + + [cgroup] + path = "" + + [plugins] + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + [plugins."io.containerd.grpc.v1.cri"] + enable_unprivileged_ports = true + stream_server_address = "" + stream_server_port = "10010" + enable_selinux = false + sandbox_image = "registry.k8s.io/pause:3.10.1" + stats_collect_period = 10 + enable_tls_streaming = false + max_container_log_line_size = 16384 + restrict_oom_score_adj = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = true + snapshotter = "overlayfs" + default_runtime_name = "runc" + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + runtime_type = "" + runtime_engine = "" + runtime_root = "" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + runtime_type = "io.containerd.runc.v2" + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + [plugins."io.containerd.gc.v1.scheduler"] + pause_threshold = 0.02 + deletion_threshold = 0 + mutation_threshold = 100 + schedule_delay = "0s" + startup_delay = "100ms" + + + >>> host: containerd config dump: + 
disabled_plugins = [] + imports = ["/etc/containerd/config.toml"] + oom_score = 0 + plugin_dir = "" + required_plugins = [] + root = "/var/lib/containerd" + state = "/run/containerd" + temp = "" + version = 2 + + [cgroup] + path = "" + + [debug] + address = "" + format = "" + gid = 0 + level = "" + uid = 0 + + [grpc] + address = "/run/containerd/containerd.sock" + gid = 0 + max_recv_message_size = 16777216 + max_send_message_size = 16777216 + tcp_address = "" + tcp_tls_ca = "" + tcp_tls_cert = "" + tcp_tls_key = "" + uid = 0 + + [metrics] + address = "" + grpc_histogram = false + + [plugins] + + [plugins."io.containerd.gc.v1.scheduler"] + deletion_threshold = 0 + mutation_threshold = 100 + pause_threshold = 0.02 + schedule_delay = "0s" + startup_delay = "100ms" + + [plugins."io.containerd.grpc.v1.cri"] + cdi_spec_dirs = ["/etc/cdi", "/var/run/cdi"] + device_ownership_from_security_context = false + disable_apparmor = false + disable_cgroup = false + disable_hugetlb_controller = true + disable_proc_mount = false + disable_tcp_service = true + drain_exec_sync_io_timeout = "0s" + enable_cdi = false + enable_selinux = false + enable_tls_streaming = false + enable_unprivileged_icmp = false + enable_unprivileged_ports = true + ignore_deprecation_warnings = [] + ignore_image_defined_volumes = false + image_pull_progress_timeout = "5m0s" + image_pull_with_sync_fs = false + max_concurrent_downloads = 3 + max_container_log_line_size = 16384 + netns_mounts_under_state_dir = false + restrict_oom_score_adj = false + sandbox_image = "registry.k8s.io/pause:3.10.1" + selinux_category_range = 1024 + stats_collect_period = 10 + stream_idle_timeout = "4h0m0s" + stream_server_address = "" + stream_server_port = "10010" + systemd_cgroup = false + tolerate_missing_hugetlb_controller = true + unset_seccomp_profile = "" + + [plugins."io.containerd.grpc.v1.cri".cni] + bin_dir = "/opt/cni/bin" + conf_dir = "/etc/cni/net.d" + conf_template = "" + ip_pref = "" + max_conf_num = 1 + setup_serially = false + + [plugins."io.containerd.grpc.v1.cri".containerd] + default_runtime_name = "runc" + disable_snapshot_annotations = true + discard_unpacked_layers = true + ignore_blockio_not_enabled_errors = false + ignore_rdt_not_enabled_errors = false + no_pivot = false + snapshotter = "overlayfs" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations = [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "io.containerd.runc.v2" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] + SystemdCgroup = true + + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] + base_runtime_spec = "" + cni_conf_dir = "" + cni_max_conf_num = 0 + container_annotations 
= [] + pod_annotations = [] + privileged_without_host_devices = false + privileged_without_host_devices_all_devices_allowed = false + runtime_engine = "" + runtime_path = "" + runtime_root = "" + runtime_type = "" + sandbox_mode = "" + snapshotter = "" + + [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options] + + [plugins."io.containerd.grpc.v1.cri".image_decryption] + key_model = "node" + + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" + + [plugins."io.containerd.grpc.v1.cri".registry.auths] + + [plugins."io.containerd.grpc.v1.cri".registry.configs] + + [plugins."io.containerd.grpc.v1.cri".registry.headers] + + [plugins."io.containerd.grpc.v1.cri".registry.mirrors] + + [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming] + tls_cert_file = "" + tls_key_file = "" + + [plugins."io.containerd.internal.v1.opt"] + path = "/opt/containerd" + + [plugins."io.containerd.internal.v1.restart"] + interval = "10s" + + [plugins."io.containerd.internal.v1.tracing"] + + [plugins."io.containerd.metadata.v1.bolt"] + content_sharing_policy = "shared" + + [plugins."io.containerd.monitor.v1.cgroups"] + no_prometheus = false + + [plugins."io.containerd.nri.v1.nri"] + disable = true + disable_connections = false + plugin_config_path = "/etc/nri/conf.d" + plugin_path = "/opt/nri/plugins" + plugin_registration_timeout = "5s" + plugin_request_timeout = "2s" + socket_path = "/var/run/nri/nri.sock" + + [plugins."io.containerd.runtime.v1.linux"] + no_shim = false + runtime = "runc" + runtime_root = "" + shim = "containerd-shim" + shim_debug = false + + [plugins."io.containerd.runtime.v2.task"] + platforms = ["linux/amd64"] + sched_core = false + + [plugins."io.containerd.service.v1.diff-service"] + default = ["walking"] + sync_fs = false + + [plugins."io.containerd.service.v1.tasks-service"] + blockio_config_file = "" + rdt_config_file = "" + + [plugins."io.containerd.snapshotter.v1.aufs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.blockfile"] + fs_type = "" + mount_options = [] + root_path = "" + scratch_file = "" + + [plugins."io.containerd.snapshotter.v1.btrfs"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.devmapper"] + async_remove = false + base_image_size = "" + discard_blocks = false + fs_options = "" + fs_type = "" + pool_name = "" + root_path = "" + + [plugins."io.containerd.snapshotter.v1.native"] + root_path = "" + + [plugins."io.containerd.snapshotter.v1.overlayfs"] + mount_options = [] + root_path = "" + sync_remove = false + upperdir_label = false + + [plugins."io.containerd.snapshotter.v1.zfs"] + root_path = "" + + [plugins."io.containerd.tracing.processor.v1.otlp"] + + [plugins."io.containerd.transfer.v1.local"] + config_path = "" + max_concurrent_downloads = 3 + max_concurrent_uploaded_layers = 3 + + [[plugins."io.containerd.transfer.v1.local".unpack_config]] + differ = "walking" + platform = "linux/amd64" + snapshotter = "overlayfs" + + [proxy_plugins] + + [stream_processors] + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"] + accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"] + args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = "application/vnd.oci.image.layer.v1.tar" + + [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"] + accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"] + args = 
["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] + env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] + path = "ctd-decoder" + returns = "application/vnd.oci.image.layer.v1.tar+gzip" + + [timeouts] + "io.containerd.timeout.bolt.open" = "0s" + "io.containerd.timeout.metrics.shimstats" = "2s" + "io.containerd.timeout.shim.cleanup" = "5s" + "io.containerd.timeout.shim.load" = "5s" + "io.containerd.timeout.shim.shutdown" = "3s" + "io.containerd.timeout.task.state" = "2s" + + [ttrpc] + address = "" + gid = 0 + uid = 0 + + + >>> host: crio daemon status: + ○ crio.service - Container Runtime Interface for OCI (CRI-O) + Loaded: loaded (]8;;file://enable-default-cni-999044/lib/systemd/system/crio.service/lib/systemd/system/crio.service]8;;; disabled; preset: enabled) + Active: inactive (dead) + Docs: ]8;;https://github.com/cri-o/cri-ohttps://github.com/cri-o/cri-o]8;; + ssh: Process exited with status 3 + + + >>> host: crio daemon config: + # ]8;;file://enable-default-cni-999044/lib/systemd/system/crio.service/lib/systemd/system/crio.service]8;; + [Unit] + Description=Container Runtime Interface for OCI (CRI-O) + Documentation=https://github.com/cri-o/cri-o + Wants=network-online.target + Before=kubelet.service + After=network-online.target + + [Service] + Type=notify + EnvironmentFile=-/etc/default/crio + Environment=GOTRACEBACK=crash + ExecStart=/usr/bin/crio \ + $CRIO_CONFIG_OPTIONS \ + $CRIO_RUNTIME_OPTIONS \ + $CRIO_STORAGE_OPTIONS \ + $CRIO_NETWORK_OPTIONS \ + $CRIO_METRICS_OPTIONS + ExecReload=/bin/kill -s HUP $MAINPID + TasksMax=infinity + LimitNOFILE=1048576 + LimitNPROC=1048576 + LimitCORE=infinity + OOMScoreAdjust=-999 + TimeoutStartSec=0 + Restart=on-failure + RestartSec=10 + + [Install] + WantedBy=multi-user.target + Alias=cri-o.service + + + >>> host: /etc/crio: + /etc/crio/crio.conf.d/10-crio.conf + [crio.image] + signature_policy = "/etc/crio/policy.json" + + [crio.runtime] + default_runtime = "crun" + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_root = "/run/crun" + monitor_path = "/usr/libexec/crio/conmon" + allowed_annotations = [ + "io.containers.trace-syscall", + ] + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_root = "/run/runc" + monitor_path = "/usr/libexec/crio/conmon" + /etc/crio/crio.conf.d/02-crio.conf + [crio.image] + # pause_image = "" + + [crio.network] + # cni_default_network = "" + + [crio.runtime] + # cgroup_manager = "" + /etc/crio/policy.json + { "default": [{ "type": "insecureAcceptAnything" }] } + + + >>> host: crio config: + INFO[2025-11-02T23:26:31.592942145Z] Updating config from single file: /etc/crio/crio.conf + INFO[2025-11-02T23:26:31.592970752Z] Updating config from drop-in file: /etc/crio/crio.conf + INFO[2025-11-02T23:26:31.593004512Z] Skipping not-existing config file "/etc/crio/crio.conf" + INFO[2025-11-02T23:26:31.593032333Z] Updating config from path: /etc/crio/crio.conf.d + INFO[2025-11-02T23:26:31.593074466Z] Updating config from drop-in file: /etc/crio/crio.conf.d/02-crio.conf + INFO[2025-11-02T23:26:31.593203235Z] Updating config from drop-in file: /etc/crio/crio.conf.d/10-crio.conf + INFO Using default capabilities: CAP_CHOWN, CAP_DAC_OVERRIDE, CAP_FSETID, CAP_FOWNER, CAP_SETGID, CAP_SETUID, CAP_SETPCAP, CAP_NET_BIND_SERVICE, CAP_KILL + # The CRI-O configuration file specifies all of the available configuration + # options and command-line flags for the crio(8) OCI Kubernetes Container Runtime + # daemon, but in a TOML 
format that can be more easily modified and versioned. + # + # Please refer to crio.conf(5) for details of all configuration options. + + # CRI-O supports partial configuration reload during runtime, which can be + # done by sending SIGHUP to the running process. Currently supported options + # are explicitly mentioned with: 'This option supports live configuration + # reload'. + + # CRI-O reads its storage defaults from the containers-storage.conf(5) file + # located at /etc/containers/storage.conf. Modify this storage configuration if + # you want to change the system's defaults. If you want to modify storage just + # for CRI-O, you can change the storage configuration options here. + [crio] + + # Path to the "root directory". CRI-O stores all of its data, including + # containers images, in this directory. + # root = "/var/lib/containers/storage" + + # Path to the "run directory". CRI-O stores all of its state in this directory. + # runroot = "/run/containers/storage" + + # Path to the "imagestore". If CRI-O stores all of its images in this directory differently than Root. + # imagestore = "" + + # Storage driver used to manage the storage of images and containers. Please + # refer to containers-storage.conf(5) to see all available storage drivers. + # storage_driver = "" + + # List to pass options to the storage driver. Please refer to + # containers-storage.conf(5) to see all available storage options. + # storage_option = [ + # ] + + # The default log directory where all logs will go unless directly specified by + # the kubelet. The log directory specified must be an absolute directory. + # log_dir = "/var/log/crio/pods" + + # Location for CRI-O to lay down the temporary version file. + # It is used to check if crio wipe should wipe containers, which should + # always happen on a node reboot + # version_file = "/var/run/crio/version" + + # Location for CRI-O to lay down the persistent version file. + # It is used to check if crio wipe should wipe images, which should + # only happen when CRI-O has been upgraded + # version_file_persist = "" + + # InternalWipe is whether CRI-O should wipe containers and images after a reboot when the server starts. + # If set to false, one must use the external command 'crio wipe' to wipe the containers and images in these situations. + # internal_wipe = true + + # InternalRepair is whether CRI-O should check if the container and image storage was corrupted after a sudden restart. + # If it was, CRI-O also attempts to repair the storage. + # internal_repair = true + + # Location for CRI-O to lay down the clean shutdown file. + # It is used to check whether crio had time to sync before shutting down. + # If not found, crio wipe will clear the storage directory. + # clean_shutdown_file = "/var/lib/crio/clean.shutdown" + + # The crio.api table contains settings for the kubelet/gRPC interface. + [crio.api] + + # Path to AF_LOCAL socket on which CRI-O will listen. + # listen = "/var/run/crio/crio.sock" + + # IP address on which the stream server will listen. + # stream_address = "127.0.0.1" + + # The port on which the stream server will listen. If the port is set to "0", then + # CRI-O will allocate a random free port number. + # stream_port = "0" + + # Enable encrypted TLS transport of the stream server. + # stream_enable_tls = false + + # Length of time until open streams terminate due to lack of activity + # stream_idle_timeout = "" + + # Path to the x509 certificate file used to serve the encrypted stream. 
This + # file can change, and CRI-O will automatically pick up the changes. + # stream_tls_cert = "" + + # Path to the key file used to serve the encrypted stream. This file can + # change and CRI-O will automatically pick up the changes. + # stream_tls_key = "" + + # Path to the x509 CA(s) file used to verify and authenticate client + # communication with the encrypted stream. This file can change and CRI-O will + # automatically pick up the changes. + # stream_tls_ca = "" + + # Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 80 * 1024 * 1024. + # grpc_max_send_msg_size = 83886080 + + # Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 80 * 1024 * 1024. + # grpc_max_recv_msg_size = 83886080 + + # The crio.runtime table contains settings pertaining to the OCI runtime used + # and options for how to set up and manage the OCI runtime. + [crio.runtime] + + # A list of ulimits to be set in containers by default, specified as + # "=:", for example: + # "nofile=1024:2048" + # If nothing is set here, settings will be inherited from the CRI-O daemon + # default_ulimits = [ + # ] + + # If true, the runtime will not use pivot_root, but instead use MS_MOVE. + # no_pivot = false + + # decryption_keys_path is the path where the keys required for + # image decryption are stored. This option supports live configuration reload. + # decryption_keys_path = "/etc/crio/keys/" + + # Path to the conmon binary, used for monitoring the OCI runtime. + # Will be searched for using $PATH if empty. + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon = "" + + # Cgroup setting for conmon + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorCgroup. + # conmon_cgroup = "" + + # Environment variable list for the conmon process, used for passing necessary + # environment variables to conmon or the runtime. + # This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv. + # conmon_env = [ + # ] + + # Additional environment variables to set for all the + # containers. These are overridden if set in the + # container image spec or in the container runtime configuration. + # default_env = [ + # ] + + # If true, SELinux will be used for pod separation on the host. + # This option is deprecated, and be interpreted from whether SELinux is enabled on the host in the future. + # selinux = false + + # Path to the seccomp.json profile which is used as the default seccomp profile + # for the runtime. If not specified or set to "", then the internal default seccomp profile will be used. + # This option supports live configuration reload. + # seccomp_profile = "" + + # Enable a seccomp profile for privileged containers from the local path. + # This option supports live configuration reload. + # privileged_seccomp_profile = "" + + # Used to change the name of the default AppArmor profile of CRI-O. The default + # profile name is "crio-default". This profile only takes effect if the user + # does not specify a profile via the Kubernetes Pod's metadata annotation. If + # the profile is set to "unconfined", then this equals to disabling AppArmor. + # This option supports live configuration reload. + # apparmor_profile = "crio-default" + + # Path to the blockio class configuration file for configuring + # the cgroup blockio controller. 
+ # blockio_config_file = "" + + # Reload blockio-config-file and rescan blockio devices in the system before applying + # blockio parameters. + # blockio_reload = false + + # Used to change irqbalance service config file path which is used for configuring + # irqbalance daemon. + # irqbalance_config_file = "/etc/sysconfig/irqbalance" + + # irqbalance_config_restore_file allows to set a cpu mask CRI-O should + # restore as irqbalance config at startup. Set to empty string to disable this flow entirely. + # By default, CRI-O manages the irqbalance configuration to enable dynamic IRQ pinning. + # irqbalance_config_restore_file = "/etc/sysconfig/orig_irq_banned_cpus" + + # Path to the RDT configuration file for configuring the resctrl pseudo-filesystem. + # This option supports live configuration reload. + # rdt_config_file = "" + + # Cgroup management implementation used for the runtime. + # cgroup_manager = "systemd" + + # Specify whether the image pull must be performed in a separate cgroup. + # separate_pull_cgroup = "" + + # List of default capabilities for containers. If it is empty or commented out, + # only the capabilities defined in the containers json file by the user/kube + # will be added. + # default_capabilities = [ + # "CHOWN", + # "DAC_OVERRIDE", + # "FSETID", + # "FOWNER", + # "SETGID", + # "SETUID", + # "SETPCAP", + # "NET_BIND_SERVICE", + # "KILL", + # ] + + # Add capabilities to the inheritable set, as well as the default group of permitted, bounding and effective. + # If capabilities are expected to work for non-root users, this option should be set. + # add_inheritable_capabilities = false + + # List of default sysctls. If it is empty or commented out, only the sysctls + # defined in the container json file by the user/kube will be added. + # default_sysctls = [ + # ] + + # List of devices on the host that a + # user can specify with the "io.kubernetes.cri-o.Devices" allowed annotation. + # allowed_devices = [ + # "/dev/fuse", + # "/dev/net/tun", + # ] + + # List of additional devices. specified as + # "::", for example: "--device=/dev/sdc:/dev/xvdc:rwm". + # If it is empty or commented out, only the devices + # defined in the container json file by the user/kube will be added. + # additional_devices = [ + # ] + + # List of directories to scan for CDI Spec files. + # cdi_spec_dirs = [ + # "/etc/cdi", + # "/var/run/cdi", + # ] + + # Change the default behavior of setting container devices uid/gid from CRI's + # SecurityContext (RunAsUser/RunAsGroup) instead of taking host's uid/gid. + # Defaults to false. + # device_ownership_from_security_context = false + + # Path to OCI hooks directories for automatically executed hooks. If one of the + # directories does not exist, then CRI-O will automatically skip them. + # hooks_dir = [ + # "/usr/share/containers/oci/hooks.d", + # ] + + # Path to the file specifying the defaults mounts for each container. The + # format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads + # its default mounts from the following two files: + # + # 1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the + # override file, where users can either add in their own default mounts, or + # override the default mounts shipped with the package. + # + # 2) /usr/share/containers/mounts.conf: This is the default file read for + # mounts. If you want CRI-O to read from a different, specific mounts file, + # you can change the default_mounts_file. Note, if this is done, CRI-O will + # only add mounts it finds in this file. 
+ # + # default_mounts_file = "" + + # Maximum number of processes allowed in a container. + # This option is deprecated. The Kubelet flag '--pod-pids-limit' should be used instead. + # pids_limit = -1 + + # Maximum sized allowed for the container log file. Negative numbers indicate + # that no size limit is imposed. If it is positive, it must be >= 8192 to + # match/exceed conmon's read buffer. The file is truncated and re-opened so the + # limit is never exceeded. This option is deprecated. The Kubelet flag '--container-log-max-size' should be used instead. + # log_size_max = -1 + + # Whether container output should be logged to journald in addition to the kubernetes log file + # log_to_journald = false + + # Path to directory in which container exit files are written to by conmon. + # container_exits_dir = "/var/run/crio/exits" + + # Path to directory for container attach sockets. + # container_attach_socket_dir = "/var/run/crio" + + # The prefix to use for the source of the bind mounts. + # bind_mount_prefix = "" + + # If set to true, all containers will run in read-only mode. + # read_only = false + + # Changes the verbosity of the logs based on the level it is set to. Options + # are fatal, panic, error, warn, info, debug and trace. This option supports + # live configuration reload. + # log_level = "info" + + # Filter the log messages by the provided regular expression. + # This option supports live configuration reload. + # log_filter = "" + + # The UID mappings for the user namespace of each container. A range is + # specified in the form containerUID:HostUID:Size. Multiple ranges must be + # separated by comma. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # uid_mappings = "" + + # The GID mappings for the user namespace of each container. A range is + # specified in the form containerGID:HostGID:Size. Multiple ranges must be + # separated by comma. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # gid_mappings = "" + + # If set, CRI-O will reject any attempt to map host UIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_uid = -1 + + # If set, CRI-O will reject any attempt to map host GIDs below this value + # into user namespaces. A negative value indicates that no minimum is set, + # so specifying mappings will only be allowed for pods that run as UID 0. + # This option is deprecated, and will be replaced with Kubernetes user namespace support (KEP-127) in the future. + # minimum_mappable_gid = -1 + + # The minimal amount of time in seconds to wait before issuing a timeout + # regarding the proper termination of the container. The lowest possible + # value is 30s, whereas lower values are not considered by CRI-O. + # ctr_stop_timeout = 30 + + # drop_infra_ctr determines whether CRI-O drops the infra container + # when a pod does not have a private PID namespace, and does not use + # a kernel separating runtime (like kata). + # It requires manage_ns_lifecycle to be true. + # drop_infra_ctr = true + + # infra_ctr_cpuset determines what CPUs will be used to run infra containers. + # You can use linux CPU list format to specify desired CPUs. 
+ # To get better isolation for guaranteed pods, set this parameter to be equal to kubelet reserved-cpus. + # infra_ctr_cpuset = "" + + # shared_cpuset determines the CPU set which is allowed to be shared between guaranteed containers, + # regardless of, and in addition to, the exclusiveness of their CPUs. + # This field is optional and would not be used if not specified. + # You can specify CPUs in the Linux CPU list format. + # shared_cpuset = "" + + # The directory where the state of the managed namespaces gets tracked. + # Only used when manage_ns_lifecycle is true. + # namespaces_dir = "/var/run" + + # pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle + # pinns_path = "" + + # Globally enable/disable CRIU support which is necessary to + # checkpoint and restore container or pods (even if CRIU is found in $PATH). + # enable_criu_support = true + + # Enable/disable the generation of the container, + # sandbox lifecycle events to be sent to the Kubelet to optimize the PLEG + # enable_pod_events = false + + # default_runtime is the _name_ of the OCI runtime to be used as the default. + # The name is matched against the runtimes map below. + # default_runtime = "crun" + + # A list of paths that, when absent from the host, + # will cause a container creation to fail (as opposed to the current behavior being created as a directory). + # This option is to protect from source locations whose existence as a directory could jeopardize the health of the node, and whose + # creation as a file is not desired either. + # An example is /etc/hostname, which will cause failures on reboot if it's created as a directory, but often doesn't exist because + # the hostname is being managed dynamically. + # absent_mount_sources_to_reject = [ + # ] + + # The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes. + # The runtime to use is picked based on the runtime handler provided by the CRI. + # If no runtime handler is provided, the "default_runtime" will be used. + # Each entry in the table should follow the format: + # + # [crio.runtime.runtimes.runtime-handler] + # runtime_path = "/path/to/the/executable" + # runtime_type = "oci" + # runtime_root = "/path/to/the/root" + # inherit_default_runtime = false + # monitor_path = "/path/to/container/monitor" + # monitor_cgroup = "/cgroup/path" + # monitor_exec_cgroup = "/cgroup/path" + # monitor_env = [] + # privileged_without_host_devices = false + # allowed_annotations = [] + # platform_runtime_paths = { "os/arch" = "/path/to/binary" } + # no_sync_log = false + # default_annotations = {} + # stream_websockets = false + # seccomp_profile = "" + # Where: + # - runtime-handler: Name used to identify the runtime. + # - runtime_path (optional, string): Absolute path to the runtime executable in + # the host filesystem. If omitted, the runtime-handler identifier should match + # the runtime executable name, and the runtime executable should be placed + # in $PATH. + # - runtime_type (optional, string): Type of runtime, one of: "oci", "vm". If + # omitted, an "oci" runtime is assumed. + # - runtime_root (optional, string): Root directory for storage of containers + # state. + # - runtime_config_path (optional, string): the path for the runtime configuration + # file. This can only be used with when using the VM runtime_type. 
+ # - inherit_default_runtime (optional, bool): when true the runtime_path, + # runtime_type, runtime_root and runtime_config_path will be replaced by + # the values from the default runtime on load time. + # - privileged_without_host_devices (optional, bool): an option for restricting + # host devices from being passed to privileged containers. + # - allowed_annotations (optional, array of strings): an option for specifying + # a list of experimental annotations that this runtime handler is allowed to process. + # The currently recognized values are: + # "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod. + # "io.kubernetes.cri-o.cgroup2-mount-hierarchy-rw" for mounting cgroups writably when set to "true". + # "io.kubernetes.cri-o.Devices" for configuring devices for the pod. + # "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm. + # "io.kubernetes.cri-o.UnifiedCgroup.$CTR_NAME" for configuring the cgroup v2 unified block for a container. + # "io.containers.trace-syscall" for tracing syscalls via the OCI seccomp BPF hook. + # "io.kubernetes.cri-o.seccompNotifierAction" for enabling the seccomp notifier feature. + # "io.kubernetes.cri-o.umask" for setting the umask for container init process. + # "io.kubernetes.cri.rdt-class" for setting the RDT class of a container + # "seccomp-profile.kubernetes.cri-o.io" for setting the seccomp profile for: + # - a specific container by using: "seccomp-profile.kubernetes.cri-o.io/" + # - a whole pod by using: "seccomp-profile.kubernetes.cri-o.io/POD" + # Note that the annotation works on containers as well as on images. + # For images, the plain annotation "seccomp-profile.kubernetes.cri-o.io" + # can be used without the required "/POD" suffix or a container name. + # "io.kubernetes.cri-o.DisableFIPS" for disabling FIPS mode in a Kubernetes pod within a FIPS-enabled cluster. + # - monitor_path (optional, string): The path of the monitor binary. Replaces + # deprecated option "conmon". + # - monitor_cgroup (optional, string): The cgroup the container monitor process will be put in. + # Replaces deprecated option "conmon_cgroup". + # - monitor_exec_cgroup (optional, string): If set to "container", indicates exec probes + # should be moved to the container's cgroup + # - monitor_env (optional, array of strings): Environment variables to pass to the monitor. + # Replaces deprecated option "conmon_env". + # When using the pod runtime and conmon-rs, then the monitor_env can be used to further configure + # conmon-rs by using: + # - LOG_DRIVER=[none,systemd,stdout] - Enable logging to the configured target, defaults to none. + # - HEAPTRACK_OUTPUT_PATH=/path/to/dir - Enable heaptrack profiling and save the files to the set directory. + # - HEAPTRACK_BINARY_PATH=/path/to/heaptrack - Enable heaptrack profiling and use set heaptrack binary. + # - platform_runtime_paths (optional, map): A mapping of platforms to the corresponding + # runtime executable paths for the runtime handler. + # - container_min_memory (optional, string): The minimum memory that must be set for a container. + # This value can be used to override the currently set global value for a specific runtime. If not set, + # a global default value of "12 MiB" will be used. + # - no_sync_log (optional, bool): If set to true, the runtime will not sync the log file on rotate or container exit. + # This option is only valid for the 'oci' runtime type. Setting this option to true can cause data loss, e.g. + # when a machine crash happens. 
+ # - default_annotations (optional, map): Default annotations if not overridden by the pod spec. + # - stream_websockets (optional, bool): Enable the WebSocket protocol for container exec, attach and port forward. + # - seccomp_profile (optional, string): The absolute path of the seccomp.json profile which is used as the default + # seccomp profile for the runtime. + # If not specified or set to "", the runtime seccomp_profile will be used. + # If that is also not specified or set to "", the internal default seccomp profile will be applied. + # + # Using the seccomp notifier feature: + # + # This feature can help you to debug seccomp related issues, for example if + # blocked syscalls (permission denied errors) have negative impact on the workload. + # + # To be able to use this feature, configure a runtime which has the annotation + # "io.kubernetes.cri-o.seccompNotifierAction" in the allowed_annotations array. + # + # It also requires at least runc 1.1.0 or crun 0.19 which support the notifier + # feature. + # + # If everything is setup, CRI-O will modify chosen seccomp profiles for + # containers if the annotation "io.kubernetes.cri-o.seccompNotifierAction" is + # set on the Pod sandbox. CRI-O will then get notified if a container is using + # a blocked syscall and then terminate the workload after a timeout of 5 + # seconds if the value of "io.kubernetes.cri-o.seccompNotifierAction=stop". + # + # This also means that multiple syscalls can be captured during that period, + # while the timeout will get reset once a new syscall has been discovered. + # + # This also means that the Pods "restartPolicy" has to be set to "Never", + # otherwise the kubelet will restart the container immediately. + # + # Please be aware that CRI-O is not able to get notified if a syscall gets + # blocked based on the seccomp defaultAction, which is a general runtime + # limitation. + + + [crio.runtime.runtimes.crun] + runtime_path = "/usr/libexec/crio/crun" + runtime_type = "" + runtime_root = "/run/crun" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + allowed_annotations = [ + "io.containers.trace-syscall", + ] + privileged_without_host_devices = false + + + + [crio.runtime.runtimes.runc] + runtime_path = "/usr/libexec/crio/runc" + runtime_type = "" + runtime_root = "/run/runc" + inherit_default_runtime = false + runtime_config_path = "" + container_min_memory = "" + monitor_path = "/usr/libexec/crio/conmon" + monitor_cgroup = "system.slice" + monitor_exec_cgroup = "" + + + privileged_without_host_devices = false + + + + # The workloads table defines ways to customize containers with different resources + # that work based on annotations, rather than the CRI. + # Note, the behavior of this table is EXPERIMENTAL and may change at any time. + # Each workload, has a name, activation_annotation, annotation_prefix and set of resources it supports mutating. + # The currently supported resources are "cpuperiod" "cpuquota", "cpushares", "cpulimit" and "cpuset". The values for "cpuperiod" and "cpuquota" are denoted in microseconds. + # The value for "cpulimit" is denoted in millicores, this value is used to calculate the "cpuquota" with the supplied "cpuperiod" or the default "cpuperiod". + # Note that the "cpulimit" field overrides the "cpuquota" value supplied in this configuration. + # Each resource can have a default value specified, or be empty. 
+ # For a container to opt-into this workload, the pod should be configured with the annotation $activation_annotation (key only, value is ignored). + # To customize per-container, an annotation of the form $annotation_prefix.$resource/$ctrName = "value" can be specified + # signifying for that resource type to override the default value. + # If the annotation_prefix is not present, every container in the pod will be given the default values. + # Example: + # [crio.runtime.workloads.workload-type] + # activation_annotation = "io.crio/workload" + # annotation_prefix = "io.crio.workload-type" + # [crio.runtime.workloads.workload-type.resources] + # cpuset = "0-1" + # cpushares = "5" + # cpuquota = "1000" + # cpuperiod = "100000" + # cpulimit = "35" + # Where: + # The workload name is workload-type. + # To specify, the pod must have the "io.crio.workload" annotation (this is a precise string match). + # This workload supports setting cpuset and cpu resources. + # annotation_prefix is used to customize the different resources. + # To configure the cpu shares a container gets in the example above, the pod would have to have the following annotation: + # "io.crio.workload-type/$container_name = {"cpushares": "value"}" + + # hostnetwork_disable_selinux determines whether + # SELinux should be disabled within a pod when it is running in the host network namespace + # Default value is set to true + # hostnetwork_disable_selinux = true + + # disable_hostport_mapping determines whether to enable/disable + # the container hostport mapping in CRI-O. + # Default value is set to 'false' + # disable_hostport_mapping = false + + # timezone To set the timezone for a container in CRI-O. + # If an empty string is provided, CRI-O retains its default behavior. Use 'Local' to match the timezone of the host machine. + # timezone = "" + + # The crio.image table contains settings pertaining to the management of OCI images. + # + # CRI-O reads its configured registries defaults from the system wide + # containers-registries.conf(5) located in /etc/containers/registries.conf. + [crio.image] + + # Default transport for pulling images from a remote container storage. + # default_transport = "docker://" + + # The path to a file containing credentials necessary for pulling images from + # secure registries. The file is similar to that of /var/lib/kubelet/config.json + # global_auth_file = "" + + # The image used to instantiate infra containers. + # This option supports live configuration reload. + # pause_image = "registry.k8s.io/pause:3.10.1" + + # The path to a file containing credentials specific for pulling the pause_image from + # above. The file is similar to that of /var/lib/kubelet/config.json + # This option supports live configuration reload. + # pause_image_auth_file = "" + + # The command to run to have a container stay in the paused state. + # When explicitly set to "", it will fallback to the entrypoint and command + # specified in the pause image. When commented out, it will fallback to the + # default: "/pause". This option supports live configuration reload. + # pause_command = "/pause" + + # List of images to be excluded from the kubelet's garbage collection. + # It allows specifying image names using either exact, glob, or keyword + # patterns. Exact matches must match the entire name, glob matches can + # have a wildcard * at the end, and keyword matches can have wildcards + # on both ends. 
By default, this list includes the "pause" image if + # configured by the user, which is used as a placeholder in Kubernetes pods. + # pinned_images = [ + # ] + + # Path to the file which decides what sort of policy we use when deciding + # whether or not to trust an image that we've pulled. It is not recommended that + # this option be used, as the default behavior of using the system-wide default + # policy (i.e., /etc/containers/policy.json) is most often preferred. Please + # refer to containers-policy.json(5) for more details. + signature_policy = "/etc/crio/policy.json" + + # Root path for pod namespace-separated signature policies. + # The final policy to be used on image pull will be /.json. + # If no pod namespace is being provided on image pull (via the sandbox config), + # or the concatenated path is non existent, then the signature_policy or system + # wide policy will be used as fallback. Must be an absolute path. + # signature_policy_dir = "/etc/crio/policies" + + # List of registries to skip TLS verification for pulling images. Please + # consider configuring the registries via /etc/containers/registries.conf before + # changing them here. + # This option is deprecated. Use registries.conf file instead. + # insecure_registries = [ + # ] + + # Controls how image volumes are handled. The valid values are mkdir, bind and + # ignore; the latter will ignore volumes entirely. + # image_volumes = "mkdir" + + # Temporary directory to use for storing big files + # big_files_temporary_dir = "" + + # If true, CRI-O will automatically reload the mirror registry when + # there is an update to the 'registries.conf.d' directory. Default value is set to 'false'. + # auto_reload_registries = false + + # The timeout for an image pull to make progress until the pull operation + # gets canceled. This value will be also used for calculating the pull progress interval to pull_progress_timeout / 10. + # Can be set to 0 to disable the timeout as well as the progress output. + # pull_progress_timeout = "0s" + + # The mode of short name resolution. + # The valid values are "enforcing" and "disabled", and the default is "enforcing". + # If "enforcing", an image pull will fail if a short name is used, but the results are ambiguous. + # If "disabled", the first result will be chosen. + # short_name_mode = "enforcing" + + # OCIArtifactMountSupport is whether CRI-O should support OCI artifacts. + # If set to false, mounting OCI Artifacts will result in an error. + # oci_artifact_mount_support = true + # The crio.network table containers settings pertaining to the management of + # CNI plugins. + [crio.network] + + # The default CNI network name to be selected. If not set or "", then + # CRI-O will pick-up the first one found in network_dir. + # cni_default_network = "" + + # Path to the directory where CNI configuration files are located. + # network_dir = "/etc/cni/net.d/" + + # Paths to directories where CNI plugin binaries are located. + # plugin_dirs = [ + # "/opt/cni/bin/", + # ] + + # List of included pod metrics. + # included_pod_metrics = [ + # ] + + # A necessary configuration for Prometheus based metrics retrieval + [crio.metrics] + + # Globally enable or disable metrics support. + # enable_metrics = false + + # Specify enabled metrics collectors. + # Per default all metrics are enabled. + # It is possible, to prefix the metrics with "container_runtime_" and "crio_". 
+ # For example, the metrics collector "operations" would be treated in the same + # way as "crio_operations" and "container_runtime_crio_operations". + # metrics_collectors = [ + # "image_pulls_layer_size", + # "containers_events_dropped_total", + # "containers_oom_total", + # "processes_defunct", + # "operations_total", + # "operations_latency_seconds", + # "operations_latency_seconds_total", + # "operations_errors_total", + # "image_pulls_bytes_total", + # "image_pulls_skipped_bytes_total", + # "image_pulls_failure_total", + # "image_pulls_success_total", + # "image_layer_reuse_total", + # "containers_oom_count_total", + # "containers_seccomp_notifier_count_total", + # "resources_stalled_at_stage", + # "containers_stopped_monitor_count", + # ] + # The IP address or hostname on which the metrics server will listen. + # metrics_host = "127.0.0.1" + + # The port on which the metrics server will listen. + # metrics_port = 9090 + + # Local socket path to bind the metrics server to + # metrics_socket = "" + + # The certificate for the secure metrics server. + # If the certificate is not available on disk, then CRI-O will generate a + # self-signed one. CRI-O also watches for changes of this path and reloads the + # certificate on any modification event. + # metrics_cert = "" + + # The certificate key for the secure metrics server. + # Behaves in the same way as the metrics_cert. + # metrics_key = "" + + # A necessary configuration for OpenTelemetry trace data exporting + [crio.tracing] + + # Globally enable or disable exporting OpenTelemetry traces. + # enable_tracing = false + + # Address on which the gRPC trace collector listens on. + # tracing_endpoint = "127.0.0.1:4317" + + # Number of samples to collect per million spans. Set to 1000000 to always sample. + # tracing_sampling_rate_per_million = 0 + + # CRI-O NRI configuration. + [crio.nri] + + # Globally enable or disable NRI. + # enable_nri = true + + # NRI socket to listen on. + # nri_listen = "/var/run/nri/nri.sock" + + # NRI plugin directory to use. + # nri_plugin_dir = "/opt/nri/plugins" + + # NRI plugin configuration directory to use. + # nri_plugin_config_dir = "/etc/nri/conf.d" + + # Disable connections from externally launched NRI plugins. + # nri_disable_connections = false + + # Timeout for a plugin to register itself with NRI. + # nri_plugin_registration_timeout = "5s" + + # Timeout for a plugin to handle an NRI request. + # nri_plugin_request_timeout = "2s" + + # NRI default validator configuration. + # If enabled, the builtin default validator can be used to reject a container if some + # NRI plugin requested a restricted adjustment. Currently the following adjustments + # can be restricted/rejected: + # - OCI hook injection + # - adjustment of runtime default seccomp profile + # - adjustment of unconfied seccomp profile + # - adjustment of a custom seccomp profile + # - adjustment of linux namespaces + # Additionally, the default validator can be used to reject container creation if any + # of a required set of plugins has not processed a container creation request, unless + # the container has been annotated to tolerate a missing plugin. 
+ #
+ # [crio.nri.default_validator]
+ # nri_enable_default_validator = false
+ # nri_validator_reject_oci_hook_adjustment = false
+ # nri_validator_reject_runtime_default_seccomp_adjustment = false
+ # nri_validator_reject_unconfined_seccomp_adjustment = false
+ # nri_validator_reject_custom_seccomp_adjustment = false
+ # nri_validator_reject_namespace_adjustment = false
+ # nri_validator_required_plugins = [
+ # ]
+ # nri_validator_tolerate_missing_plugins_annotation = ""
+
+ # Necessary information pertaining to container and pod stats reporting.
+ [crio.stats]
+
+ # The number of seconds between collecting pod and container stats.
+ # If set to 0, the stats are collected on-demand instead.
+ # stats_collection_period = 0
+
+ # The number of seconds between collecting pod/container stats and pod
+ # sandbox metrics. If set to 0, the metrics/stats are collected on-demand instead.
+ # collection_period = 0
+
+
+ ----------------------- debugLogs end: enable-default-cni-999044 [took: 16.102291064s] --------------------------------
+ helpers_test.go:175: Cleaning up "enable-default-cni-999044" profile ...
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p enable-default-cni-999044
+ helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p enable-default-cni-999044: (1.734121779s)
+=== CONT TestStartStop/group/newest-cni
+=== RUN TestStartStop/group/newest-cni/serial
+=== RUN TestStartStop/group/newest-cni/serial/FirstStart
+ start_stop_delete_test.go:184: (dbg) Run: out/minikube-linux-amd64 start -p newest-cni-323659 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker --container-runtime=docker --kubernetes-version=v1.34.1
+ start_stop_delete_test.go:184: (dbg) Done: out/minikube-linux-amd64 start -p old-k8s-version-736872 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=docker --kubernetes-version=v1.28.0: (38.019895815s)
+=== RUN TestStartStop/group/old-k8s-version/serial/DeployApp
+ start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-736872 create -f testdata/busybox.yaml
+ start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
+ helpers_test.go:352: "busybox" [a0451908-aadd-4d4b-b6ca-2241659d9b28] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
+E1102 23:26:49.536870 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key"
+ helpers_test.go:352: "busybox" [a0451908-aadd-4d4b-b6ca-2241659d9b28] Running
+ start_stop_delete_test.go:194: (dbg) TestStartStop/group/old-k8s-version/serial/DeployApp: integration-test=busybox healthy within 7.002646649s
+ start_stop_delete_test.go:194: (dbg) Run: kubectl --context old-k8s-version-736872 exec busybox -- /bin/sh -c "ulimit -n"
+=== RUN TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive
+ start_stop_delete_test.go:203: (dbg) Run: out/minikube-linux-amd64 addons enable metrics-server -p old-k8s-version-736872 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
+ start_stop_delete_test.go:213: (dbg) Run: kubectl --context old-k8s-version-736872 describe deploy/metrics-server -n kube-system
+=== RUN TestStartStop/group/old-k8s-version/serial/Stop
+ start_stop_delete_test.go:226: (dbg) Run: out/minikube-linux-amd64 stop -p old-k8s-version-736872 --alsologtostderr -v=3
+ start_stop_delete_test.go:184: (dbg) Done: out/minikube-linux-amd64 start -p newest-cni-323659 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker --container-runtime=docker --kubernetes-version=v1.34.1: (25.308965248s)
+=== RUN TestStartStop/group/newest-cni/serial/DeployApp
+=== RUN TestStartStop/group/newest-cni/serial/EnableAddonWhileActive
+ start_stop_delete_test.go:203: (dbg) Run: out/minikube-linux-amd64 addons enable metrics-server -p newest-cni-323659 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain
+ start_stop_delete_test.go:209: WARNING: cni mode requires additional setup before pods can schedule :(
+=== RUN TestStartStop/group/newest-cni/serial/Stop
+ start_stop_delete_test.go:226: (dbg) Run: out/minikube-linux-amd64 stop -p newest-cni-323659 --alsologtostderr -v=3
+ start_stop_delete_test.go:226: (dbg) Done: out/minikube-linux-amd64 stop -p old-k8s-version-736872 --alsologtostderr -v=3: (10.47654567s)
+=== RUN TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop
+ start_stop_delete_test.go:237: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-736872 -n old-k8s-version-736872
+ start_stop_delete_test.go:237: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-736872 -n old-k8s-version-736872: exit status 7 (39.647766ms)
+
+ -- stdout --
+ Stopped
+
+ -- /stdout --
+ start_stop_delete_test.go:237: status error: exit status 7 (may be ok)
+ start_stop_delete_test.go:244: (dbg) Run: out/minikube-linux-amd64 addons enable dashboard -p old-k8s-version-736872 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
+=== RUN TestStartStop/group/old-k8s-version/serial/SecondStart
+ start_stop_delete_test.go:254: (dbg) Run: out/minikube-linux-amd64 start -p old-k8s-version-736872 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=docker --kubernetes-version=v1.28.0
+ start_stop_delete_test.go:226: (dbg) Done: out/minikube-linux-amd64 stop -p newest-cni-323659 --alsologtostderr -v=3: (10.492533s)
+=== RUN TestStartStop/group/newest-cni/serial/EnableAddonAfterStop
+ start_stop_delete_test.go:237: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p newest-cni-323659 -n newest-cni-323659
+ start_stop_delete_test.go:237: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p newest-cni-323659 -n newest-cni-323659: exit status 7 (40.976773ms)
+
+ -- stdout --
+ Stopped
+
+ -- /stdout --
+ start_stop_delete_test.go:237: status error: exit status 7 (may be ok)
+ start_stop_delete_test.go:244: (dbg) Run: out/minikube-linux-amd64 addons enable dashboard -p newest-cni-323659 --images=MetricsScraper=registry.k8s.io/echoserver:1.4
+=== RUN TestStartStop/group/newest-cni/serial/SecondStart
+ start_stop_delete_test.go:254: (dbg) Run: out/minikube-linux-amd64 start -p newest-cni-323659 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker --container-runtime=docker --kubernetes-version=v1.34.1
+ start_stop_delete_test.go:254: (dbg) Done: out/minikube-linux-amd64 start -p newest-cni-323659 --memory=3072 --alsologtostderr --wait=apiserver,system_pods,default_sa --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=10.42.0.0/16 --driver=docker --container-runtime=docker --kubernetes-version=v1.34.1: (9.327584153s)
+ start_stop_delete_test.go:260: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p newest-cni-323659 -n newest-cni-323659
+=== RUN TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop
+ start_stop_delete_test.go:271: WARNING: cni mode requires additional setup before pods can schedule :(
+=== RUN TestStartStop/group/newest-cni/serial/AddonExistsAfterStop
+ start_stop_delete_test.go:282: WARNING: cni mode requires additional setup before pods can schedule :(
+=== RUN TestStartStop/group/newest-cni/serial/VerifyKubernetesImages
+ start_stop_delete_test.go:302: (dbg) Run: out/minikube-linux-amd64 -p newest-cni-323659 image list --format=json
+ start_stop_delete_test.go:184: (dbg) Done: out/minikube-linux-amd64 start -p no-preload-162995 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=docker --kubernetes-version=v1.34.1: (1m8.746326877s)
+=== RUN TestStartStop/group/no-preload/serial/DeployApp
+ start_stop_delete_test.go:194: (dbg) Run: kubectl --context no-preload-162995 create -f testdata/busybox.yaml
+=== RUN TestStartStop/group/newest-cni/serial/Pause
+ start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 pause -p newest-cni-323659 --alsologtostderr -v=1
+ start_stop_delete_test.go:194: (dbg) TestStartStop/group/no-preload/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
+ helpers_test.go:352: "busybox" [cf17fc1b-dc07-4815-9a77-3cdd7e1fa790] Pending
+ start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p newest-cni-323659 -n newest-cni-323659
+ start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p newest-cni-323659 -n newest-cni-323659: exit status 2 (213.833847ms)
+
+ -- stdout --
+ Paused
+
+ -- /stdout --
+ start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
+ start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.Kubelet}} -p newest-cni-323659 -n newest-cni-323659
+ start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p newest-cni-323659 -n newest-cni-323659: exit status 2 (199.425846ms)
+
+ -- stdout --
+ Stopped
+
+ -- /stdout --
+ start_stop_delete_test.go:309: status error: exit status 2 (may be ok)
+ start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 unpause -p newest-cni-323659 --alsologtostderr -v=1
+ helpers_test.go:352: "busybox" [cf17fc1b-dc07-4815-9a77-3cdd7e1fa790] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
+ start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p newest-cni-323659 -n newest-cni-323659
+ start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.Kubelet}} -p newest-cni-323659 -n newest-cni-323659
+ start_stop_delete_test.go:160: (dbg) Run: out/minikube-linux-amd64 delete -p newest-cni-323659
+ helpers_test.go:352: "busybox" [cf17fc1b-dc07-4815-9a77-3cdd7e1fa790] Running
+ start_stop_delete_test.go:160: (dbg) Done: out/minikube-linux-amd64 delete -p newest-cni-323659: (1.700694144s)
+ start_stop_delete_test.go:165: (dbg) Run: kubectl config get-contexts newest-cni-323659
+ start_stop_delete_test.go:165: (dbg) Non-zero exit: kubectl config get-contexts newest-cni-323659: exit status 1 (28.453693ms)
+
+ -- stdout --
+ CURRENT NAME CLUSTER AUTHINFO NAMESPACE
+
+ -- /stdout --
+ ** stderr **
+ error: context newest-cni-323659 not found
+
+ ** /stderr **
+ start_stop_delete_test.go:167: config context error: exit status 1 (may be ok)
+ helpers_test.go:175: Cleaning up "newest-cni-323659" profile ...
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p newest-cni-323659
+=== CONT TestStartStop/group/disable-driver-mounts
+ start_stop_delete_test.go:101: skipping TestStartStop/group/disable-driver-mounts - only runs on virtualbox
+ helpers_test.go:175: Cleaning up "disable-driver-mounts-452112" profile ...
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p disable-driver-mounts-452112 +=== CONT TestStartStop/group/embed-certs +=== RUN TestStartStop/group/embed-certs/serial +=== RUN TestStartStop/group/embed-certs/serial/FirstStart + start_stop_delete_test.go:184: (dbg) Run: out/minikube-linux-amd64 start -p embed-certs-155711 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=docker --container-runtime=docker --kubernetes-version=v1.34.1 + start_stop_delete_test.go:184: (dbg) Done: out/minikube-linux-amd64 start -p default-k8s-diff-port-150516 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker --container-runtime=docker --kubernetes-version=v1.34.1: (1m5.558107478s) +=== RUN TestStartStop/group/default-k8s-diff-port/serial/DeployApp + start_stop_delete_test.go:194: (dbg) Run: kubectl --context default-k8s-diff-port-150516 create -f testdata/busybox.yaml + start_stop_delete_test.go:194: (dbg) TestStartStop/group/default-k8s-diff-port/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ... + helpers_test.go:352: "busybox" [3aac83af-c435-47ff-ba2a-e1b77b77f4af] Pending + start_stop_delete_test.go:194: (dbg) TestStartStop/group/no-preload/serial/DeployApp: integration-test=busybox healthy within 8.003363656s + start_stop_delete_test.go:194: (dbg) Run: kubectl --context no-preload-162995 exec busybox -- /bin/sh -c "ulimit -n" +=== RUN TestStartStop/group/no-preload/serial/EnableAddonWhileActive + start_stop_delete_test.go:203: (dbg) Run: out/minikube-linux-amd64 addons enable metrics-server -p no-preload-162995 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain + start_stop_delete_test.go:213: (dbg) Run: kubectl --context no-preload-162995 describe deploy/metrics-server -n kube-system +=== RUN TestStartStop/group/no-preload/serial/Stop + start_stop_delete_test.go:226: (dbg) Run: out/minikube-linux-amd64 stop -p no-preload-162995 --alsologtostderr -v=3 + helpers_test.go:352: "busybox" [3aac83af-c435-47ff-ba2a-e1b77b77f4af] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox]) + helpers_test.go:352: "busybox" [3aac83af-c435-47ff-ba2a-e1b77b77f4af] Running + start_stop_delete_test.go:194: (dbg) TestStartStop/group/default-k8s-diff-port/serial/DeployApp: integration-test=busybox healthy within 8.002449048s + start_stop_delete_test.go:194: (dbg) Run: kubectl --context default-k8s-diff-port-150516 exec busybox -- /bin/sh -c "ulimit -n" +=== RUN TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive + start_stop_delete_test.go:203: (dbg) Run: out/minikube-linux-amd64 addons enable metrics-server -p default-k8s-diff-port-150516 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain + start_stop_delete_test.go:213: (dbg) Run: kubectl --context default-k8s-diff-port-150516 describe deploy/metrics-server -n kube-system +=== RUN TestStartStop/group/default-k8s-diff-port/serial/Stop + start_stop_delete_test.go:226: (dbg) Run: out/minikube-linux-amd64 stop -p default-k8s-diff-port-150516 --alsologtostderr -v=3 + start_stop_delete_test.go:226: (dbg) Done: out/minikube-linux-amd64 stop -p no-preload-162995 --alsologtostderr -v=3: (10.48796486s) +=== RUN TestStartStop/group/no-preload/serial/EnableAddonAfterStop + start_stop_delete_test.go:237: (dbg) Run: out/minikube-linux-amd64 status 
--format={{.Host}} -p no-preload-162995 -n no-preload-162995 + start_stop_delete_test.go:237: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p no-preload-162995 -n no-preload-162995: exit status 7 (43.128301ms) + + -- stdout -- + Stopped + + -- /stdout -- + start_stop_delete_test.go:237: status error: exit status 7 (may be ok) + start_stop_delete_test.go:244: (dbg) Run: out/minikube-linux-amd64 addons enable dashboard -p no-preload-162995 --images=MetricsScraper=registry.k8s.io/echoserver:1.4 +=== RUN TestStartStop/group/no-preload/serial/SecondStart + start_stop_delete_test.go:254: (dbg) Run: out/minikube-linux-amd64 start -p no-preload-162995 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=docker --kubernetes-version=v1.34.1 +E1102 23:27:45.031559 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + start_stop_delete_test.go:226: (dbg) Done: out/minikube-linux-amd64 stop -p default-k8s-diff-port-150516 --alsologtostderr -v=3: (10.513802881s) +=== RUN TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop + start_stop_delete_test.go:237: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p default-k8s-diff-port-150516 -n default-k8s-diff-port-150516 + start_stop_delete_test.go:237: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p default-k8s-diff-port-150516 -n default-k8s-diff-port-150516: exit status 7 (46.3959ms) + + -- stdout -- + Stopped + + -- /stdout -- + start_stop_delete_test.go:237: status error: exit status 7 (may be ok) + start_stop_delete_test.go:244: (dbg) Run: out/minikube-linux-amd64 addons enable dashboard -p default-k8s-diff-port-150516 --images=MetricsScraper=registry.k8s.io/echoserver:1.4 +=== RUN TestStartStop/group/default-k8s-diff-port/serial/SecondStart + start_stop_delete_test.go:254: (dbg) Run: out/minikube-linux-amd64 start -p default-k8s-diff-port-150516 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker --container-runtime=docker --kubernetes-version=v1.34.1 + start_stop_delete_test.go:254: (dbg) Done: out/minikube-linux-amd64 start -p old-k8s-version-736872 --memory=3072 --alsologtostderr --wait=true --kvm-network=default --kvm-qemu-uri=qemu:///system --disable-driver-mounts --keep-context=false --driver=docker --container-runtime=docker --kubernetes-version=v1.28.0: (46.877532286s) + start_stop_delete_test.go:260: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p old-k8s-version-736872 -n old-k8s-version-736872 +=== RUN TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop + start_stop_delete_test.go:272: (dbg) TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ... 
+ helpers_test.go:352: "kubernetes-dashboard-8694d4445c-bkqb2" [8c07a789-c2d9-4eea-989c-fbe126f4b5f3] Running + start_stop_delete_test.go:272: (dbg) TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.002965358s +=== RUN TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop + start_stop_delete_test.go:285: (dbg) TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ... + helpers_test.go:352: "kubernetes-dashboard-8694d4445c-bkqb2" [8c07a789-c2d9-4eea-989c-fbe126f4b5f3] Running + start_stop_delete_test.go:285: (dbg) TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.002321475s + start_stop_delete_test.go:289: (dbg) Run: kubectl --context old-k8s-version-736872 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard +=== RUN TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages + start_stop_delete_test.go:302: (dbg) Run: out/minikube-linux-amd64 -p old-k8s-version-736872 image list --format=json + start_stop_delete_test.go:302: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc +=== RUN TestStartStop/group/old-k8s-version/serial/Pause + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 pause -p old-k8s-version-736872 --alsologtostderr -v=1 + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-736872 -n old-k8s-version-736872 + start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-736872 -n old-k8s-version-736872: exit status 2 (208.508987ms) + + -- stdout -- + Paused + + -- /stdout -- + start_stop_delete_test.go:309: status error: exit status 2 (may be ok) + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.Kubelet}} -p old-k8s-version-736872 -n old-k8s-version-736872 + start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p old-k8s-version-736872 -n old-k8s-version-736872: exit status 2 (201.928617ms) + + -- stdout -- + Stopped + + -- /stdout -- + start_stop_delete_test.go:309: status error: exit status 2 (may be ok) + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 unpause -p old-k8s-version-736872 --alsologtostderr -v=1 + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p old-k8s-version-736872 -n old-k8s-version-736872 + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.Kubelet}} -p old-k8s-version-736872 -n old-k8s-version-736872 + start_stop_delete_test.go:160: (dbg) Run: out/minikube-linux-amd64 delete -p old-k8s-version-736872 + start_stop_delete_test.go:160: (dbg) Done: out/minikube-linux-amd64 delete -p old-k8s-version-736872: (1.746218675s) + start_stop_delete_test.go:165: (dbg) Run: kubectl config get-contexts old-k8s-version-736872 + start_stop_delete_test.go:165: (dbg) Non-zero exit: kubectl config get-contexts old-k8s-version-736872: exit status 1 (28.231195ms) + + -- stdout -- + CURRENT NAME CLUSTER AUTHINFO NAMESPACE + + -- /stdout -- + ** stderr ** + error: context old-k8s-version-736872 not found + + ** /stderr ** + start_stop_delete_test.go:167: config context error: exit status 1 (may be ok) + helpers_test.go:175: Cleaning up "old-k8s-version-736872" 
profile ...
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p old-k8s-version-736872
+--- PASS: TestNetworkPlugins (489.10s)
+ --- PASS: TestNetworkPlugins/group (1.74s)
+ --- SKIP: TestNetworkPlugins/group/cilium (1.74s)
+ --- PASS: TestNetworkPlugins/group/auto (81.50s)
+ --- PASS: TestNetworkPlugins/group/auto/Start (55.69s)
+ --- PASS: TestNetworkPlugins/group/auto/KubeletFlags (0.21s)
+ --- PASS: TestNetworkPlugins/group/auto/NetCatPod (10.31s)
+ --- PASS: TestNetworkPlugins/group/auto/DNS (0.13s)
+ --- PASS: TestNetworkPlugins/group/auto/Localhost (0.11s)
+ --- PASS: TestNetworkPlugins/group/auto/HairPin (0.11s)
+ --- PASS: TestNetworkPlugins/group/calico (72.86s)
+ --- PASS: TestNetworkPlugins/group/calico/Start (42.53s)
+ --- PASS: TestNetworkPlugins/group/calico/ControllerPod (6.01s)
+ --- PASS: TestNetworkPlugins/group/calico/KubeletFlags (0.19s)
+ --- PASS: TestNetworkPlugins/group/calico/NetCatPod (9.10s)
+ --- PASS: TestNetworkPlugins/group/calico/DNS (0.09s)
+ --- PASS: TestNetworkPlugins/group/calico/Localhost (0.06s)
+ --- PASS: TestNetworkPlugins/group/calico/HairPin (0.07s)
+ --- PASS: TestNetworkPlugins/group/custom-flannel (71.02s)
+ --- PASS: TestNetworkPlugins/group/custom-flannel/Start (46.16s)
+ --- PASS: TestNetworkPlugins/group/custom-flannel/KubeletFlags (0.22s)
+ --- PASS: TestNetworkPlugins/group/custom-flannel/NetCatPod (8.13s)
+ --- PASS: TestNetworkPlugins/group/custom-flannel/DNS (0.09s)
+ --- PASS: TestNetworkPlugins/group/custom-flannel/Localhost (0.07s)
+ --- PASS: TestNetworkPlugins/group/custom-flannel/HairPin (0.06s)
+ --- PASS: TestNetworkPlugins/group/kindnet (86.05s)
+ --- PASS: TestNetworkPlugins/group/kindnet/Start (53.77s)
+ --- PASS: TestNetworkPlugins/group/kindnet/ControllerPod (6.00s)
+ --- PASS: TestNetworkPlugins/group/kindnet/KubeletFlags (0.19s)
+ --- PASS: TestNetworkPlugins/group/kindnet/NetCatPod (8.10s)
+ --- PASS: TestNetworkPlugins/group/kindnet/DNS (0.10s)
+ --- PASS: TestNetworkPlugins/group/kindnet/Localhost (0.08s)
+ --- PASS: TestNetworkPlugins/group/kindnet/HairPin (0.08s)
+ --- PASS: TestNetworkPlugins/group/false (62.38s)
+ --- PASS: TestNetworkPlugins/group/false/Start (35.39s)
+ --- PASS: TestNetworkPlugins/group/false/KubeletFlags (0.19s)
+ --- PASS: TestNetworkPlugins/group/false/NetCatPod (8.10s)
+ --- PASS: TestNetworkPlugins/group/false/DNS (0.09s)
+ --- PASS: TestNetworkPlugins/group/false/Localhost (0.08s)
+ --- PASS: TestNetworkPlugins/group/false/HairPin (0.08s)
+ --- PASS: TestNetworkPlugins/group/flannel (75.25s)
+ --- PASS: TestNetworkPlugins/group/flannel/Start (44.60s)
+ --- PASS: TestNetworkPlugins/group/flannel/ControllerPod (6.00s)
+ --- PASS: TestNetworkPlugins/group/flannel/KubeletFlags (0.20s)
+ --- PASS: TestNetworkPlugins/group/flannel/NetCatPod (9.09s)
+ --- PASS: TestNetworkPlugins/group/flannel/DNS (0.09s)
+ --- PASS: TestNetworkPlugins/group/flannel/Localhost (0.07s)
+ --- PASS: TestNetworkPlugins/group/flannel/HairPin (0.07s)
+ --- PASS: TestNetworkPlugins/group/bridge (80.80s)
+ --- PASS: TestNetworkPlugins/group/bridge/Start (57.65s)
+ --- PASS: TestNetworkPlugins/group/bridge/KubeletFlags (0.19s)
+ --- PASS: TestNetworkPlugins/group/bridge/NetCatPod (8.11s)
+ --- PASS: TestNetworkPlugins/group/bridge/DNS (0.10s)
+ --- PASS: TestNetworkPlugins/group/bridge/Localhost (0.08s)
+ --- PASS: TestNetworkPlugins/group/bridge/HairPin (0.07s)
+ --- PASS: TestNetworkPlugins/group/kubenet (82.01s)
+ --- PASS: TestNetworkPlugins/group/kubenet/Start (56.10s)
+ --- PASS: TestNetworkPlugins/group/kubenet/KubeletFlags (0.20s)
+ --- PASS: TestNetworkPlugins/group/kubenet/NetCatPod (7.10s)
+ --- PASS: TestNetworkPlugins/group/kubenet/DNS (0.09s)
+ --- PASS: TestNetworkPlugins/group/kubenet/Localhost (0.08s)
+ --- PASS: TestNetworkPlugins/group/kubenet/HairPin (0.07s)
+ --- PASS: TestNetworkPlugins/group/enable-default-cni (86.83s)
+ --- PASS: TestNetworkPlugins/group/enable-default-cni/Start (59.43s)
+ --- PASS: TestNetworkPlugins/group/enable-default-cni/KubeletFlags (0.19s)
+ --- PASS: TestNetworkPlugins/group/enable-default-cni/NetCatPod (9.11s)
+ --- PASS: TestNetworkPlugins/group/enable-default-cni/DNS (0.09s)
+ --- PASS: TestNetworkPlugins/group/enable-default-cni/Localhost (0.07s)
+ --- PASS: TestNetworkPlugins/group/enable-default-cni/HairPin (0.08s)
+ start_stop_delete_test.go:254: (dbg) Done: out/minikube-linux-amd64 start -p no-preload-162995 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=docker --kubernetes-version=v1.34.1: (45.592735766s)
+ start_stop_delete_test.go:260: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p no-preload-162995 -n no-preload-162995
+ start_stop_delete_test.go:184: (dbg) Done: out/minikube-linux-amd64 start -p embed-certs-155711 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=docker --container-runtime=docker --kubernetes-version=v1.34.1: (1m1.61721443s)
+=== RUN TestStartStop/group/embed-certs/serial/DeployApp
+ start_stop_delete_test.go:194: (dbg) Run: kubectl --context embed-certs-155711 create -f testdata/busybox.yaml
+=== RUN TestStartStop/group/no-preload/serial/UserAppExistsAfterStop
+ start_stop_delete_test.go:272: (dbg) TestStartStop/group/no-preload/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
+ helpers_test.go:352: "kubernetes-dashboard-855c9754f9-ndvpr" [983369c9-edb9-40e5-8101-e014c249c257] Running
+ start_stop_delete_test.go:194: (dbg) TestStartStop/group/embed-certs/serial/DeployApp: waiting 8m0s for pods matching "integration-test=busybox" in namespace "default" ...
+ helpers_test.go:352: "busybox" [94d54c32-9d96-46b6-ac68-1c88e3c61df7] Pending
+ helpers_test.go:352: "busybox" [94d54c32-9d96-46b6-ac68-1c88e3c61df7] Pending / Ready:ContainersNotReady (containers with unready status: [busybox]) / ContainersReady:ContainersNotReady (containers with unready status: [busybox])
+ helpers_test.go:352: "busybox" [94d54c32-9d96-46b6-ac68-1c88e3c61df7] Running
+ start_stop_delete_test.go:254: (dbg) Done: out/minikube-linux-amd64 start -p default-k8s-diff-port-150516 --memory=3072 --alsologtostderr --wait=true --apiserver-port=8444 --driver=docker --container-runtime=docker --kubernetes-version=v1.34.1: (43.53899261s)
+ start_stop_delete_test.go:260: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p default-k8s-diff-port-150516 -n default-k8s-diff-port-150516
+=== RUN TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop
+ start_stop_delete_test.go:272: (dbg) TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ...
+ helpers_test.go:352: "kubernetes-dashboard-855c9754f9-nql5s" [1ea6884e-b968-42a5-94ce-cc5d021abfd6] Running + start_stop_delete_test.go:272: (dbg) TestStartStop/group/no-preload/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.002281085s +=== RUN TestStartStop/group/no-preload/serial/AddonExistsAfterStop + start_stop_delete_test.go:285: (dbg) TestStartStop/group/no-preload/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ... + helpers_test.go:352: "kubernetes-dashboard-855c9754f9-ndvpr" [983369c9-edb9-40e5-8101-e014c249c257] Running + start_stop_delete_test.go:194: (dbg) TestStartStop/group/embed-certs/serial/DeployApp: integration-test=busybox healthy within 8.002067715s + start_stop_delete_test.go:194: (dbg) Run: kubectl --context embed-certs-155711 exec busybox -- /bin/sh -c "ulimit -n" +=== RUN TestStartStop/group/embed-certs/serial/EnableAddonWhileActive + start_stop_delete_test.go:203: (dbg) Run: out/minikube-linux-amd64 addons enable metrics-server -p embed-certs-155711 --images=MetricsServer=registry.k8s.io/echoserver:1.4 --registries=MetricsServer=fake.domain + start_stop_delete_test.go:213: (dbg) Run: kubectl --context embed-certs-155711 describe deploy/metrics-server -n kube-system +=== RUN TestStartStop/group/embed-certs/serial/Stop + start_stop_delete_test.go:226: (dbg) Run: out/minikube-linux-amd64 stop -p embed-certs-155711 --alsologtostderr -v=3 + start_stop_delete_test.go:272: (dbg) TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.001810958s +=== RUN TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop + start_stop_delete_test.go:285: (dbg) TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ... 
+ helpers_test.go:352: "kubernetes-dashboard-855c9754f9-nql5s" [1ea6884e-b968-42a5-94ce-cc5d021abfd6] Running + start_stop_delete_test.go:285: (dbg) TestStartStop/group/no-preload/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.00180085s + start_stop_delete_test.go:289: (dbg) Run: kubectl --context no-preload-162995 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard +=== RUN TestStartStop/group/no-preload/serial/VerifyKubernetesImages + start_stop_delete_test.go:302: (dbg) Run: out/minikube-linux-amd64 -p no-preload-162995 image list --format=json + start_stop_delete_test.go:302: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc +=== RUN TestStartStop/group/no-preload/serial/Pause + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 pause -p no-preload-162995 --alsologtostderr -v=1 + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-162995 -n no-preload-162995 + start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-162995 -n no-preload-162995: exit status 2 (203.026304ms) + + -- stdout -- + Paused + + -- /stdout -- + start_stop_delete_test.go:309: status error: exit status 2 (may be ok) + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.Kubelet}} -p no-preload-162995 -n no-preload-162995 + start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p no-preload-162995 -n no-preload-162995: exit status 2 (198.131086ms) + + -- stdout -- + Stopped + + -- /stdout -- + start_stop_delete_test.go:309: status error: exit status 2 (may be ok) + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 unpause -p no-preload-162995 --alsologtostderr -v=1 + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p no-preload-162995 -n no-preload-162995 + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.Kubelet}} -p no-preload-162995 -n no-preload-162995 + start_stop_delete_test.go:160: (dbg) Run: out/minikube-linux-amd64 delete -p no-preload-162995 + start_stop_delete_test.go:160: (dbg) Done: out/minikube-linux-amd64 delete -p no-preload-162995: (1.772158583s) + start_stop_delete_test.go:165: (dbg) Run: kubectl config get-contexts no-preload-162995 + start_stop_delete_test.go:165: (dbg) Non-zero exit: kubectl config get-contexts no-preload-162995: exit status 1 (27.468256ms) + + -- stdout -- + CURRENT NAME CLUSTER AUTHINFO NAMESPACE + + -- /stdout -- + ** stderr ** + error: context no-preload-162995 not found + + ** /stderr ** + start_stop_delete_test.go:167: config context error: exit status 1 (may be ok) + helpers_test.go:175: Cleaning up "no-preload-162995" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p no-preload-162995 +E1102 23:28:39.811213 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/auto-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:28:39.817508 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/auto-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:28:39.828794 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/auto-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:28:39.850020 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/auto-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:28:39.891247 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/auto-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:28:39.972545 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/auto-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:28:40.133911 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/auto-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + start_stop_delete_test.go:285: (dbg) TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.002370533s + start_stop_delete_test.go:289: (dbg) Run: kubectl --context default-k8s-diff-port-150516 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard +=== RUN TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages + start_stop_delete_test.go:302: (dbg) Run: out/minikube-linux-amd64 -p default-k8s-diff-port-150516 image list --format=json + start_stop_delete_test.go:302: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc +=== RUN TestStartStop/group/default-k8s-diff-port/serial/Pause + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 pause -p default-k8s-diff-port-150516 --alsologtostderr -v=1 +E1102 23:28:40.456038 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/auto-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-150516 -n default-k8s-diff-port-150516 + start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status 
--format={{.APIServer}} -p default-k8s-diff-port-150516 -n default-k8s-diff-port-150516: exit status 2 (202.546499ms) + + -- stdout -- + Paused + + -- /stdout -- + start_stop_delete_test.go:309: status error: exit status 2 (may be ok) + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.Kubelet}} -p default-k8s-diff-port-150516 -n default-k8s-diff-port-150516 +E1102 23:28:41.098090 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/auto-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p default-k8s-diff-port-150516 -n default-k8s-diff-port-150516: exit status 2 (198.559261ms) + + -- stdout -- + Stopped + + -- /stdout -- + start_stop_delete_test.go:309: status error: exit status 2 (may be ok) + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 unpause -p default-k8s-diff-port-150516 --alsologtostderr -v=1 + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p default-k8s-diff-port-150516 -n default-k8s-diff-port-150516 + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.Kubelet}} -p default-k8s-diff-port-150516 -n default-k8s-diff-port-150516 + start_stop_delete_test.go:160: (dbg) Run: out/minikube-linux-amd64 delete -p default-k8s-diff-port-150516 +E1102 23:28:42.379978 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/auto-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + start_stop_delete_test.go:160: (dbg) Done: out/minikube-linux-amd64 delete -p default-k8s-diff-port-150516: (1.738053097s) + start_stop_delete_test.go:165: (dbg) Run: kubectl config get-contexts default-k8s-diff-port-150516 + start_stop_delete_test.go:165: (dbg) Non-zero exit: kubectl config get-contexts default-k8s-diff-port-150516: exit status 1 (30.728432ms) + + -- stdout -- + CURRENT NAME CLUSTER AUTHINFO NAMESPACE + + -- /stdout -- + ** stderr ** + error: context default-k8s-diff-port-150516 not found + + ** /stderr ** + start_stop_delete_test.go:167: config context error: exit status 1 (may be ok) + helpers_test.go:175: Cleaning up "default-k8s-diff-port-150516" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p default-k8s-diff-port-150516 + start_stop_delete_test.go:226: (dbg) Done: out/minikube-linux-amd64 stop -p embed-certs-155711 --alsologtostderr -v=3: (10.626317554s) +=== RUN TestStartStop/group/embed-certs/serial/EnableAddonAfterStop + start_stop_delete_test.go:237: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p embed-certs-155711 -n embed-certs-155711 + start_stop_delete_test.go:237: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Host}} -p embed-certs-155711 -n embed-certs-155711: exit status 7 (37.141813ms) + + -- stdout -- + Stopped + + -- /stdout -- + start_stop_delete_test.go:237: status error: exit status 7 (may be ok) + start_stop_delete_test.go:244: (dbg) Run: out/minikube-linux-amd64 addons enable dashboard -p embed-certs-155711 --images=MetricsScraper=registry.k8s.io/echoserver:1.4 +=== RUN TestStartStop/group/embed-certs/serial/SecondStart + start_stop_delete_test.go:254: (dbg) Run: out/minikube-linux-amd64 start -p embed-certs-155711 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=docker --container-runtime=docker --kubernetes-version=v1.34.1 +E1102 23:28:44.941447 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/auto-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:28:50.063482 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/auto-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:00.305093 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/auto-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:05.677321 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:19.557419 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:19.563697 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:19.574954 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:19.596233 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.crt: no such file or directory" 
logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:19.637486 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:19.718774 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:19.880348 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:20.201872 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:20.786730 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/auto-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:20.843966 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:22.126057 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:24.688016 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:26.181710 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:26.187957 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:26.199234 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:26.220644 37869 cert_rotation.go:172] "Loading client cert failed" err="open 
/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:26.261893 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:26.343325 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:26.504695 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:26.826041 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:27.467946 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:28.750124 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:29.809741 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:30.312309 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:30.318567 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:30.329827 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:30.351085 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 
23:29:30.392547 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:30.473846 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:30.635251 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:30.956712 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:31.312061 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + start_stop_delete_test.go:254: (dbg) Done: out/minikube-linux-amd64 start -p embed-certs-155711 --memory=3072 --alsologtostderr --wait=true --embed-certs --driver=docker --container-runtime=docker --kubernetes-version=v1.34.1: (47.55891815s) + start_stop_delete_test.go:260: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p embed-certs-155711 -n embed-certs-155711 +E1102 23:29:31.598518 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +=== RUN TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop + start_stop_delete_test.go:272: (dbg) TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ... 
+ helpers_test.go:352: "kubernetes-dashboard-855c9754f9-qqg9n" [39bf47ad-6f69-4d02-8c88-656c83d0f694] Running +E1102 23:29:32.880064 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:33.378897 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/skaffold-173551/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:35.442096 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:36.433626 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/kindnet-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + start_stop_delete_test.go:272: (dbg) TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 6.001842481s +=== RUN TestStartStop/group/embed-certs/serial/AddonExistsAfterStop + start_stop_delete_test.go:285: (dbg) TestStartStop/group/embed-certs/serial/AddonExistsAfterStop: waiting 9m0s for pods matching "k8s-app=kubernetes-dashboard" in namespace "kubernetes-dashboard" ... + helpers_test.go:352: "kubernetes-dashboard-855c9754f9-qqg9n" [39bf47ad-6f69-4d02-8c88-656c83d0f694] Running +E1102 23:29:39.850532 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:39.856811 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:39.868036 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:39.889290 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:39.930531 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:40.011831 37869 cert_rotation.go:172] "Loading client cert failed" err="open 
/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:40.051118 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/calico-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:40.173455 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:40.495117 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:40.563433 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/custom-flannel-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:41.136326 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:41.961651 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/addons-448331/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" +E1102 23:29:42.418048 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + start_stop_delete_test.go:285: (dbg) TestStartStop/group/embed-certs/serial/AddonExistsAfterStop: k8s-app=kubernetes-dashboard healthy within 5.001812973s + start_stop_delete_test.go:289: (dbg) Run: kubectl --context embed-certs-155711 describe deploy/dashboard-metrics-scraper -n kubernetes-dashboard +=== RUN TestStartStop/group/embed-certs/serial/VerifyKubernetesImages + start_stop_delete_test.go:302: (dbg) Run: out/minikube-linux-amd64 -p embed-certs-155711 image list --format=json + start_stop_delete_test.go:302: Found non-minikube image: gcr.io/k8s-minikube/busybox:1.28.4-glibc +=== RUN TestStartStop/group/embed-certs/serial/Pause + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 pause -p embed-certs-155711 --alsologtostderr -v=1 + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p embed-certs-155711 -n embed-certs-155711 + start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.APIServer}} -p embed-certs-155711 -n embed-certs-155711: exit status 2 (200.349214ms) + + -- stdout -- + Paused + + -- /stdout -- + start_stop_delete_test.go:309: status error: exit status 2 (may be ok) + start_stop_delete_test.go:309: (dbg) 
Run: out/minikube-linux-amd64 status --format={{.Kubelet}} -p embed-certs-155711 -n embed-certs-155711 + start_stop_delete_test.go:309: (dbg) Non-zero exit: out/minikube-linux-amd64 status --format={{.Kubelet}} -p embed-certs-155711 -n embed-certs-155711: exit status 2 (196.40794ms) + + -- stdout -- + Stopped + + -- /stdout -- + start_stop_delete_test.go:309: status error: exit status 2 (may be ok) + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 unpause -p embed-certs-155711 --alsologtostderr -v=1 + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p embed-certs-155711 -n embed-certs-155711 + start_stop_delete_test.go:309: (dbg) Run: out/minikube-linux-amd64 status --format={{.Kubelet}} -p embed-certs-155711 -n embed-certs-155711 + start_stop_delete_test.go:160: (dbg) Run: out/minikube-linux-amd64 delete -p embed-certs-155711 +E1102 23:29:44.979895 37869 cert_rotation.go:172] "Loading client cert failed" err="open /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/false-999044/client.crt: no such file or directory" logger="tls-transport-cache.UnhandledError" key="key" + start_stop_delete_test.go:160: (dbg) Done: out/minikube-linux-amd64 delete -p embed-certs-155711: (1.770284693s) + start_stop_delete_test.go:165: (dbg) Run: kubectl config get-contexts embed-certs-155711 + start_stop_delete_test.go:165: (dbg) Non-zero exit: kubectl config get-contexts embed-certs-155711: exit status 1 (28.645463ms) + + -- stdout -- + CURRENT NAME CLUSTER AUTHINFO NAMESPACE + + -- /stdout -- + ** stderr ** + error: context embed-certs-155711 not found + + ** /stderr ** + start_stop_delete_test.go:167: config context error: exit status 1 (may be ok) + helpers_test.go:175: Cleaning up "embed-certs-155711" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p embed-certs-155711
+--- PASS: TestStartStop (507.58s)
+ --- PASS: TestStartStop/group (0.00s)
+ --- PASS: TestStartStop/group/newest-cni (49.42s)
+ --- PASS: TestStartStop/group/newest-cni/serial (49.33s)
+ --- PASS: TestStartStop/group/newest-cni/serial/FirstStart (25.31s)
+ --- PASS: TestStartStop/group/newest-cni/serial/DeployApp (0.00s)
+ --- PASS: TestStartStop/group/newest-cni/serial/EnableAddonWhileActive (0.46s)
+ --- PASS: TestStartStop/group/newest-cni/serial/Stop (10.49s)
+ --- PASS: TestStartStop/group/newest-cni/serial/EnableAddonAfterStop (0.10s)
+ --- PASS: TestStartStop/group/newest-cni/serial/SecondStart (9.54s)
+ --- PASS: TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop (0.00s)
+ --- PASS: TestStartStop/group/newest-cni/serial/AddonExistsAfterStop (0.00s)
+ --- PASS: TestStartStop/group/newest-cni/serial/VerifyKubernetesImages (0.16s)
+ --- PASS: TestStartStop/group/newest-cni/serial/Pause (1.53s)
+ --- SKIP: TestStartStop/group/disable-driver-mounts (0.09s)
+ --- PASS: TestStartStop/group/old-k8s-version (118.53s)
+ --- PASS: TestStartStop/group/old-k8s-version/serial (118.44s)
+ --- PASS: TestStartStop/group/old-k8s-version/serial/FirstStart (38.02s)
+ --- PASS: TestStartStop/group/old-k8s-version/serial/DeployApp (7.16s)
+ --- PASS: TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive (0.54s)
+ --- PASS: TestStartStop/group/old-k8s-version/serial/Stop (10.48s)
+ --- PASS: TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop (0.10s)
+ --- PASS: TestStartStop/group/old-k8s-version/serial/SecondStart (47.12s)
+ --- PASS: TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop (6.00s)
+ --- PASS: TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop (5.05s)
+ --- PASS: TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages (0.55s)
+ --- PASS: TestStartStop/group/old-k8s-version/serial/Pause (1.63s)
+ --- PASS: TestStartStop/group/no-preload (148.50s)
+ --- PASS: TestStartStop/group/no-preload/serial (148.41s)
+ --- PASS: TestStartStop/group/no-preload/serial/FirstStart (68.75s)
+ --- PASS: TestStartStop/group/no-preload/serial/DeployApp (8.17s)
+ --- PASS: TestStartStop/group/no-preload/serial/EnableAddonWhileActive (0.56s)
+ --- PASS: TestStartStop/group/no-preload/serial/Stop (10.49s)
+ --- PASS: TestStartStop/group/no-preload/serial/EnableAddonAfterStop (0.10s)
+ --- PASS: TestStartStop/group/no-preload/serial/SecondStart (45.82s)
+ --- PASS: TestStartStop/group/no-preload/serial/UserAppExistsAfterStop (6.00s)
+ --- PASS: TestStartStop/group/no-preload/serial/AddonExistsAfterStop (5.04s)
+ --- PASS: TestStartStop/group/no-preload/serial/VerifyKubernetesImages (0.16s)
+ --- PASS: TestStartStop/group/no-preload/serial/Pause (1.52s)
+ --- PASS: TestStartStop/group/default-k8s-diff-port (143.30s)
+ --- PASS: TestStartStop/group/default-k8s-diff-port/serial (143.21s)
+ --- PASS: TestStartStop/group/default-k8s-diff-port/serial/FirstStart (65.56s)
+ --- PASS: TestStartStop/group/default-k8s-diff-port/serial/DeployApp (8.26s)
+ --- PASS: TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive (0.52s)
+ --- PASS: TestStartStop/group/default-k8s-diff-port/serial/Stop (10.51s)
+ --- PASS: TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop (0.13s)
+ --- PASS: TestStartStop/group/default-k8s-diff-port/serial/SecondStart (43.76s)
+ --- PASS: TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop (6.00s)
+ --- PASS: TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop (5.04s)
+ --- PASS: TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages (0.16s)
+ --- PASS: TestStartStop/group/default-k8s-diff-port/serial/Pause (1.50s)
+ --- PASS: TestStartStop/group/embed-certs (143.34s)
+ --- PASS: TestStartStop/group/embed-certs/serial (143.26s)
+ --- PASS: TestStartStop/group/embed-certs/serial/FirstStart (61.62s)
+ --- PASS: TestStartStop/group/embed-certs/serial/DeployApp (8.14s)
+ --- PASS: TestStartStop/group/embed-certs/serial/EnableAddonWhileActive (0.50s)
+ --- PASS: TestStartStop/group/embed-certs/serial/Stop (10.63s)
+ --- PASS: TestStartStop/group/embed-certs/serial/EnableAddonAfterStop (0.10s)
+ --- PASS: TestStartStop/group/embed-certs/serial/SecondStart (47.78s)
+ --- PASS: TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop (6.00s)
+ --- PASS: TestStartStop/group/embed-certs/serial/AddonExistsAfterStop (5.04s)
+ --- PASS: TestStartStop/group/embed-certs/serial/VerifyKubernetesImages (0.16s)
+ --- PASS: TestStartStop/group/embed-certs/serial/Pause (1.49s)
+FAIL
+Tests completed in 43m18.975505587s (result code 1)
+
+=== Skipped
+=== SKIP: TestDownloadOnly/v1.28.0/cached-images (0.00s)
+ aaa_download_only_test.go:128: Preload exists, images won't be cached
+ --- SKIP: TestDownloadOnly/v1.28.0/cached-images (0.00s)
+
+=== SKIP: TestDownloadOnly/v1.28.0/binaries (0.00s)
+ aaa_download_only_test.go:150: Preload exists, binaries are present within.
+ --- SKIP: TestDownloadOnly/v1.28.0/binaries (0.00s)
+
+=== SKIP: TestDownloadOnly/v1.28.0/kubectl (0.00s)
+ aaa_download_only_test.go:166: Test for darwin and windows
+ --- SKIP: TestDownloadOnly/v1.28.0/kubectl (0.00s)
+
+=== SKIP: TestDownloadOnly/v1.34.1/cached-images (0.00s)
+ aaa_download_only_test.go:128: Preload exists, images won't be cached
+ --- SKIP: TestDownloadOnly/v1.34.1/cached-images (0.00s)
+
+=== SKIP: TestDownloadOnly/v1.34.1/binaries (0.00s)
+ aaa_download_only_test.go:150: Preload exists, binaries are present within.
+ --- SKIP: TestDownloadOnly/v1.34.1/binaries (0.00s)
+
+=== SKIP: TestDownloadOnly/v1.34.1/kubectl (0.00s)
+ aaa_download_only_test.go:166: Test for darwin and windows
+ --- SKIP: TestDownloadOnly/v1.34.1/kubectl (0.00s)
+
+=== SKIP: TestAddons/serial/GCPAuth/RealCredentials (0.00s)
+ addons_test.go:763: skipping GCPAuth addon test until 'Permission "artifactregistry.repositories.downloadArtifacts" denied on resource "projects/k8s-minikube/locations/us/repositories/test-artifacts" (or it may not exist)' issue is resolved
+ --- SKIP: TestAddons/serial/GCPAuth/RealCredentials (0.00s)
+
+=== SKIP: TestAddons/parallel/Olm (0.00s)
+ addons_test.go:483: Skipping OLM addon test until https://github.com/operator-framework/operator-lifecycle-manager/issues/2534 is resolved
+ --- SKIP: TestAddons/parallel/Olm (0.00s)
+
+=== SKIP: TestDockerEnvContainerd (0.00s)
+ docker_test.go:170: running with docker true linux amd64
+ docker_test.go:172: skipping: TestDockerEnvContainerd can only be run with the containerd runtime on Docker driver
+
+=== SKIP: TestHyperKitDriverInstallOrUpdate (0.00s)
+ driver_install_or_update_test.go:37: Skip if not darwin.
+
+=== SKIP: TestHyperkitDriverSkipUpgrade (0.00s)
+ driver_install_or_update_test.go:101: Skip if not darwin.
+
+=== SKIP: TestFunctional/parallel/PodmanEnv (0.00s)
+ functional_test.go:565: only validate podman env with docker container runtime, currently testing docker
+ --- SKIP: TestFunctional/parallel/PodmanEnv (0.00s)
+
+=== SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig (0.00s)
+ functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
+ --- SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig (0.00s)
+
+=== SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil (0.00s)
+ functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
+ --- SKIP: TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil (0.00s)
+
+=== SKIP: TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS (0.00s)
+ functional_test_tunnel_test.go:99: DNS forwarding is only supported for Hyperkit on Darwin, skipping test DNS forwarding
+ --- SKIP: TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS (0.00s)
+
+=== SKIP: TestFunctionalNewestKubernetes (0.00s)
+ functional_test.go:82:
+
+=== SKIP: TestGvisorAddon (0.00s)
+ gvisor_addon_test.go:34: skipping test because --gvisor=false
+
+=== SKIP: TestImageBuild/serial/validateImageBuildWithBuildEnv (0.00s)
+ image_test.go:114: skipping due to https://github.com/kubernetes/minikube/issues/12431
+ --- SKIP: TestImageBuild/serial/validateImageBuildWithBuildEnv (0.00s)
+
+=== SKIP: TestISOImage (0.00s)
+ iso_test.go:35: This test requires a VM driver
+
+=== SKIP: TestChangeNoneUser (0.00s)
+ none_test.go:38: Test requires none driver and SUDO_USER env to not be empty
+
+=== SKIP: TestScheduledStopWindows (0.00s)
+ scheduled_stop_test.go:42: test only runs on windows
+
+=== SKIP: TestNetworkPlugins/group/cilium (1.74s)
+ net_test.go:102: Skipping the test as it's interfering with other tests and is outdated
+ panic.go:636:
+ ----------------------- debugLogs start: cilium-999044 [pass: true] --------------------------------
+ >>> netcat: nslookup kubernetes.default:
+ Error in configuration: context was not found for specified context: cilium-999044
+
+
+ >>> netcat: nslookup debug kubernetes.default a-records:
+ Error in configuration: context was not found for specified context: cilium-999044
+
+
+ >>> netcat: dig search kubernetes.default:
+ Error in configuration: context was not found for specified context: cilium-999044
+
+
+ >>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local udp/53:
+ Error in configuration: context was not found for specified context: cilium-999044
+
+
+ >>> netcat: dig @10.96.0.10 kubernetes.default.svc.cluster.local tcp/53:
+ Error in configuration: context was not found for specified context: cilium-999044
+
+
+ >>> netcat: nc 10.96.0.10 udp/53:
+ Error in configuration: context was not found for specified context: cilium-999044
+
+
+ >>> netcat: nc 10.96.0.10 tcp/53:
+ Error in configuration: context was not found for specified context: cilium-999044
+
+
+ >>> netcat: /etc/nsswitch.conf:
+ Error in configuration: context was not found for specified context: cilium-999044
+
+
+ >>> netcat: /etc/hosts:
+ Error in configuration: context was not found for specified context: cilium-999044
+
+
+ >>> netcat: /etc/resolv.conf:
+ Error in configuration: context was not found for specified context: cilium-999044
+
+
+ >>> host: /etc/nsswitch.conf:
+ * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles.
+ To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /etc/hosts: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /etc/resolv.conf: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> k8s: nodes, services, endpoints, daemon sets, deployments and pods, : + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> host: crictl pods: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: crictl containers: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> k8s: describe netcat deployment: + error: context "cilium-999044" does not exist + + + >>> k8s: describe netcat pod(s): + error: context "cilium-999044" does not exist + + + >>> k8s: netcat logs: + error: context "cilium-999044" does not exist + + + >>> k8s: describe coredns deployment: + error: context "cilium-999044" does not exist + + + >>> k8s: describe coredns pods: + error: context "cilium-999044" does not exist + + + >>> k8s: coredns logs: + error: context "cilium-999044" does not exist + + + >>> k8s: describe api server pod(s): + error: context "cilium-999044" does not exist + + + >>> k8s: api server logs: + error: context "cilium-999044" does not exist + + + >>> host: /etc/cni: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: ip a s: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: ip r s: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: iptables-save: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: iptables table nat: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. 
+ To start a cluster, run: "minikube start -p cilium-999044" + + + >>> k8s: describe cilium daemon set: + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> k8s: describe cilium daemon set pod(s): + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> k8s: cilium daemon set container(s) logs (current): + error: context "cilium-999044" does not exist + + + >>> k8s: cilium daemon set container(s) logs (previous): + error: context "cilium-999044" does not exist + + + >>> k8s: describe cilium deployment: + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> k8s: describe cilium deployment pod(s): + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> k8s: cilium deployment container(s) logs (current): + error: context "cilium-999044" does not exist + + + >>> k8s: cilium deployment container(s) logs (previous): + error: context "cilium-999044" does not exist + + + >>> k8s: describe kube-proxy daemon set: + error: context "cilium-999044" does not exist + + + >>> k8s: describe kube-proxy pod(s): + error: context "cilium-999044" does not exist + + + >>> k8s: kube-proxy logs: + error: context "cilium-999044" does not exist + + + >>> host: kubelet daemon status: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: kubelet daemon config: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> k8s: kubelet logs: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /etc/kubernetes/kubelet.conf: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /var/lib/kubelet/config.yaml: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. 
+ To start a cluster, run: "minikube start -p cilium-999044" + + + >>> k8s: kubectl config: + apiVersion: v1 + clusters: + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:19:55 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.103.2:8443 + name: NoKubernetes-160693 + - cluster: + certificate-authority: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:19:58 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: cluster_info + server: https://192.168.94.2:8443 + name: cert-expiration-250017 + contexts: + - context: + cluster: NoKubernetes-160693 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:19:55 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: NoKubernetes-160693 + name: NoKubernetes-160693 + - context: + cluster: cert-expiration-250017 + extensions: + - extension: + last-update: Sun, 02 Nov 2025 23:19:58 UTC + provider: minikube.sigs.k8s.io + version: v1.37.0 + name: context_info + namespace: default + user: cert-expiration-250017 + name: cert-expiration-250017 + current-context: cert-expiration-250017 + kind: Config + users: + - name: NoKubernetes-160693 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/NoKubernetes-160693/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/NoKubernetes-160693/client.key + - name: cert-expiration-250017 + user: + client-certificate: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/cert-expiration-250017/client.crt + client-key: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/cert-expiration-250017/client.key + + + >>> k8s: cms: + Error in configuration: context was not found for specified context: cilium-999044 + + + >>> host: docker daemon status: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: docker daemon config: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /etc/docker/daemon.json: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: docker system info: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: cri-docker daemon status: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: cri-docker daemon config: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /etc/systemd/system/cri-docker.service.d/10-cni.conf: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. 
+ To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /usr/lib/systemd/system/cri-docker.service: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: cri-dockerd version: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: containerd daemon status: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: containerd daemon config: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /lib/systemd/system/containerd.service: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /etc/containerd/config.toml: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: containerd config dump: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: crio daemon status: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: crio daemon config: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: /etc/crio: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + + >>> host: crio config: + * Profile "cilium-999044" not found. Run "minikube profile list" to view all profiles. + To start a cluster, run: "minikube start -p cilium-999044" + + ----------------------- debugLogs end: cilium-999044 [took: 1.6580351s] -------------------------------- + helpers_test.go:175: Cleaning up "cilium-999044" profile ... + helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p cilium-999044 + --- SKIP: TestNetworkPlugins/group/cilium (1.74s) + +=== SKIP: TestStartStop/group/disable-driver-mounts (0.09s) + start_stop_delete_test.go:101: skipping TestStartStop/group/disable-driver-mounts - only runs on virtualbox + helpers_test.go:175: Cleaning up "disable-driver-mounts-452112" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p disable-driver-mounts-452112
+ --- SKIP: TestStartStop/group/disable-driver-mounts (0.09s)
+
+=== Failed
+=== FAIL: TestScheduledStopUnix (21.40s)
+ scheduled_stop_test.go:128: (dbg) Run: out/minikube-linux-amd64 start -p scheduled-stop-206205 --memory=3072 --driver=docker --container-runtime=docker
+ scheduled_stop_test.go:128: (dbg) Done: out/minikube-linux-amd64 start -p scheduled-stop-206205 --memory=3072 --driver=docker --container-runtime=docker: (18.556015798s)
+ scheduled_stop_test.go:137: (dbg) Run: out/minikube-linux-amd64 stop -p scheduled-stop-206205 --schedule 5m
+ scheduled_stop_test.go:191: (dbg) Run: out/minikube-linux-amd64 status --format={{.TimeToStop}} -p scheduled-stop-206205 -n scheduled-stop-206205
+ scheduled_stop_test.go:169: signal error was:
+ scheduled_stop_test.go:137: (dbg) Run: out/minikube-linux-amd64 stop -p scheduled-stop-206205 --schedule 15s
+ scheduled_stop_test.go:169: signal error was:
+ scheduled_stop_test.go:98: process 256904 running but should have been killed on reschedule of stop
+ panic.go:636: *** TestScheduledStopUnix FAILED at 2025-11-02 23:18:12.413480736 +0000 UTC m=+1905.177894699
+ helpers_test.go:222: -----------------------post-mortem--------------------------------
+ helpers_test.go:223: ======> post-mortem[TestScheduledStopUnix]: network settings <======
+ helpers_test.go:230: HOST ENV snapshots: PROXY env: HTTP_PROXY="" HTTPS_PROXY="" NO_PROXY=""
+ helpers_test.go:238: ======> post-mortem[TestScheduledStopUnix]: docker inspect <======
+ helpers_test.go:239: (dbg) Run: docker inspect scheduled-stop-206205
+ helpers_test.go:243: (dbg) docker inspect scheduled-stop-206205:
+
+ -- stdout --
+ [
+ {
+ "Id": "31f6056f95e08bf2b805609824e1ed416bc76493b4814d6f006fbb3ba6ec30ff",
+ "Created": "2025-11-02T23:17:57.681999844Z",
+ "Path": "/usr/local/bin/entrypoint",
+ "Args": [
+ "/sbin/init"
+ ],
+ "State": {
+ "Status": "running",
+ "Running": true,
+ "Paused": false,
+ "Restarting": false,
+ "OOMKilled": false,
+ "Dead": false,
+ "Pid": 254030,
+ "ExitCode": 0,
+ "Error": "",
+ "StartedAt": "2025-11-02T23:17:57.692769206Z",
+ "FinishedAt": "0001-01-01T00:00:00Z"
+ },
+ "Image": "sha256:a1caeebaf98ed0136731e905a1e086f77985a42c2ebb5a7e0b3d0bd7fcbe10cc",
+ "ResolvConfPath": "/docker-graph/containers/31f6056f95e08bf2b805609824e1ed416bc76493b4814d6f006fbb3ba6ec30ff/resolv.conf",
+ "HostnamePath": "/docker-graph/containers/31f6056f95e08bf2b805609824e1ed416bc76493b4814d6f006fbb3ba6ec30ff/hostname",
+ "HostsPath": "/docker-graph/containers/31f6056f95e08bf2b805609824e1ed416bc76493b4814d6f006fbb3ba6ec30ff/hosts",
+ "LogPath": "/docker-graph/containers/31f6056f95e08bf2b805609824e1ed416bc76493b4814d6f006fbb3ba6ec30ff/31f6056f95e08bf2b805609824e1ed416bc76493b4814d6f006fbb3ba6ec30ff-json.log",
+ "Name": "/scheduled-stop-206205",
+ "RestartCount": 0,
+ "Driver": "overlay2",
+ "Platform": "linux",
+ "MountLabel": "",
+ "ProcessLabel": "",
+ "AppArmorProfile": "",
+ "ExecIDs": null,
+ "HostConfig": {
+ "Binds": [
+ "scheduled-stop-206205:/var",
+ "/lib/modules:/lib/modules:ro"
+ ],
+ "ContainerIDFile": "",
+ "LogConfig": {
+ "Type": "json-file",
+ "Config": {}
+ },
+ "NetworkMode": "scheduled-stop-206205",
+ "PortBindings": {
+ "22/tcp": [
+ {
+ "HostIp": "127.0.0.1",
+ "HostPort": ""
+ }
+ ],
+ "2376/tcp": [
+ {
+ "HostIp": "127.0.0.1",
+ "HostPort": ""
+ }
+ ],
+ "32443/tcp": [
+ {
+ "HostIp": "127.0.0.1",
+ "HostPort": ""
+ }
+ ],
+ "5000/tcp": [
+ {
+ "HostIp": "127.0.0.1",
+ "HostPort":
"" + } + ], + "8443/tcp": [ + { + "HostIp": "127.0.0.1", + "HostPort": "" + } + ] + }, + "RestartPolicy": { + "Name": "no", + "MaximumRetryCount": 0 + }, + "AutoRemove": false, + "VolumeDriver": "", + "VolumesFrom": null, + "ConsoleSize": [ + 0, + 0 + ], + "CapAdd": null, + "CapDrop": null, + "CgroupnsMode": "private", + "Dns": [], + "DnsOptions": [], + "DnsSearch": [], + "ExtraHosts": null, + "GroupAdd": null, + "IpcMode": "private", + "Cgroup": "", + "Links": null, + "OomScoreAdj": 0, + "PidMode": "", + "Privileged": true, + "PublishAllPorts": false, + "ReadonlyRootfs": false, + "SecurityOpt": [ + "seccomp=unconfined", + "apparmor=unconfined", + "label=disable" + ], + "Tmpfs": { + "/run": "", + "/tmp": "" + }, + "UTSMode": "", + "UsernsMode": "", + "ShmSize": 67108864, + "Runtime": "runc", + "Isolation": "", + "CpuShares": 0, + "Memory": 3221225472, + "NanoCpus": 0, + "CgroupParent": "", + "BlkioWeight": 0, + "BlkioWeightDevice": [], + "BlkioDeviceReadBps": [], + "BlkioDeviceWriteBps": [], + "BlkioDeviceReadIOps": [], + "BlkioDeviceWriteIOps": [], + "CpuPeriod": 0, + "CpuQuota": 0, + "CpuRealtimePeriod": 0, + "CpuRealtimeRuntime": 0, + "CpusetCpus": "", + "CpusetMems": "", + "Devices": [], + "DeviceCgroupRules": null, + "DeviceRequests": null, + "MemoryReservation": 0, + "MemorySwap": 6442450944, + "MemorySwappiness": null, + "OomKillDisable": null, + "PidsLimit": null, + "Ulimits": [], + "CpuCount": 0, + "CpuPercent": 0, + "IOMaximumIOps": 0, + "IOMaximumBandwidth": 0, + "MaskedPaths": null, + "ReadonlyPaths": null + }, + "GraphDriver": { + "Data": { + "ID": "31f6056f95e08bf2b805609824e1ed416bc76493b4814d6f006fbb3ba6ec30ff", + "LowerDir": "/docker-graph/overlay2/6e66def04d0fd78d730127ced6fbc4cfd399be8dd154964ecd4b00fffc2a71cc-init/diff:/docker-graph/overlay2/2c0bdfc211c693b1d687c1518f6a16150f580942f571d34b0e676e2dce7580a1/diff", + "MergedDir": "/docker-graph/overlay2/6e66def04d0fd78d730127ced6fbc4cfd399be8dd154964ecd4b00fffc2a71cc/merged", + "UpperDir": "/docker-graph/overlay2/6e66def04d0fd78d730127ced6fbc4cfd399be8dd154964ecd4b00fffc2a71cc/diff", + "WorkDir": "/docker-graph/overlay2/6e66def04d0fd78d730127ced6fbc4cfd399be8dd154964ecd4b00fffc2a71cc/work" + }, + "Name": "overlay2" + }, + "Mounts": [ + { + "Type": "volume", + "Name": "scheduled-stop-206205", + "Source": "/docker-graph/volumes/scheduled-stop-206205/_data", + "Destination": "/var", + "Driver": "local", + "Mode": "z", + "RW": true, + "Propagation": "" + }, + { + "Type": "bind", + "Source": "/lib/modules", + "Destination": "/lib/modules", + "Mode": "ro", + "RW": false, + "Propagation": "rprivate" + } + ], + "Config": { + "Hostname": "scheduled-stop-206205", + "Domainname": "", + "User": "", + "AttachStdin": false, + "AttachStdout": false, + "AttachStderr": false, + "ExposedPorts": { + "22/tcp": {}, + "2376/tcp": {}, + "32443/tcp": {}, + "5000/tcp": {}, + "8443/tcp": {} + }, + "Tty": true, + "OpenStdin": false, + "StdinOnce": false, + "Env": [ + "container=docker", + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + ], + "Cmd": null, + "Image": "gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8", + "Volumes": null, + "WorkingDir": "/", + "Entrypoint": [ + "/usr/local/bin/entrypoint", + "/sbin/init" + ], + "OnBuild": null, + "Labels": { + "created_by.minikube.sigs.k8s.io": "true", + "mode.minikube.sigs.k8s.io": "scheduled-stop-206205", + "name.minikube.sigs.k8s.io": "scheduled-stop-206205", + "role.minikube.sigs.k8s.io": "" + }, 
+ "StopSignal": "SIGRTMIN+3" + }, + "NetworkSettings": { + "Bridge": "", + "SandboxID": "cbbe92502d7f53f3014e4c7321c79a89e2dd2d7ee88c7f5d7c41d34cea7c13a1", + "SandboxKey": "/var/run/docker/netns/cbbe92502d7f", + "Ports": { + "22/tcp": [ + { + "HostIp": "127.0.0.1", + "HostPort": "32969" + } + ], + "2376/tcp": [ + { + "HostIp": "127.0.0.1", + "HostPort": "32970" + } + ], + "32443/tcp": [ + { + "HostIp": "127.0.0.1", + "HostPort": "32973" + } + ], + "5000/tcp": [ + { + "HostIp": "127.0.0.1", + "HostPort": "32971" + } + ], + "8443/tcp": [ + { + "HostIp": "127.0.0.1", + "HostPort": "32972" + } + ] + }, + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "scheduled-stop-206205": { + "IPAMConfig": { + "IPv4Address": "192.168.76.2" + }, + "Links": null, + "Aliases": null, + "MacAddress": "0a:da:fc:b0:02:65", + "DriverOpts": null, + "GwPriority": 0, + "NetworkID": "66cb68aed641a7c6a3dc7284672d02d3b05dc7509842df0738264f86786d9811", + "EndpointID": "b4003dcb7de9462fe48e835483ccc98d5d1a769f26bf6c438f9cbefa4d27035c", + "Gateway": "192.168.76.1", + "IPAddress": "192.168.76.2", + "IPPrefixLen": 24, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "DNSNames": [ + "scheduled-stop-206205", + "31f6056f95e0" + ] + } + } + } + } + ] + + -- /stdout -- + helpers_test.go:247: (dbg) Run: out/minikube-linux-amd64 status --format={{.Host}} -p scheduled-stop-206205 -n scheduled-stop-206205 + helpers_test.go:252: <<< TestScheduledStopUnix FAILED: start of post-mortem logs <<< + helpers_test.go:253: ======> post-mortem[TestScheduledStopUnix]: minikube logs <====== + helpers_test.go:255: (dbg) Run: out/minikube-linux-amd64 -p scheduled-stop-206205 logs -n 25 + helpers_test.go:260: TestScheduledStopUnix logs: + -- stdout -- + + ==> Audit <== + ┌─────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┬───────────────────────┬──────────┬─────────┬─────────────────────┬─────────────────────┐ + │ COMMAND │ ARGS │ PROFILE │ USER │ VERSION │ START TIME │ END TIME │ + ├─────────┼─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┼───────────────────────┼──────────┼─────────┼─────────────────────┼─────────────────────┤ + │ stop │ -p multinode-809166 │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 25 23:13 UTC │ 02 Nov 25 23:13 UTC │ + │ start │ -p multinode-809166 --wait=true -v=5 --alsologtostderr │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 25 23:13 UTC │ 02 Nov 25 23:14 UTC │ + │ node │ list -p multinode-809166 │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 25 23:14 UTC │ │ + │ node │ multinode-809166 node delete m03 │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 25 23:14 UTC │ 02 Nov 25 23:14 UTC │ + │ stop │ multinode-809166 stop │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 25 23:14 UTC │ 02 Nov 25 23:14 UTC │ + │ start │ -p multinode-809166 --wait=true -v=5 --alsologtostderr --driver=docker --container-runtime=docker │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 25 23:14 UTC │ 02 Nov 25 23:15 UTC │ + │ node │ list -p multinode-809166 │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 
25 23:15 UTC │ │ + │ start │ -p multinode-809166-m02 --driver=docker --container-runtime=docker │ multinode-809166-m02 │ minikube │ v1.37.0 │ 02 Nov 25 23:15 UTC │ │ + │ start │ -p multinode-809166-m03 --driver=docker --container-runtime=docker │ multinode-809166-m03 │ minikube │ v1.37.0 │ 02 Nov 25 23:15 UTC │ 02 Nov 25 23:15 UTC │ + │ node │ add -p multinode-809166 │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 25 23:15 UTC │ │ + │ delete │ -p multinode-809166-m03 │ multinode-809166-m03 │ minikube │ v1.37.0 │ 02 Nov 25 23:15 UTC │ 02 Nov 25 23:15 UTC │ + │ delete │ -p multinode-809166 │ multinode-809166 │ minikube │ v1.37.0 │ 02 Nov 25 23:15 UTC │ 02 Nov 25 23:15 UTC │ + │ start │ -p test-preload-092353 --memory=3072 --alsologtostderr --wait=true --preload=false --driver=docker --container-runtime=docker --kubernetes-version=v1.32.0 │ test-preload-092353 │ minikube │ v1.37.0 │ 02 Nov 25 23:15 UTC │ 02 Nov 25 23:16 UTC │ + │ image │ test-preload-092353 image pull gcr.io/k8s-minikube/busybox │ test-preload-092353 │ minikube │ v1.37.0 │ 02 Nov 25 23:16 UTC │ 02 Nov 25 23:16 UTC │ + │ stop │ -p test-preload-092353 │ test-preload-092353 │ minikube │ v1.37.0 │ 02 Nov 25 23:16 UTC │ 02 Nov 25 23:16 UTC │ + │ start │ -p test-preload-092353 --memory=3072 --alsologtostderr -v=1 --wait=true --driver=docker --container-runtime=docker │ test-preload-092353 │ minikube │ v1.37.0 │ 02 Nov 25 23:16 UTC │ 02 Nov 25 23:17 UTC │ + │ image │ test-preload-092353 image list │ test-preload-092353 │ minikube │ v1.37.0 │ 02 Nov 25 23:17 UTC │ 02 Nov 25 23:17 UTC │ + │ delete │ -p test-preload-092353 │ test-preload-092353 │ minikube │ v1.37.0 │ 02 Nov 25 23:17 UTC │ 02 Nov 25 23:17 UTC │ + │ start │ -p scheduled-stop-206205 --memory=3072 --driver=docker --container-runtime=docker │ scheduled-stop-206205 │ minikube │ v1.37.0 │ 02 Nov 25 23:17 UTC │ 02 Nov 25 23:18 UTC │ + │ stop │ -p scheduled-stop-206205 --schedule 5m │ scheduled-stop-206205 │ minikube │ v1.37.0 │ 02 Nov 25 23:18 UTC │ │ + │ stop │ -p scheduled-stop-206205 --schedule 5m │ scheduled-stop-206205 │ minikube │ v1.37.0 │ 02 Nov 25 23:18 UTC │ │ + │ stop │ -p scheduled-stop-206205 --schedule 5m │ scheduled-stop-206205 │ minikube │ v1.37.0 │ 02 Nov 25 23:18 UTC │ │ + │ stop │ -p scheduled-stop-206205 --schedule 15s │ scheduled-stop-206205 │ minikube │ v1.37.0 │ 02 Nov 25 23:18 UTC │ │ + │ stop │ -p scheduled-stop-206205 --schedule 15s │ scheduled-stop-206205 │ minikube │ v1.37.0 │ 02 Nov 25 23:18 UTC │ │ + │ stop │ -p scheduled-stop-206205 --schedule 15s │ scheduled-stop-206205 │ minikube │ v1.37.0 │ 02 Nov 25 23:18 UTC │ │ + └─────────┴─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┴───────────────────────┴──────────┴─────────┴─────────────────────┴─────────────────────┘ + + + ==> Last Start <== + Log file created at: 2025/11/02 23:17:53 + Running on machine: ec6b3253-b39b-4dea-b672-e2db97323995 + Binary: Built with gc go1.24.6 for linux/amd64 + Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg + I1102 23:17:53.619434 253591 out.go:360] Setting OutFile to fd 1 ... + I1102 23:17:53.619588 253591 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 23:17:53.619590 253591 out.go:374] Setting ErrFile to fd 2... 
+ I1102 23:17:53.619592 253591 out.go:408] TERM=xterm,COLORTERM=, which probably does not support color + I1102 23:17:53.619724 253591 root.go:338] Updating PATH: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/bin + I1102 23:17:53.619971 253591 out.go:368] Setting JSON to false + I1102 23:17:53.626493 253591 start.go:133] hostinfo: {"hostname":"ec6b3253-b39b-4dea-b672-e2db97323995","uptime":1037808,"bootTime":1761087666,"procs":105,"os":"linux","platform":"debian","platformFamily":"debian","platformVersion":"12.12","kernelVersion":"6.6.97+","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"guest","hostId":"4a5d182f-ea69-4eda-bb72-26ec6daf619d"} + I1102 23:17:53.626551 253591 start.go:143] virtualization: kvm guest + I1102 23:17:53.626867 253591 out.go:179] * [scheduled-stop-206205] minikube v1.37.0 on Debian 12.12 (kvm/amd64) + I1102 23:17:53.627003 253591 out.go:179] - MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true + I1102 23:17:53.627069 253591 notify.go:221] Checking for updates... + I1102 23:17:53.627225 253591 out.go:179] - KUBECONFIG=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig + I1102 23:17:53.627327 253591 out.go:179] - MINIKUBE_HOME=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube + I1102 23:17:53.627437 253591 out.go:179] - MINIKUBE_BIN=out/minikube-linux-amd64 + I1102 23:17:53.627553 253591 out.go:179] - MINIKUBE_FORCE_SYSTEMD= + I1102 23:17:53.628096 253591 driver.go:422] Setting default libvirt URI to qemu:///system + I1102 23:17:53.641440 253591 docker.go:124] docker version: linux-28.5.1:Docker Engine - Community + I1102 23:17:53.641487 253591 cli_runner.go:164] Run: docker system info --format "{{json .}}" + I1102 23:17:53.672246 253591 info.go:266] docker info: {ID:c2b1af59-a1e1-4db0-9aaa-240bf36ebecd Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:25 OomKillDisable:false NGoroutines:49 SystemTime:2025-11-02 23:17:53.666641765 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.6.97+ OperatingSystem:Debian GNU/Linux 12 (bookworm) (containerized) OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[https://mirror.gcr.io/] Secure:true Official:true}} Mirrors:[https://mirror.gcr.io/]} NCPU:8 MemTotal:65320062976 GenericResources: DockerRootDir:/docker-graph HTTPProxy: HTTPSProxy: NoProxy: Name:ec6b3253-b39b-4dea-b672-e2db97323995 Labels:[] ExperimentalBuild:false ServerVersion:28.5.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:} LiveRestoreEnabled:false 
Isolation: InitBinary:docker-init ContainerdCommit:{ID:b98a3aace656320842a23f4a392a33f46af97866 Expected:} RuncCommit:{ID:v1.3.0-0-g4ca628d1 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:v0.29.1]] Warnings:}} + I1102 23:17:53.672296 253591 docker.go:319] overlay module found + I1102 23:17:53.672501 253591 out.go:179] * Using the docker driver based on user configuration + I1102 23:17:53.672610 253591 start.go:309] selected driver: docker + I1102 23:17:53.672616 253591 start.go:930] validating driver "docker" against + I1102 23:17:53.672622 253591 start.go:941] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error: Reason: Fix: Doc: Version:} + I1102 23:17:53.673028 253591 cli_runner.go:164] Run: docker system info --format "{{json .}}" + I1102 23:17:53.700480 253591 info.go:266] docker info: {ID:c2b1af59-a1e1-4db0-9aaa-240bf36ebecd Containers:0 ContainersRunning:0 ContainersPaused:0 ContainersStopped:0 Images:2 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy false] [Native Overlay Diff true] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:false BridgeNfIP6Tables:false Debug:false NFd:25 OomKillDisable:false NGoroutines:49 SystemTime:2025-11-02 23:17:53.695117802 +0000 UTC LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:6.6.97+ OperatingSystem:Debian GNU/Linux 12 (bookworm) (containerized) OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[::1/128 127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[https://mirror.gcr.io/] Secure:true Official:true}} Mirrors:[https://mirror.gcr.io/]} NCPU:8 MemTotal:65320062976 GenericResources: DockerRootDir:/docker-graph HTTPProxy: HTTPSProxy: NoProxy: Name:ec6b3253-b39b-4dea-b672-e2db97323995 Labels:[] ExperimentalBuild:false ServerVersion:28.5.1 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:b98a3aace656320842a23f4a392a33f46af97866 Expected:} RuncCommit:{ID:v1.3.0-0-g4ca628d1 Expected:} InitCommit:{ID:de40ad0 Expected:} SecurityOptions:[name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/libexec/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. 
Version:v0.29.1]] Warnings:}} + I1102 23:17:53.700553 253591 start_flags.go:327] no existing cluster config was found, will generate one from the flags + I1102 23:17:53.700665 253591 start_flags.go:974] Wait components to verify : map[apiserver:true system_pods:true] + I1102 23:17:53.700875 253591 out.go:179] * Using Docker driver with root privileges + I1102 23:17:53.701059 253591 cni.go:84] Creating CNI manager for "" + I1102 23:17:53.701094 253591 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge + I1102 23:17:53.701098 253591 start_flags.go:336] Found "bridge CNI" CNI - setting NetworkPlugin=cni + I1102 23:17:53.701125 253591 start.go:353] cluster config: + {Name:scheduled-stop-206205 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-206205 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} + I1102 23:17:53.701307 253591 out.go:179] * Starting "scheduled-stop-206205" primary control-plane node in "scheduled-stop-206205" cluster + I1102 23:17:53.701399 253591 cache.go:124] Beginning downloading kic base image for docker with docker + I1102 23:17:53.701524 253591 out.go:179] * Pulling base image v0.0.48-1760939008-21773 ... 
+ I1102 23:17:53.701631 253591 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker + I1102 23:17:53.701658 253591 preload.go:198] Found local preload: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4 + I1102 23:17:53.701660 253591 cache.go:59] Caching tarball of preloaded images + I1102 23:17:53.701693 253591 image.go:81] Checking for gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 in local docker daemon + I1102 23:17:53.701721 253591 preload.go:233] Found /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4 in cache, skipping download + I1102 23:17:53.701726 253591 cache.go:62] Finished verifying existence of preloaded tar for v1.34.1 on docker + I1102 23:17:53.701978 253591 profile.go:143] Saving config to /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/config.json ... + I1102 23:17:53.701990 253591 lock.go:35] WriteFile acquiring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/config.json: {Name:mk241f7c6d5362604a224f88c0eaf0c33a5d7694 Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:17:53.712504 253591 image.go:100] Found gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 in local docker daemon, skipping pull + I1102 23:17:53.712509 253591 cache.go:148] gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 exists in daemon, skipping load + I1102 23:17:53.712516 253591 cache.go:233] Successfully downloaded all kic artifacts + I1102 23:17:53.712531 253591 start.go:360] acquireMachinesLock for scheduled-stop-206205: {Name:mk5eda29c86e1f811472fcc8ec5414843874c055 Clock:{} Delay:500ms Timeout:10m0s Cancel:} + I1102 23:17:53.712606 253591 start.go:364] duration metric: took 67.976µs to acquireMachinesLock for "scheduled-stop-206205" + I1102 23:17:53.712620 253591 start.go:93] Provisioning new machine with config: &{Name:scheduled-stop-206205 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-206205 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 
ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} &{Name: IP: Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true} + I1102 23:17:53.712654 253591 start.go:125] createHost starting for "" (driver="docker") + I1102 23:17:53.712941 253591 out.go:252] * Creating docker container (CPUs=2, Memory=3072MB) ... + I1102 23:17:53.713050 253591 start.go:159] libmachine.API.Create for "scheduled-stop-206205" (driver="docker") + I1102 23:17:53.713062 253591 client.go:173] LocalClient.Create starting + I1102 23:17:53.713101 253591 main.go:143] libmachine: Reading certificate data from /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca.pem + I1102 23:17:53.713123 253591 main.go:143] libmachine: Decoding PEM data... + I1102 23:17:53.713130 253591 main.go:143] libmachine: Parsing certificate... + I1102 23:17:53.713163 253591 main.go:143] libmachine: Reading certificate data from /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/cert.pem + I1102 23:17:53.713175 253591 main.go:143] libmachine: Decoding PEM data... + I1102 23:17:53.713182 253591 main.go:143] libmachine: Parsing certificate... + I1102 23:17:53.713370 253591 cli_runner.go:164] Run: docker network inspect scheduled-stop-206205 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" + W1102 23:17:53.722515 253591 cli_runner.go:211] docker network inspect scheduled-stop-206205 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1 + I1102 23:17:53.722553 253591 network_create.go:284] running [docker network inspect scheduled-stop-206205] to gather additional debugging logs... 
+ I1102 23:17:53.722582 253591 cli_runner.go:164] Run: docker network inspect scheduled-stop-206205 + W1102 23:17:53.731296 253591 cli_runner.go:211] docker network inspect scheduled-stop-206205 returned with exit code 1 + I1102 23:17:53.731304 253591 network_create.go:287] error running [docker network inspect scheduled-stop-206205]: docker network inspect scheduled-stop-206205: exit status 1 + stdout: + [] + + stderr: + Error response from daemon: network scheduled-stop-206205 not found + I1102 23:17:53.731309 253591 network_create.go:289] output of [docker network inspect scheduled-stop-206205]: -- stdout -- + [] + + -- /stdout -- + ** stderr ** + Error response from daemon: network scheduled-stop-206205 not found + + ** /stderr ** + I1102 23:17:53.731374 253591 cli_runner.go:164] Run: docker network inspect bridge --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" + I1102 23:17:53.741002 253591 network.go:211] skipping subnet 192.168.49.0/24 that is taken: &{IP:192.168.49.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.49.0/24 Gateway:192.168.49.1 ClientMin:192.168.49.2 ClientMax:192.168.49.254 Broadcast:192.168.49.255 IsPrivate:true Interface:{IfaceName:br-c8e5074369ec IfaceIPv4:192.168.49.1 IfaceMTU:1500 IfaceMAC:4e:e7:c6:2d:b8:92} reservation:} + I1102 23:17:53.741112 253591 network.go:211] skipping subnet 192.168.58.0/24 that is taken: &{IP:192.168.58.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.58.0/24 Gateway:192.168.58.1 ClientMin:192.168.58.2 ClientMax:192.168.58.254 Broadcast:192.168.58.255 IsPrivate:true Interface:{IfaceName:br-1155e43987f0 IfaceIPv4:192.168.58.1 IfaceMTU:1500 IfaceMAC:56:c9:4f:10:02:e5} reservation:} + I1102 23:17:53.741219 253591 network.go:211] skipping subnet 192.168.67.0/24 that is taken: &{IP:192.168.67.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.67.0/24 Gateway:192.168.67.1 ClientMin:192.168.67.2 ClientMax:192.168.67.254 Broadcast:192.168.67.255 IsPrivate:true Interface:{IfaceName:br-1a04657d00b3 IfaceIPv4:192.168.67.1 IfaceMTU:1500 IfaceMAC:76:0f:75:c5:83:0d} reservation:} + I1102 23:17:53.741377 253591 network.go:206] using free private subnet 192.168.76.0/24: &{IP:192.168.76.0 Netmask:255.255.255.0 Prefix:24 CIDR:192.168.76.0/24 Gateway:192.168.76.1 ClientMin:192.168.76.2 ClientMax:192.168.76.254 Broadcast:192.168.76.255 IsPrivate:true Interface:{IfaceName: IfaceIPv4: IfaceMTU:0 IfaceMAC:} reservation:0xc001b83cf0} + I1102 23:17:53.741385 253591 network_create.go:124] attempt to create docker network scheduled-stop-206205 192.168.76.0/24 with gateway 192.168.76.1 and MTU of 1500 ... 
+ I1102 23:17:53.741420 253591 cli_runner.go:164] Run: docker network create --driver=bridge --subnet=192.168.76.0/24 --gateway=192.168.76.1 -o --ip-masq -o --icc -o com.docker.network.driver.mtu=1500 --label=created_by.minikube.sigs.k8s.io=true --label=name.minikube.sigs.k8s.io=scheduled-stop-206205 scheduled-stop-206205 + I1102 23:17:53.768719 253591 network_create.go:108] docker network scheduled-stop-206205 192.168.76.0/24 created + I1102 23:17:53.768728 253591 kic.go:121] calculated static IP "192.168.76.2" for the "scheduled-stop-206205" container + I1102 23:17:53.768775 253591 cli_runner.go:164] Run: docker ps -a --format {{.Names}} + I1102 23:17:53.777450 253591 cli_runner.go:164] Run: docker volume create scheduled-stop-206205 --label name.minikube.sigs.k8s.io=scheduled-stop-206205 --label created_by.minikube.sigs.k8s.io=true + I1102 23:17:53.786567 253591 oci.go:103] Successfully created a docker volume scheduled-stop-206205 + I1102 23:17:53.786609 253591 cli_runner.go:164] Run: docker run --rm --name scheduled-stop-206205-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=scheduled-stop-206205 --entrypoint /usr/bin/test -v scheduled-stop-206205:/var gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 -d /var/lib + I1102 23:17:54.115618 253591 oci.go:107] Successfully prepared a docker volume scheduled-stop-206205 + I1102 23:17:54.115834 253591 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker + I1102 23:17:54.115846 253591 kic.go:194] Starting extracting preloaded images to volume ... + I1102 23:17:54.115898 253591 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v scheduled-stop-206205:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 -I lz4 -xf /preloaded.tar -C /extractDir + I1102 23:17:57.636208 253591 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.34.1-docker-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v scheduled-stop-206205:/extractDir gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 -I lz4 -xf /preloaded.tar -C /extractDir: (3.520280245s) + I1102 23:17:57.636226 253591 kic.go:203] duration metric: took 3.520377301s to extract preloaded images to volume ... + W1102 23:17:57.636302 253591 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted. + W1102 23:17:57.636321 253591 oci.go:252] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted. 
+ I1102 23:17:57.636368 253591 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'" + I1102 23:17:57.672186 253591 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname scheduled-stop-206205 --name scheduled-stop-206205 --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=scheduled-stop-206205 --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=scheduled-stop-206205 --network scheduled-stop-206205 --ip 192.168.76.2 --volume scheduled-stop-206205:/var --security-opt apparmor=unconfined --memory=3072mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 + I1102 23:17:57.898542 253591 cli_runner.go:164] Run: docker container inspect scheduled-stop-206205 --format={{.State.Running}} + I1102 23:17:57.909434 253591 cli_runner.go:164] Run: docker container inspect scheduled-stop-206205 --format={{.State.Status}} + I1102 23:17:57.918896 253591 cli_runner.go:164] Run: docker exec scheduled-stop-206205 stat /var/lib/dpkg/alternatives/iptables + I1102 23:17:57.949230 253591 oci.go:144] the created container "scheduled-stop-206205" has a running status. + I1102 23:17:57.949250 253591 kic.go:225] Creating ssh key for kic: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa... + I1102 23:17:57.994322 253591 kic_runner.go:191] docker (temp): /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes) + I1102 23:17:58.005867 253591 cli_runner.go:164] Run: docker container inspect scheduled-stop-206205 --format={{.State.Status}} + I1102 23:17:58.015315 253591 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys + I1102 23:17:58.015321 253591 kic_runner.go:114] Args: [docker exec --privileged scheduled-stop-206205 chown docker:docker /home/docker/.ssh/authorized_keys] + I1102 23:17:58.040815 253591 cli_runner.go:164] Run: docker container inspect scheduled-stop-206205 --format={{.State.Status}} + I1102 23:17:58.051004 253591 machine.go:94] provisionDockerMachine start ... 
+ I1102 23:17:58.051069 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:17:58.062794 253591 main.go:143] libmachine: Using SSH client type: native + I1102 23:17:58.062986 253591 main.go:143] libmachine: &{{{ 0 [] [] []} docker [0x841760] 0x844460 [] 0s} 127.0.0.1 32969 } + I1102 23:17:58.062993 253591 main.go:143] libmachine: About to run SSH command: + hostname + I1102 23:17:58.063450 253591 main.go:143] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:59844->127.0.0.1:32969: read: connection reset by peer + I1102 23:18:01.183913 253591 main.go:143] libmachine: SSH cmd err, output: : scheduled-stop-206205 + + I1102 23:18:01.183945 253591 ubuntu.go:182] provisioning hostname "scheduled-stop-206205" + I1102 23:18:01.184016 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:01.195923 253591 main.go:143] libmachine: Using SSH client type: native + I1102 23:18:01.196086 253591 main.go:143] libmachine: &{{{ 0 [] [] []} docker [0x841760] 0x844460 [] 0s} 127.0.0.1 32969 } + I1102 23:18:01.196091 253591 main.go:143] libmachine: About to run SSH command: + sudo hostname scheduled-stop-206205 && echo "scheduled-stop-206205" | sudo tee /etc/hostname + I1102 23:18:01.320196 253591 main.go:143] libmachine: SSH cmd err, output: : scheduled-stop-206205 + + I1102 23:18:01.320254 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:01.331285 253591 main.go:143] libmachine: Using SSH client type: native + I1102 23:18:01.331421 253591 main.go:143] libmachine: &{{{ 0 [] [] []} docker [0x841760] 0x844460 [] 0s} 127.0.0.1 32969 } + I1102 23:18:01.331428 253591 main.go:143] libmachine: About to run SSH command: + + if ! 
grep -xq '.*\sscheduled-stop-206205' /etc/hosts; then + if grep -xq '127.0.1.1\s.*' /etc/hosts; then + sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 scheduled-stop-206205/g' /etc/hosts; + else + echo '127.0.1.1 scheduled-stop-206205' | sudo tee -a /etc/hosts; + fi + fi + I1102 23:18:01.448573 253591 main.go:143] libmachine: SSH cmd err, output: : + I1102 23:18:01.448587 253591 ubuntu.go:188] set auth options {CertDir:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube CaCertPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca.pem CaPrivateKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/server.pem ServerKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/server-key.pem ClientKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube} + I1102 23:18:01.448596 253591 ubuntu.go:190] setting up certificates + I1102 23:18:01.448602 253591 provision.go:84] configureAuth start + I1102 23:18:01.448643 253591 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-206205 + I1102 23:18:01.459070 253591 provision.go:143] copyHostCerts + I1102 23:18:01.459118 253591 exec_runner.go:144] found /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cert.pem, removing ... + I1102 23:18:01.459122 253591 exec_runner.go:203] rm: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cert.pem + I1102 23:18:01.459174 253591 exec_runner.go:151] cp: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/cert.pem --> /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/cert.pem (1127 bytes) + I1102 23:18:01.459247 253591 exec_runner.go:144] found /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/key.pem, removing ... + I1102 23:18:01.459249 253591 exec_runner.go:203] rm: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/key.pem + I1102 23:18:01.459269 253591 exec_runner.go:151] cp: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/key.pem --> /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/key.pem (1675 bytes) + I1102 23:18:01.459314 253591 exec_runner.go:144] found /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.pem, removing ... 
+ I1102 23:18:01.459316 253591 exec_runner.go:203] rm: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.pem + I1102 23:18:01.459339 253591 exec_runner.go:151] cp: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca.pem --> /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.pem (1082 bytes) + I1102 23:18:01.459376 253591 provision.go:117] generating server cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/server.pem ca-key=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca.pem private-key=/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca-key.pem org=minikube.scheduled-stop-206205 san=[127.0.0.1 192.168.76.2 localhost minikube scheduled-stop-206205] + I1102 23:18:01.577357 253591 provision.go:177] copyRemoteCerts + I1102 23:18:01.577390 253591 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker + I1102 23:18:01.577415 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:01.588288 253591 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa Username:docker} + I1102 23:18:01.673715 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/server.pem --> /etc/docker/server.pem (1233 bytes) + I1102 23:18:01.684733 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1675 bytes) + I1102 23:18:01.695653 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1082 bytes) + I1102 23:18:01.706714 253591 provision.go:87] duration metric: took 258.106022ms to configureAuth + I1102 23:18:01.706725 253591 ubuntu.go:206] setting minikube options for container-runtime + I1102 23:18:01.706839 253591 config.go:182] Loaded profile config "scheduled-stop-206205": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 23:18:01.706872 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:01.716737 253591 main.go:143] libmachine: Using SSH client type: native + I1102 23:18:01.716859 253591 main.go:143] libmachine: &{{{ 0 [] [] []} docker [0x841760] 0x844460 [] 0s} 127.0.0.1 32969 } + I1102 23:18:01.716862 253591 main.go:143] libmachine: About to run SSH command: + df --output=fstype / | tail -n 1 + I1102 23:18:01.834233 253591 main.go:143] libmachine: SSH cmd err, output: : overlay + + I1102 23:18:01.834239 253591 ubuntu.go:71] root file system type: overlay + I1102 23:18:01.834305 253591 provision.go:314] Updating docker unit: /lib/systemd/system/docker.service ... 
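The server.pem, server-key.pem and ca.pem copied into /etc/docker above back the TLS listener on tcp://0.0.0.0:2376 that the docker.service unit written just below enables. A manual connectivity check from the host, assuming $MINIKUBE_HOME points at the .minikube directory from the log (its certs/ directory holds the matching client cert and key):

  # host port published for the node's 2376/tcp TLS endpoint
  DOCKER_PORT=$(docker container inspect \
    -f '{{(index (index .NetworkSettings.Ports "2376/tcp") 0).HostPort}}' scheduled-stop-206205)

  # the server cert's SANs include 127.0.0.1, so --tlsverify works against localhost
  docker --tlsverify \
    --tlscacert "$MINIKUBE_HOME/certs/ca.pem" \
    --tlscert   "$MINIKUBE_HOME/certs/cert.pem" \
    --tlskey    "$MINIKUBE_HOME/certs/key.pem" \
    -H "tcp://127.0.0.1:${DOCKER_PORT}" version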
+ I1102 23:18:01.834344 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:01.844396 253591 main.go:143] libmachine: Using SSH client type: native + I1102 23:18:01.844526 253591 main.go:143] libmachine: &{{{ 0 [] [] []} docker [0x841760] 0x844460 [] 0s} 127.0.0.1 32969 } + I1102 23:18:01.844561 253591 main.go:143] libmachine: About to run SSH command: + sudo mkdir -p /lib/systemd/system && printf %s "[Unit] + Description=Docker Application Container Engine + Documentation=https://docs.docker.com + After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target + Wants=network-online.target containerd.service + Requires=docker.socket + StartLimitBurst=3 + StartLimitIntervalSec=60 + + [Service] + Type=notify + Restart=always + + + + # This file is a systemd drop-in unit that inherits from the base dockerd configuration. + # The base configuration already specifies an 'ExecStart=...' command. The first directive + # here is to clear out that command inherited from the base configuration. Without this, + # the command from the base configuration and the command specified here are treated as + # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd + # will catch this invalid input and refuse to start the service with an error like: + # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. + + # NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other + # container runtimes. If left unlimited, it may result in OOM issues with MySQL. + ExecStart= + ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 \ + -H fd:// --containerd=/run/containerd/containerd.sock \ + -H unix:///var/run/docker.sock \ + --default-ulimit=nofile=1048576:1048576 \ + --tlsverify \ + --tlscacert /etc/docker/ca.pem \ + --tlscert /etc/docker/server.pem \ + --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + ExecReload=/bin/kill -s HUP \$MAINPID + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Uncomment TasksMax if your systemd version supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + TimeoutStartSec=0 + + # set delegate yes so that systemd does not reset the cgroups of docker containers + Delegate=yes + + # kill only the docker process, not all processes in the cgroup + KillMode=process + OOMScoreAdjust=-500 + + [Install] + WantedBy=multi-user.target + " | sudo tee /lib/systemd/system/docker.service.new + I1102 23:18:01.969042 253591 main.go:143] libmachine: SSH cmd err, output: : [Unit] + Description=Docker Application Container Engine + Documentation=https://docs.docker.com + After=network-online.target nss-lookup.target docker.socket firewalld.service containerd.service time-set.target + Wants=network-online.target containerd.service + Requires=docker.socket + StartLimitBurst=3 + StartLimitIntervalSec=60 + + [Service] + Type=notify + Restart=always + + + + # This file is a systemd drop-in unit that inherits from the base dockerd configuration. + # The base configuration already specifies an 'ExecStart=...' command. 
The first directive + # here is to clear out that command inherited from the base configuration. Without this, + # the command from the base configuration and the command specified here are treated as + # a sequence of commands, which is not the desired behavior, nor is it valid -- systemd + # will catch this invalid input and refuse to start the service with an error like: + # Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. + + # NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other + # container runtimes. If left unlimited, it may result in OOM issues with MySQL. + ExecStart= + ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + ExecReload=/bin/kill -s HUP $MAINPID + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + # Uncomment TasksMax if your systemd version supports it. + # Only systemd 226 and above support this version. + TasksMax=infinity + TimeoutStartSec=0 + + # set delegate yes so that systemd does not reset the cgroups of docker containers + Delegate=yes + + # kill only the docker process, not all processes in the cgroup + KillMode=process + OOMScoreAdjust=-500 + + [Install] + WantedBy=multi-user.target + + I1102 23:18:01.969085 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:01.978588 253591 main.go:143] libmachine: Using SSH client type: native + I1102 23:18:01.978711 253591 main.go:143] libmachine: &{{{ 0 [] [] []} docker [0x841760] 0x844460 [] 0s} 127.0.0.1 32969 } + I1102 23:18:01.978718 253591 main.go:143] libmachine: About to run SSH command: + sudo diff -u /lib/systemd/system/docker.service /lib/systemd/system/docker.service.new || { sudo mv /lib/systemd/system/docker.service.new /lib/systemd/system/docker.service; sudo systemctl -f daemon-reload && sudo systemctl -f enable docker && sudo systemctl -f restart docker; } + I1102 23:18:02.484307 253591 main.go:143] libmachine: SSH cmd err, output: : --- /lib/systemd/system/docker.service 2025-10-08 12:15:50.000000000 +0000 + +++ /lib/systemd/system/docker.service.new 2025-11-02 23:18:01.967025868 +0000 + @@ -9,23 +9,34 @@ + + [Service] + Type=notify + -# the default is not to use systemd for cgroups because the delegate issues still + -# exists and systemd currently does not support the cgroup feature set required + -# for containers run by docker + -ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock + -ExecReload=/bin/kill -s HUP $MAINPID + -TimeoutStartSec=0 + -RestartSec=2 + Restart=always + + + + + + +# This file is a systemd drop-in unit that inherits from the base dockerd configuration. + +# The base configuration already specifies an 'ExecStart=...' command. The first directive + +# here is to clear out that command inherited from the base configuration. 
Without this, + +# the command from the base configuration and the command specified here are treated as + +# a sequence of commands, which is not the desired behavior, nor is it valid -- systemd + +# will catch this invalid input and refuse to start the service with an error like: + +# Service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. + + + +# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other + +# container runtimes. If left unlimited, it may result in OOM issues with MySQL. + +ExecStart= + +ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H fd:// --containerd=/run/containerd/containerd.sock -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem --label provider=docker --insecure-registry 10.96.0.0/12 + +ExecReload=/bin/kill -s HUP $MAINPID + + + # Having non-zero Limit*s causes performance problems due to accounting overhead + # in the kernel. We recommend using cgroups to do container-local accounting. + +LimitNOFILE=infinity + LimitNPROC=infinity + LimitCORE=infinity + + -# Comment TasksMax if your systemd version does not support it. + -# Only systemd 226 and above support this option. + +# Uncomment TasksMax if your systemd version supports it. + +# Only systemd 226 and above support this version. + TasksMax=infinity + +TimeoutStartSec=0 + + # set delegate yes so that systemd does not reset the cgroups of docker containers + Delegate=yes + Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install. + Executing: /lib/systemd/systemd-sysv-install enable docker + + I1102 23:18:02.484318 253591 machine.go:97] duration metric: took 4.433308035s to provisionDockerMachine + I1102 23:18:02.484325 253591 client.go:176] duration metric: took 8.771259893s to LocalClient.Create + I1102 23:18:02.484334 253591 start.go:167] duration metric: took 8.771282789s to libmachine.API.Create "scheduled-stop-206205" + I1102 23:18:02.484338 253591 start.go:293] postStartSetup for "scheduled-stop-206205" (driver="docker") + I1102 23:18:02.484344 253591 start.go:322] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs] + I1102 23:18:02.484397 253591 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs + I1102 23:18:02.484429 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:02.495074 253591 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa Username:docker} + I1102 23:18:02.579870 253591 ssh_runner.go:195] Run: cat /etc/os-release + I1102 23:18:02.581857 253591 main.go:143] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found + I1102 23:18:02.581866 253591 info.go:137] Remote host: Debian GNU/Linux 12 (bookworm) + I1102 23:18:02.581872 253591 filesync.go:126] Scanning 
/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/addons for local assets ... + I1102 23:18:02.581930 253591 filesync.go:126] Scanning /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/files for local assets ... + I1102 23:18:02.582001 253591 filesync.go:149] local asset: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/files/etc/ssl/certs/378692.pem -> 378692.pem in /etc/ssl/certs + I1102 23:18:02.582092 253591 ssh_runner.go:195] Run: sudo mkdir -p /etc/ssl/certs + I1102 23:18:02.586576 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/files/etc/ssl/certs/378692.pem --> /etc/ssl/certs/378692.pem (1708 bytes) + I1102 23:18:02.597463 253591 start.go:296] duration metric: took 113.117075ms for postStartSetup + I1102 23:18:02.597741 253591 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-206205 + I1102 23:18:02.607698 253591 profile.go:143] Saving config to /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/config.json ... + I1102 23:18:02.607906 253591 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'" + I1102 23:18:02.607947 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:02.617590 253591 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa Username:docker} + I1102 23:18:02.700643 253591 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'" + I1102 23:18:02.703460 253591 start.go:128] duration metric: took 8.990801049s to createHost + I1102 23:18:02.703467 253591 start.go:83] releasing machines lock for "scheduled-stop-206205", held for 8.990856965s + I1102 23:18:02.703518 253591 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" scheduled-stop-206205 + I1102 23:18:02.713584 253591 ssh_runner.go:195] Run: cat /version.json + I1102 23:18:02.713608 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:02.713674 253591 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/ + I1102 23:18:02.713716 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:02.724014 253591 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa Username:docker} + I1102 23:18:02.724375 253591 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa Username:docker} + I1102 23:18:02.807050 253591 ssh_runner.go:195] Run: systemctl --version + I1102 23:18:02.875009 253591 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*" + W1102 23:18:02.877650 253591 cni.go:209] 
loopback cni configuration skipped: "/etc/cni/net.d/*loopback.conf*" not found + I1102 23:18:02.877697 253591 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%p, " -exec sh -c "sudo mv {} {}.mk_disabled" ; + I1102 23:18:02.890820 253591 cni.go:262] disabled [/etc/cni/net.d/10-crio-bridge.conflist.disabled, /etc/cni/net.d/87-podman-bridge.conflist] bridge cni config(s) + I1102 23:18:02.890826 253591 start.go:496] detecting cgroup driver to use... + I1102 23:18:02.890842 253591 detect.go:190] detected "systemd" cgroup driver on host os + I1102 23:18:02.890905 253591 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///run/containerd/containerd.sock + " | sudo tee /etc/crictl.yaml" + I1102 23:18:02.899415 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.10.1"|' /etc/containerd/config.toml" + I1102 23:18:02.904831 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml" + I1102 23:18:02.910257 253591 containerd.go:146] configuring containerd to use "systemd" as cgroup driver... + I1102 23:18:02.910293 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml" + I1102 23:18:02.915564 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml" + I1102 23:18:02.920719 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml" + I1102 23:18:02.925871 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml" + I1102 23:18:02.931165 253591 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk" + I1102 23:18:02.936119 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml" + I1102 23:18:02.941336 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i '/^ *enable_unprivileged_ports = .*/d' /etc/containerd/config.toml" + I1102 23:18:02.946588 253591 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)\[plugins."io.containerd.grpc.v1.cri"\]|&\n\1 enable_unprivileged_ports = true|' /etc/containerd/config.toml" + I1102 23:18:02.951864 253591 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables + I1102 23:18:02.956385 253591 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward" + I1102 23:18:02.960861 253591 ssh_runner.go:195] Run: sudo systemctl daemon-reload + I1102 23:18:03.011321 253591 ssh_runner.go:195] Run: sudo systemctl restart containerd + I1102 23:18:03.065100 253591 start.go:496] detecting cgroup driver to use... 
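The sed edits above switch the node's preinstalled containerd to the "systemd" cgroup driver detected on the host; docker is reconfigured the same way a few entries later. Whether both runtimes ended up in agreement can be confirmed from the host:

  # confirm the cgroup driver docker reports and the containerd setting it was given
  docker exec scheduled-stop-206205 docker info --format '{{.CgroupDriver}}'
  docker exec scheduled-stop-206205 grep -n 'SystemdCgroup' /etc/containerd/config.toml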
+ I1102 23:18:03.065116 253591 detect.go:190] detected "systemd" cgroup driver on host os + I1102 23:18:03.065157 253591 ssh_runner.go:195] Run: sudo systemctl cat docker.service + I1102 23:18:03.072378 253591 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd + I1102 23:18:03.079138 253591 ssh_runner.go:195] Run: sudo systemctl stop -f containerd + I1102 23:18:03.088438 253591 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service containerd + I1102 23:18:03.095302 253591 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio + I1102 23:18:03.102214 253591 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %s "runtime-endpoint: unix:///var/run/cri-dockerd.sock + " | sudo tee /etc/crictl.yaml" + I1102 23:18:03.110662 253591 ssh_runner.go:195] Run: which cri-dockerd + I1102 23:18:03.112679 253591 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/cri-docker.service.d + I1102 23:18:03.117226 253591 ssh_runner.go:362] scp memory --> /etc/systemd/system/cri-docker.service.d/10-cni.conf (192 bytes) + I1102 23:18:03.124875 253591 ssh_runner.go:195] Run: sudo systemctl unmask docker.service + I1102 23:18:03.174772 253591 ssh_runner.go:195] Run: sudo systemctl enable docker.socket + I1102 23:18:03.224815 253591 docker.go:575] configuring docker to use "systemd" as cgroup driver... + I1102 23:18:03.224868 253591 ssh_runner.go:362] scp memory --> /etc/docker/daemon.json (129 bytes) + I1102 23:18:03.232349 253591 ssh_runner.go:195] Run: sudo systemctl reset-failed docker + I1102 23:18:03.239110 253591 ssh_runner.go:195] Run: sudo systemctl daemon-reload + I1102 23:18:03.287733 253591 ssh_runner.go:195] Run: sudo systemctl restart docker + I1102 23:18:03.514202 253591 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker + I1102 23:18:03.521123 253591 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.socket + I1102 23:18:03.528294 253591 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service + I1102 23:18:03.535614 253591 ssh_runner.go:195] Run: sudo systemctl unmask cri-docker.socket + I1102 23:18:03.587693 253591 ssh_runner.go:195] Run: sudo systemctl enable cri-docker.socket + I1102 23:18:03.633586 253591 ssh_runner.go:195] Run: sudo systemctl daemon-reload + I1102 23:18:03.680333 253591 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.socket + I1102 23:18:03.695969 253591 ssh_runner.go:195] Run: sudo systemctl reset-failed cri-docker.service + I1102 23:18:03.702678 253591 ssh_runner.go:195] Run: sudo systemctl daemon-reload + I1102 23:18:03.750398 253591 ssh_runner.go:195] Run: sudo systemctl restart cri-docker.service + I1102 23:18:03.798855 253591 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service cri-docker.service + I1102 23:18:03.805577 253591 start.go:543] Will wait 60s for socket path /var/run/cri-dockerd.sock + I1102 23:18:03.805613 253591 ssh_runner.go:195] Run: stat /var/run/cri-dockerd.sock + I1102 23:18:03.807636 253591 start.go:564] Will wait 60s for crictl version + I1102 23:18:03.807673 253591 ssh_runner.go:195] Run: which crictl + I1102 23:18:03.809563 253591 ssh_runner.go:195] Run: sudo /usr/local/bin/crictl version + I1102 23:18:03.823581 253591 start.go:580] Version: 0.1.0 + RuntimeName: docker + RuntimeVersion: 28.5.1 + RuntimeApiVersion: v1 + I1102 23:18:03.823630 253591 ssh_runner.go:195] Run: docker version --format {{.Server.Version}} + I1102 23:18:03.837461 253591 ssh_runner.go:195] Run: docker version 
--format {{.Server.Version}} + I1102 23:18:03.851467 253591 out.go:252] * Preparing Kubernetes v1.34.1 on Docker 28.5.1 ... + I1102 23:18:03.851513 253591 cli_runner.go:164] Run: docker network inspect scheduled-stop-206205 --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" + I1102 23:18:03.862865 253591 ssh_runner.go:195] Run: grep 192.168.76.1 host.minikube.internal$ /etc/hosts + I1102 23:18:03.864895 253591 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\thost.minikube.internal$' "/etc/hosts"; echo "192.168.76.1 host.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts"" + I1102 23:18:03.870983 253591 kubeadm.go:884] updating cluster {Name:scheduled-stop-206205 KeepContext:false EmbedCerts:false MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-206205 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} ... 
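The cluster spec dumped above is also persisted to the profile's config.json (the "Saving config" path appears a few entries earlier), which is easier to query than the log. A sketch assuming jq is installed, $MINIKUBE_HOME is the .minikube directory from the log, and the JSON field names follow the Go struct fields shown above:

  # Kubernetes version and node list straight from the saved profile
  jq '.KubernetesConfig.KubernetesVersion, .Nodes' \
    "$MINIKUBE_HOME/profiles/scheduled-stop-206205/config.json"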
+ I1102 23:18:03.871038 253591 preload.go:183] Checking if preload exists for k8s version v1.34.1 and runtime docker + I1102 23:18:03.871073 253591 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}} + I1102 23:18:03.882220 253591 docker.go:691] Got preloaded images: -- stdout -- + registry.k8s.io/kube-scheduler:v1.34.1 + registry.k8s.io/kube-apiserver:v1.34.1 + registry.k8s.io/kube-controller-manager:v1.34.1 + registry.k8s.io/kube-proxy:v1.34.1 + registry.k8s.io/etcd:3.6.4-0 + registry.k8s.io/pause:3.10.1 + registry.k8s.io/coredns/coredns:v1.12.1 + gcr.io/k8s-minikube/storage-provisioner:v5 + + -- /stdout -- + I1102 23:18:03.882226 253591 docker.go:621] Images already preloaded, skipping extraction + I1102 23:18:03.882267 253591 ssh_runner.go:195] Run: docker images --format {{.Repository}}:{{.Tag}} + I1102 23:18:03.893738 253591 docker.go:691] Got preloaded images: -- stdout -- + registry.k8s.io/kube-scheduler:v1.34.1 + registry.k8s.io/kube-apiserver:v1.34.1 + registry.k8s.io/kube-controller-manager:v1.34.1 + registry.k8s.io/kube-proxy:v1.34.1 + registry.k8s.io/etcd:3.6.4-0 + registry.k8s.io/pause:3.10.1 + registry.k8s.io/coredns/coredns:v1.12.1 + gcr.io/k8s-minikube/storage-provisioner:v5 + + -- /stdout -- + I1102 23:18:03.893746 253591 cache_images.go:86] Images are preloaded, skipping loading + I1102 23:18:03.893752 253591 kubeadm.go:935] updating node { 192.168.76.2 8443 v1.34.1 docker true true} ... + I1102 23:18:03.893809 253591 kubeadm.go:947] kubelet [Unit] + Wants=docker.socket + + [Service] + ExecStart= + ExecStart=/var/lib/minikube/binaries/v1.34.1/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --hostname-override=scheduled-stop-206205 --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.76.2 + + [Install] + config: + {KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-206205 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} + I1102 23:18:03.893851 253591 ssh_runner.go:195] Run: docker info --format {{.CgroupDriver}} + I1102 23:18:03.921973 253591 cni.go:84] Creating CNI manager for "" + I1102 23:18:03.921989 253591 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge + I1102 23:18:03.922001 253591 kubeadm.go:85] Using pod CIDR: 10.244.0.0/16 + I1102 23:18:03.922012 253591 kubeadm.go:190] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:192.168.76.2 APIServerPort:8443 KubernetesVersion:v1.34.1 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:scheduled-stop-206205 NodeName:scheduled-stop-206205 DNSDomain:cluster.local CRISocket:/var/run/cri-dockerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "192.168.76.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] 
Pairs:map[]}] FeatureArgs:map[] NodeIP:192.168.76.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[containerRuntimeEndpoint:unix:///var/run/cri-dockerd.sock hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true} + I1102 23:18:03.922091 253591 kubeadm.go:196] kubeadm config: + apiVersion: kubeadm.k8s.io/v1beta4 + kind: InitConfiguration + localAPIEndpoint: + advertiseAddress: 192.168.76.2 + bindPort: 8443 + bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication + nodeRegistration: + criSocket: unix:///var/run/cri-dockerd.sock + name: "scheduled-stop-206205" + kubeletExtraArgs: + - name: "node-ip" + value: "192.168.76.2" + taints: [] + --- + apiVersion: kubeadm.k8s.io/v1beta4 + kind: ClusterConfiguration + apiServer: + certSANs: ["127.0.0.1", "localhost", "192.168.76.2"] + extraArgs: + - name: "enable-admission-plugins" + value: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + controllerManager: + extraArgs: + - name: "allocate-node-cidrs" + value: "true" + - name: "leader-elect" + value: "false" + scheduler: + extraArgs: + - name: "leader-elect" + value: "false" + certificatesDir: /var/lib/minikube/certs + clusterName: mk + controlPlaneEndpoint: control-plane.minikube.internal:8443 + etcd: + local: + dataDir: /var/lib/minikube/etcd + kubernetesVersion: v1.34.1 + networking: + dnsDomain: cluster.local + podSubnet: "10.244.0.0/16" + serviceSubnet: 10.96.0.0/12 + --- + apiVersion: kubelet.config.k8s.io/v1beta1 + kind: KubeletConfiguration + authentication: + x509: + clientCAFile: /var/lib/minikube/certs/ca.crt + cgroupDriver: systemd + containerRuntimeEndpoint: unix:///var/run/cri-dockerd.sock + hairpinMode: hairpin-veth + runtimeRequestTimeout: 15m + clusterDomain: "cluster.local" + # disable disk resource management by default + imageGCHighThresholdPercent: 100 + evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" + failSwapOn: false + staticPodPath: /etc/kubernetes/manifests + --- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + kind: KubeProxyConfiguration + clusterCIDR: "10.244.0.0/16" + metricsBindAddress: 0.0.0.0:10249 + conntrack: + maxPerCore: 0 + # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established" + tcpEstablishedTimeout: 0s + # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close" + tcpCloseWaitTimeout: 0s + + I1102 23:18:03.922133 253591 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.34.1 + I1102 23:18:03.926603 253591 binaries.go:44] Found k8s binaries, skipping transfer + I1102 23:18:03.926638 253591 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube + I1102 23:18:03.931622 253591 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (320 bytes) + I1102 23:18:03.939311 253591 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes) + I1102 23:18:03.947048 253591 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2221 bytes) + I1102 23:18:03.954600 253591 ssh_runner.go:195] Run: grep 192.168.76.2 control-plane.minikube.internal$ /etc/hosts + I1102 
23:18:03.956721 253591 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "192.168.76.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts"" + I1102 23:18:03.963188 253591 ssh_runner.go:195] Run: sudo systemctl daemon-reload + I1102 23:18:04.023952 253591 ssh_runner.go:195] Run: sudo systemctl start kubelet + I1102 23:18:04.044114 253591 certs.go:69] Setting up /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205 for IP: 192.168.76.2 + I1102 23:18:04.044119 253591 certs.go:195] generating shared ca certs ... + I1102 23:18:04.044130 253591 certs.go:227] acquiring lock for ca certs: {Name:mka14e8ec45c47b14f993e33375454668e25d494 Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:04.044249 253591 certs.go:236] skipping valid "minikubeCA" ca cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.key + I1102 23:18:04.044280 253591 certs.go:236] skipping valid "proxyClientCA" ca cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/proxy-client-ca.key + I1102 23:18:04.044284 253591 certs.go:257] generating profile certs ... + I1102 23:18:04.044325 253591 certs.go:364] generating signed profile cert for "minikube-user": /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/client.key + I1102 23:18:04.044340 253591 crypto.go:68] Generating cert /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/client.crt with IP's: [] + I1102 23:18:04.208998 253591 crypto.go:156] Writing cert to /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/client.crt ... + I1102 23:18:04.209006 253591 lock.go:35] WriteFile acquiring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/client.crt: {Name:mk4242b9869b318af135d5a04404d4efc6cf682e Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:04.209130 253591 crypto.go:164] Writing key to /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/client.key ... + I1102 23:18:04.209135 253591 lock.go:35] WriteFile acquiring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/client.key: {Name:mk897a441a744e4deb312bb31d7e45f305393cb5 Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:04.209203 253591 certs.go:364] generating signed profile cert for "minikube": /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.key.d67ede0c + I1102 23:18:04.209219 253591 crypto.go:68] Generating cert /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.crt.d67ede0c with IP's: [10.96.0.1 127.0.0.1 10.0.0.1 192.168.76.2] + I1102 23:18:04.436006 253591 crypto.go:156] Writing cert to /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.crt.d67ede0c ... 
+ I1102 23:18:04.436013 253591 lock.go:35] WriteFile acquiring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.crt.d67ede0c: {Name:mk049a160b33ba00c531396324e27b089b294b76 Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:04.436141 253591 crypto.go:164] Writing key to /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.key.d67ede0c ... + I1102 23:18:04.436145 253591 lock.go:35] WriteFile acquiring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.key.d67ede0c: {Name:mkbab4dc88cc36ed893a75442190fa04204c9d4c Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:04.436195 253591 certs.go:382] copying /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.crt.d67ede0c -> /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.crt + I1102 23:18:04.436247 253591 certs.go:386] copying /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.key.d67ede0c -> /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.key + I1102 23:18:04.436289 253591 certs.go:364] generating signed profile cert for "aggregator": /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/proxy-client.key + I1102 23:18:04.436304 253591 crypto.go:68] Generating cert /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/proxy-client.crt with IP's: [] + I1102 23:18:04.486571 253591 crypto.go:156] Writing cert to /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/proxy-client.crt ... + I1102 23:18:04.486578 253591 lock.go:35] WriteFile acquiring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/proxy-client.crt: {Name:mkc557a3c4e4c839becbe50d45b8ff85e88e77e1 Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:04.486686 253591 crypto.go:164] Writing key to /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/proxy-client.key ... 
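The apiserver certificate generated above (SANs 10.96.0.1, 127.0.0.1, 10.0.0.1 and 192.168.76.2) is the one kubeadm later reports as "Using existing apiserver certificate and key on disk", so it is worth being able to inspect it directly; assuming $MINIKUBE_HOME is the .minikube directory from the log:

  # show the SANs baked into the generated apiserver certificate
  openssl x509 -noout -text \
    -in "$MINIKUBE_HOME/profiles/scheduled-stop-206205/apiserver.crt" \
    | grep -A1 'Subject Alternative Name'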
+ I1102 23:18:04.486692 253591 lock.go:35] WriteFile acquiring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/proxy-client.key: {Name:mk133af299b17c585e172e986c844eb5075cf70d Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:04.486845 253591 certs.go:484] found cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/37869.pem (1338 bytes) + W1102 23:18:04.486870 253591 certs.go:480] ignoring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/37869_empty.pem, impossibly tiny 0 bytes + I1102 23:18:04.486875 253591 certs.go:484] found cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca-key.pem (1675 bytes) + I1102 23:18:04.486892 253591 certs.go:484] found cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/ca.pem (1082 bytes) + I1102 23:18:04.486908 253591 certs.go:484] found cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/cert.pem (1127 bytes) + I1102 23:18:04.486943 253591 certs.go:484] found cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/key.pem (1675 bytes) + I1102 23:18:04.486978 253591 certs.go:484] found cert: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/files/etc/ssl/certs/378692.pem (1708 bytes) + I1102 23:18:04.487364 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes) + I1102 23:18:04.498438 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1675 bytes) + I1102 23:18:04.509333 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes) + I1102 23:18:04.520074 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes) + I1102 23:18:04.530681 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1432 bytes) + I1102 23:18:04.541415 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1675 bytes) + I1102 23:18:04.551866 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes) + I1102 23:18:04.562486 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/profiles/scheduled-stop-206205/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1679 bytes) + I1102 23:18:04.573100 253591 ssh_runner.go:362] scp 
/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/files/etc/ssl/certs/378692.pem --> /usr/share/ca-certificates/378692.pem (1708 bytes) + I1102 23:18:04.583855 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes) + I1102 23:18:04.595385 253591 ssh_runner.go:362] scp /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/certs/37869.pem --> /usr/share/ca-certificates/37869.pem (1338 bytes) + I1102 23:18:04.605855 253591 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes) + I1102 23:18:04.613625 253591 ssh_runner.go:195] Run: openssl version + I1102 23:18:04.616906 253591 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem" + I1102 23:18:04.621840 253591 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem + I1102 23:18:04.624171 253591 certs.go:528] hashing: -rw-r--r-- 1 root root 1111 Nov 2 22:47 /usr/share/ca-certificates/minikubeCA.pem + I1102 23:18:04.624213 253591 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem + I1102 23:18:04.641639 253591 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0" + I1102 23:18:04.646458 253591 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/37869.pem && ln -fs /usr/share/ca-certificates/37869.pem /etc/ssl/certs/37869.pem" + I1102 23:18:04.651254 253591 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/37869.pem + I1102 23:18:04.653315 253591 certs.go:528] hashing: -rw-r--r-- 1 root root 1338 Nov 2 22:52 /usr/share/ca-certificates/37869.pem + I1102 23:18:04.653356 253591 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/37869.pem + I1102 23:18:04.670542 253591 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/51391683.0 || ln -fs /etc/ssl/certs/37869.pem /etc/ssl/certs/51391683.0" + I1102 23:18:04.675585 253591 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/378692.pem && ln -fs /usr/share/ca-certificates/378692.pem /etc/ssl/certs/378692.pem" + I1102 23:18:04.680499 253591 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/378692.pem + I1102 23:18:04.682640 253591 certs.go:528] hashing: -rw-r--r-- 1 root root 1708 Nov 2 22:52 /usr/share/ca-certificates/378692.pem + I1102 23:18:04.682670 253591 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/378692.pem + I1102 23:18:04.699999 253591 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/3ec20f2e.0 || ln -fs /etc/ssl/certs/378692.pem /etc/ssl/certs/3ec20f2e.0" + I1102 23:18:04.704862 253591 ssh_runner.go:195] Run: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt + I1102 23:18:04.706859 253591 certs.go:400] 'apiserver-kubelet-client' cert doesn't exist, likely first start: stat /var/lib/minikube/certs/apiserver-kubelet-client.crt: Process exited with status 1 + stdout: + + stderr: + stat: cannot statx '/var/lib/minikube/certs/apiserver-kubelet-client.crt': No such file or directory + I1102 23:18:04.706882 253591 kubeadm.go:401] StartCluster: {Name:scheduled-stop-206205 KeepContext:false EmbedCerts:false MinikubeISO: 
KicBaseImage:gcr.io/k8s-minikube/kicbase-builds:v0.0.48-1760939008-21773@sha256:d8d8a3f29f027433bea12764bddd1aa26c7ad9bb912e016c1bc51278db1343d8 Memory:3072 CPUs:2 DiskSize:20000 Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:8443 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.34.1 ClusterName:scheduled-stop-206205 Namespace:default APIServerHAVIP: APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:docker CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:} Nodes:[{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network: Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s MountString: Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false DisableCoreDNSLog:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0 GPUs: AutoPauseInterval:1m0s} + I1102 23:18:04.706959 253591 ssh_runner.go:195] Run: docker ps --filter status=paused --filter=name=k8s_.*_(kube-system)_ --format={{.ID}} + I1102 23:18:04.717718 253591 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd + I1102 23:18:04.722208 253591 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml + I1102 23:18:04.726655 253591 kubeadm.go:215] ignoring SystemVerification for kubeadm because of docker driver + I1102 23:18:04.726685 253591 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf + I1102 23:18:04.731168 253591 kubeadm.go:156] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2 + stdout: + + stderr: + ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory + ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory + ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory + ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory + I1102 23:18:04.731172 253591 kubeadm.go:158] found existing configuration files: + + I1102 23:18:04.731205 253591 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf + I1102 23:18:04.735630 253591 kubeadm.go:164] 
"https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/admin.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/admin.conf: Process exited with status 2 + stdout: + + stderr: + grep: /etc/kubernetes/admin.conf: No such file or directory + I1102 23:18:04.735662 253591 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/admin.conf + I1102 23:18:04.740078 253591 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf + I1102 23:18:04.744424 253591 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/kubelet.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/kubelet.conf: Process exited with status 2 + stdout: + + stderr: + grep: /etc/kubernetes/kubelet.conf: No such file or directory + I1102 23:18:04.744460 253591 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/kubelet.conf + I1102 23:18:04.748755 253591 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf + I1102 23:18:04.753213 253591 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/controller-manager.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/controller-manager.conf: Process exited with status 2 + stdout: + + stderr: + grep: /etc/kubernetes/controller-manager.conf: No such file or directory + I1102 23:18:04.753244 253591 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/controller-manager.conf + I1102 23:18:04.757425 253591 ssh_runner.go:195] Run: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf + I1102 23:18:04.762105 253591 kubeadm.go:164] "https://control-plane.minikube.internal:8443" may not be in /etc/kubernetes/scheduler.conf - will remove: sudo grep https://control-plane.minikube.internal:8443 /etc/kubernetes/scheduler.conf: Process exited with status 2 + stdout: + + stderr: + grep: /etc/kubernetes/scheduler.conf: No such file or directory + I1102 23:18:04.762141 253591 ssh_runner.go:195] Run: sudo rm -f /etc/kubernetes/scheduler.conf + I1102 23:18:04.766828 253591 ssh_runner.go:286] Start: sudo /bin/bash -c "env PATH="/var/lib/minikube/binaries/v1.34.1:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables" + I1102 23:18:04.788502 253591 kubeadm.go:319] [init] Using Kubernetes version: v1.34.1 + I1102 23:18:04.788535 253591 kubeadm.go:319] [preflight] Running pre-flight checks + I1102 23:18:04.836991 253591 kubeadm.go:319] [preflight] Pulling images required for setting up a Kubernetes cluster + I1102 23:18:04.837059 253591 kubeadm.go:319] [preflight] This might take a minute or two, depending on the speed of your internet connection + I1102 23:18:04.837119 253591 kubeadm.go:319] [preflight] You can also perform this action beforehand using 'kubeadm config images pull' + I1102 23:18:04.843731 253591 kubeadm.go:319] [certs] Using certificateDir folder "/var/lib/minikube/certs" + 
I1102 23:18:04.844227 253591 out.go:252] - Generating certificates and keys ... + I1102 23:18:04.844272 253591 kubeadm.go:319] [certs] Using existing ca certificate authority + I1102 23:18:04.844306 253591 kubeadm.go:319] [certs] Using existing apiserver certificate and key on disk + I1102 23:18:04.925670 253591 kubeadm.go:319] [certs] Generating "apiserver-kubelet-client" certificate and key + I1102 23:18:05.123791 253591 kubeadm.go:319] [certs] Generating "front-proxy-ca" certificate and key + I1102 23:18:05.302844 253591 kubeadm.go:319] [certs] Generating "front-proxy-client" certificate and key + I1102 23:18:05.402793 253591 kubeadm.go:319] [certs] Generating "etcd/ca" certificate and key + I1102 23:18:05.657457 253591 kubeadm.go:319] [certs] Generating "etcd/server" certificate and key + I1102 23:18:05.657534 253591 kubeadm.go:319] [certs] etcd/server serving cert is signed for DNS names [localhost scheduled-stop-206205] and IPs [192.168.76.2 127.0.0.1 ::1] + I1102 23:18:05.867167 253591 kubeadm.go:319] [certs] Generating "etcd/peer" certificate and key + I1102 23:18:05.867272 253591 kubeadm.go:319] [certs] etcd/peer serving cert is signed for DNS names [localhost scheduled-stop-206205] and IPs [192.168.76.2 127.0.0.1 ::1] + I1102 23:18:06.006321 253591 kubeadm.go:319] [certs] Generating "etcd/healthcheck-client" certificate and key + I1102 23:18:06.020741 253591 kubeadm.go:319] [certs] Generating "apiserver-etcd-client" certificate and key + I1102 23:18:06.154439 253591 kubeadm.go:319] [certs] Generating "sa" key and public key + I1102 23:18:06.154498 253591 kubeadm.go:319] [kubeconfig] Using kubeconfig folder "/etc/kubernetes" + I1102 23:18:06.290009 253591 kubeadm.go:319] [kubeconfig] Writing "admin.conf" kubeconfig file + I1102 23:18:06.387763 253591 kubeadm.go:319] [kubeconfig] Writing "super-admin.conf" kubeconfig file + I1102 23:18:06.443867 253591 kubeadm.go:319] [kubeconfig] Writing "kubelet.conf" kubeconfig file + I1102 23:18:06.726384 253591 kubeadm.go:319] [kubeconfig] Writing "controller-manager.conf" kubeconfig file + I1102 23:18:06.903302 253591 kubeadm.go:319] [kubeconfig] Writing "scheduler.conf" kubeconfig file + I1102 23:18:06.903667 253591 kubeadm.go:319] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" + I1102 23:18:06.904817 253591 kubeadm.go:319] [control-plane] Using manifest folder "/etc/kubernetes/manifests" + I1102 23:18:06.905155 253591 out.go:252] - Booting up control plane ... 
+ I1102 23:18:06.905207 253591 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-apiserver" + I1102 23:18:06.905368 253591 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-controller-manager" + I1102 23:18:06.905771 253591 kubeadm.go:319] [control-plane] Creating static Pod manifest for "kube-scheduler" + I1102 23:18:06.923260 253591 kubeadm.go:319] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" + I1102 23:18:06.923333 253591 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/instance-config.yaml" + I1102 23:18:06.926911 253591 kubeadm.go:319] [patches] Applied patch of type "application/strategic-merge-patch+json" to target "kubeletconfiguration" + I1102 23:18:06.927029 253591 kubeadm.go:319] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" + I1102 23:18:06.927059 253591 kubeadm.go:319] [kubelet-start] Starting the kubelet + I1102 23:18:06.999634 253591 kubeadm.go:319] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests" + I1102 23:18:06.999719 253591 kubeadm.go:319] [kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s + I1102 23:18:07.500751 253591 kubeadm.go:319] [kubelet-check] The kubelet is healthy after 501.177161ms + I1102 23:18:07.503256 253591 kubeadm.go:319] [control-plane-check] Waiting for healthy control plane components. This can take up to 4m0s + I1102 23:18:07.503330 253591 kubeadm.go:319] [control-plane-check] Checking kube-apiserver at https://192.168.76.2:8443/livez + I1102 23:18:07.503391 253591 kubeadm.go:319] [control-plane-check] Checking kube-controller-manager at https://127.0.0.1:10257/healthz + I1102 23:18:07.503444 253591 kubeadm.go:319] [control-plane-check] Checking kube-scheduler at https://127.0.0.1:10259/livez + I1102 23:18:08.102936 253591 kubeadm.go:319] [control-plane-check] kube-controller-manager is healthy after 599.534087ms + I1102 23:18:08.676079 253591 kubeadm.go:319] [control-plane-check] kube-scheduler is healthy after 1.172708516s + I1102 23:18:10.004930 253591 kubeadm.go:319] [control-plane-check] kube-apiserver is healthy after 2.501559348s + I1102 23:18:10.010088 253591 kubeadm.go:319] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace + I1102 23:18:10.014186 253591 kubeadm.go:319] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster + I1102 23:18:10.018106 253591 kubeadm.go:319] [upload-certs] Skipping phase. Please see --upload-certs + I1102 23:18:10.018230 253591 kubeadm.go:319] [mark-control-plane] Marking the node scheduled-stop-206205 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers] + I1102 23:18:10.021761 253591 kubeadm.go:319] [bootstrap-token] Using token: zzot1a.o97su5nifgm8hbsf + I1102 23:18:10.022194 253591 out.go:252] - Configuring RBAC rules ... 
+ I1102 23:18:10.022262 253591 kubeadm.go:319] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles + I1102 23:18:10.023883 253591 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes + I1102 23:18:10.026229 253591 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials + I1102 23:18:10.027749 253591 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token + I1102 23:18:10.028885 253591 kubeadm.go:319] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster + I1102 23:18:10.030110 253591 kubeadm.go:319] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace + I1102 23:18:10.408133 253591 kubeadm.go:319] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key + I1102 23:18:10.814377 253591 kubeadm.go:319] [addons] Applied essential addon: CoreDNS + I1102 23:18:11.407099 253591 kubeadm.go:319] [addons] Applied essential addon: kube-proxy + I1102 23:18:11.407521 253591 kubeadm.go:319] + I1102 23:18:11.407572 253591 kubeadm.go:319] Your Kubernetes control-plane has initialized successfully! + I1102 23:18:11.407575 253591 kubeadm.go:319] + I1102 23:18:11.407619 253591 kubeadm.go:319] To start using your cluster, you need to run the following as a regular user: + I1102 23:18:11.407622 253591 kubeadm.go:319] + I1102 23:18:11.407635 253591 kubeadm.go:319] mkdir -p $HOME/.kube + I1102 23:18:11.407697 253591 kubeadm.go:319] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + I1102 23:18:11.407728 253591 kubeadm.go:319] sudo chown $(id -u):$(id -g) $HOME/.kube/config + I1102 23:18:11.407730 253591 kubeadm.go:319] + I1102 23:18:11.407773 253591 kubeadm.go:319] Alternatively, if you are the root user, you can run: + I1102 23:18:11.407780 253591 kubeadm.go:319] + I1102 23:18:11.407826 253591 kubeadm.go:319] export KUBECONFIG=/etc/kubernetes/admin.conf + I1102 23:18:11.407828 253591 kubeadm.go:319] + I1102 23:18:11.407862 253591 kubeadm.go:319] You should now deploy a pod network to the cluster. 
+ I1102 23:18:11.407927 253591 kubeadm.go:319] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: + I1102 23:18:11.407975 253591 kubeadm.go:319] https://kubernetes.io/docs/concepts/cluster-administration/addons/ + I1102 23:18:11.407977 253591 kubeadm.go:319] + I1102 23:18:11.408037 253591 kubeadm.go:319] You can now join any number of control-plane nodes by copying certificate authorities + I1102 23:18:11.408084 253591 kubeadm.go:319] and service account keys on each node and then running the following as root: + I1102 23:18:11.408087 253591 kubeadm.go:319] + I1102 23:18:11.408140 253591 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token zzot1a.o97su5nifgm8hbsf \ + I1102 23:18:11.408198 253591 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:481e6c6c81577cc2e0f1743d9bdbb8d6de8e828bf4a5e4190480860442a4bdf3 \ + I1102 23:18:11.408209 253591 kubeadm.go:319] --control-plane + I1102 23:18:11.408211 253591 kubeadm.go:319] + I1102 23:18:11.408274 253591 kubeadm.go:319] Then you can join any number of worker nodes by running the following on each as root: + I1102 23:18:11.408279 253591 kubeadm.go:319] + I1102 23:18:11.408327 253591 kubeadm.go:319] kubeadm join control-plane.minikube.internal:8443 --token zzot1a.o97su5nifgm8hbsf \ + I1102 23:18:11.408387 253591 kubeadm.go:319] --discovery-token-ca-cert-hash sha256:481e6c6c81577cc2e0f1743d9bdbb8d6de8e828bf4a5e4190480860442a4bdf3 + I1102 23:18:11.411428 253591 kubeadm.go:319] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service' + I1102 23:18:11.411452 253591 cni.go:84] Creating CNI manager for "" + I1102 23:18:11.411462 253591 cni.go:158] "docker" driver + "docker" container runtime found on kubernetes v1.24+, recommending bridge + I1102 23:18:11.411725 253591 out.go:179] * Configuring bridge CNI (Container Networking Interface) ... 
+ I1102 23:18:11.411874 253591 ssh_runner.go:195] Run: sudo mkdir -p /etc/cni/net.d + I1102 23:18:11.417306 253591 ssh_runner.go:362] scp memory --> /etc/cni/net.d/1-k8s.conflist (496 bytes) + I1102 23:18:11.425696 253591 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj" + I1102 23:18:11.425760 253591 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig + I1102 23:18:11.425776 253591 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig label --overwrite nodes scheduled-stop-206205 minikube.k8s.io/updated_at=2025_11_02T23_18_11_0700 minikube.k8s.io/version=v1.37.0 minikube.k8s.io/commit=e2222ae36f11d3515cb4a1cbfbc513a974c210e6 minikube.k8s.io/name=scheduled-stop-206205 minikube.k8s.io/primary=true + I1102 23:18:11.471856 253591 kubeadm.go:1114] duration metric: took 46.152711ms to wait for elevateKubeSystemPrivileges + I1102 23:18:11.471871 253591 ops.go:34] apiserver oom_adj: -16 + I1102 23:18:11.471876 253591 kubeadm.go:403] duration metric: took 6.76499633s to StartCluster + I1102 23:18:11.471891 253591 settings.go:142] acquiring lock: {Name:mkb9be79a929c9a9a1c960b77da9cebe4afb2abe Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:11.471971 253591 settings.go:150] Updating kubeconfig: /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig + I1102 23:18:11.472443 253591 lock.go:35] WriteFile acquiring /home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/kubeconfig: {Name:mk69953fc2a8af178bf939270c575260f1197035 Clock:{} Delay:500ms Timeout:1m0s Cancel:} + I1102 23:18:11.472582 253591 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml" + I1102 23:18:11.472577 253591 start.go:236] Will wait 6m0s for node &{Name: IP:192.168.76.2 Port:8443 KubernetesVersion:v1.34.1 ContainerRuntime:docker ControlPlane:true Worker:true} + I1102 23:18:11.472616 253591 addons.go:512] enable addons start: toEnable=map[ambassador:false amd-gpu-device-plugin:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubeflow:false kubetail:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-device-plugin:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-rancher:false volcano:false volumesnapshots:false yakd:false] + I1102 23:18:11.472671 253591 addons.go:70] Setting storage-provisioner=true in profile "scheduled-stop-206205" + I1102 23:18:11.472676 253591 addons.go:70] Setting default-storageclass=true in profile "scheduled-stop-206205" + I1102 23:18:11.472687 253591 addons.go:239] Setting addon storage-provisioner=true in "scheduled-stop-206205" + I1102 23:18:11.472689 253591 addons_storage_classes.go:34] enableOrDisableStorageClasses default-storageclass=true on "scheduled-stop-206205" + I1102 23:18:11.472709 253591 host.go:66] Checking 
if "scheduled-stop-206205" exists ... + I1102 23:18:11.472725 253591 config.go:182] Loaded profile config "scheduled-stop-206205": Driver=docker, ContainerRuntime=docker, KubernetesVersion=v1.34.1 + I1102 23:18:11.472814 253591 out.go:179] * Verifying Kubernetes components... + I1102 23:18:11.472941 253591 cli_runner.go:164] Run: docker container inspect scheduled-stop-206205 --format={{.State.Status}} + I1102 23:18:11.473073 253591 cli_runner.go:164] Run: docker container inspect scheduled-stop-206205 --format={{.State.Status}} + I1102 23:18:11.473107 253591 ssh_runner.go:195] Run: sudo systemctl daemon-reload + I1102 23:18:11.486155 253591 out.go:179] - Using image gcr.io/k8s-minikube/storage-provisioner:v5 + I1102 23:18:11.486490 253591 addons.go:436] installing /etc/kubernetes/addons/storage-provisioner.yaml + I1102 23:18:11.486497 253591 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes) + I1102 23:18:11.486549 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:11.486753 253591 addons.go:239] Setting addon default-storageclass=true in "scheduled-stop-206205" + I1102 23:18:11.486771 253591 host.go:66] Checking if "scheduled-stop-206205" exists ... + I1102 23:18:11.487085 253591 cli_runner.go:164] Run: docker container inspect scheduled-stop-206205 --format={{.State.Status}} + I1102 23:18:11.501406 253591 addons.go:436] installing /etc/kubernetes/addons/storageclass.yaml + I1102 23:18:11.501411 253591 ssh_runner.go:362] scp storageclass/storageclass.yaml --> /etc/kubernetes/addons/storageclass.yaml (271 bytes) + I1102 23:18:11.501462 253591 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" scheduled-stop-206205 + I1102 23:18:11.502573 253591 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa Username:docker} + I1102 23:18:11.512341 253591 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32969 SSHKeyPath:/home/minikube/minikube-integration/e2222ae36f11d3515cb4a1cbfbc513a974c210e6-31863/.minikube/machines/scheduled-stop-206205/id_rsa Username:docker} + I1102 23:18:11.520398 253591 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n 192.168.76.1 host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.34.1/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -" + I1102 23:18:11.545285 253591 ssh_runner.go:195] Run: sudo systemctl start kubelet + I1102 23:18:11.598698 253591 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml + I1102 23:18:11.605838 253591 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.34.1/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml + I1102 23:18:11.609054 253591 start.go:977] {"host.minikube.internal": 192.168.76.1} host record injected into CoreDNS's ConfigMap + I1102 23:18:11.609590 253591 api_server.go:52] waiting for apiserver process to appear ... 
+ I1102 23:18:11.609636 253591 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.* + I1102 23:18:11.736315 253591 api_server.go:72] duration metric: took 263.723816ms to wait for apiserver process to appear ... + I1102 23:18:11.736321 253591 api_server.go:88] waiting for apiserver healthz status ... + I1102 23:18:11.736327 253591 api_server.go:253] Checking apiserver healthz at https://192.168.76.2:8443/healthz ... + I1102 23:18:11.738794 253591 api_server.go:279] https://192.168.76.2:8443/healthz returned 200: + ok + I1102 23:18:11.739059 253591 out.go:179] * Enabled addons: storage-provisioner, default-storageclass + I1102 23:18:11.739186 253591 addons.go:515] duration metric: took 266.567411ms for enable addons: enabled=[storage-provisioner default-storageclass] + I1102 23:18:11.739206 253591 api_server.go:141] control plane version: v1.34.1 + I1102 23:18:11.739215 253591 api_server.go:131] duration metric: took 2.891343ms to wait for apiserver health ... + I1102 23:18:11.739220 253591 system_pods.go:43] waiting for kube-system pods to appear ... + I1102 23:18:11.740465 253591 system_pods.go:59] 5 kube-system pods found + I1102 23:18:11.740480 253591 system_pods.go:61] "etcd-scheduled-stop-206205" [3a3467ab-2584-452f-9bdc-476103de560e] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd]) + I1102 23:18:11.740485 253591 system_pods.go:61] "kube-apiserver-scheduled-stop-206205" [be3cdd37-7ff6-4d68-8c6b-cd82a78eb07a] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver]) + I1102 23:18:11.740488 253591 system_pods.go:61] "kube-controller-manager-scheduled-stop-206205" [7c802ff5-1d76-4a4b-99ed-54947cb4e48f] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager]) + I1102 23:18:11.740491 253591 system_pods.go:61] "kube-scheduler-scheduled-stop-206205" [357b6ad0-c778-4062-a5b1-40b7a42b1b45] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler]) + I1102 23:18:11.740493 253591 system_pods.go:61] "storage-provisioner" [30637bd6-c7f0-4fe5-b79f-a0d9e1f7ca18] Pending + I1102 23:18:11.740496 253591 system_pods.go:74] duration metric: took 1.273402ms to wait for pod list to return data ... + I1102 23:18:11.740501 253591 kubeadm.go:587] duration metric: took 267.910986ms to wait for: map[apiserver:true system_pods:true] + I1102 23:18:11.740508 253591 node_conditions.go:102] verifying NodePressure condition ... + I1102 23:18:11.741731 253591 node_conditions.go:122] node storage ephemeral capacity is 385926528Ki + I1102 23:18:11.741742 253591 node_conditions.go:123] node cpu capacity is 8 + I1102 23:18:11.741750 253591 node_conditions.go:105] duration metric: took 1.240672ms to run NodePressure ... + I1102 23:18:11.741757 253591 start.go:242] waiting for startup goroutines ... + I1102 23:18:12.111097 253591 kapi.go:214] "coredns" deployment in "kube-system" namespace and "scheduled-stop-206205" context rescaled to 1 replicas + I1102 23:18:12.111110 253591 start.go:247] waiting for cluster config update ... + I1102 23:18:12.111116 253591 start.go:256] writing updated cluster config ... 
+ I1102 23:18:12.111299 253591 ssh_runner.go:195] Run: rm -f paused + I1102 23:18:12.140155 253591 start.go:628] kubectl: 1.34.1, cluster: 1.34.1 (minor skew: 0) + I1102 23:18:12.140451 253591 out.go:179] * Done! kubectl is now configured to use "scheduled-stop-206205" cluster and "default" namespace by default + + + ==> Docker <== + Nov 02 23:18:03 scheduled-stop-206205 dockerd[1053]: time="2025-11-02T23:18:03.495395710Z" level=info msg="Loading containers: done." + Nov 02 23:18:03 scheduled-stop-206205 dockerd[1053]: time="2025-11-02T23:18:03.499868631Z" level=info msg="Docker daemon" commit=f8215cc containerd-snapshotter=false storage-driver=overlay2 version=28.5.1 + Nov 02 23:18:03 scheduled-stop-206205 dockerd[1053]: time="2025-11-02T23:18:03.499901472Z" level=info msg="Initializing buildkit" + Nov 02 23:18:03 scheduled-stop-206205 dockerd[1053]: time="2025-11-02T23:18:03.509786801Z" level=info msg="Completed buildkit initialization" + Nov 02 23:18:03 scheduled-stop-206205 dockerd[1053]: time="2025-11-02T23:18:03.512985444Z" level=info msg="Daemon has completed initialization" + Nov 02 23:18:03 scheduled-stop-206205 dockerd[1053]: time="2025-11-02T23:18:03.513034998Z" level=info msg="API listen on /run/docker.sock" + Nov 02 23:18:03 scheduled-stop-206205 systemd[1]: Started docker.service - Docker Application Container Engine. + Nov 02 23:18:03 scheduled-stop-206205 dockerd[1053]: time="2025-11-02T23:18:03.513042406Z" level=info msg="API listen on [::]:2376" + Nov 02 23:18:03 scheduled-stop-206205 dockerd[1053]: time="2025-11-02T23:18:03.513043378Z" level=info msg="API listen on /var/run/docker.sock" + Nov 02 23:18:03 scheduled-stop-206205 systemd[1]: Starting cri-docker.service - CRI Interface for Docker Application Container Engine... + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Starting cri-dockerd dev (HEAD)" + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Connecting to docker on the Endpoint unix:///var/run/docker.sock" + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Start docker client with request timeout 0s" + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Hairpin mode is set to hairpin-veth" + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Loaded network plugin cni" + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Docker cri networking managed by network plugin cni" + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Setting cgroupDriver systemd" + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Docker cri received runtime config &RuntimeConfig{NetworkConfig:&NetworkConfig{PodCidr:,},}" + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Starting the GRPC backend for the Docker CRI interface." + Nov 02 23:18:03 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:03Z" level=info msg="Start cri-dockerd grpc backend" + Nov 02 23:18:03 scheduled-stop-206205 systemd[1]: Started cri-docker.service - CRI Interface for Docker Application Container Engine. 
+ Nov 02 23:18:07 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:07Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/9f2a78be2847fcb06b2823f22f5f0affcfcf7152244128ce42976da2bfaf52cb/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:18:07 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:07Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/0a4584e48de87263ec3357dc6f99df95620088cbed099df4184139013c8ae44b/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:18:07 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:07Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/d7b6e70bdcc89ff1ba082e5ef5211e4a08f08d2dbf1ac60c41d519f51a3ac28a/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + Nov 02 23:18:07 scheduled-stop-206205 cri-dockerd[1362]: time="2025-11-02T23:18:07Z" level=info msg="Will attempt to re-write config file /var/lib/docker/containers/6f36d8ffcdc464b0e96984f4cf3f0b7d21f82ef8dffa4a527b77f1bc4d393f84/resolv.conf as [nameserver 192.168.76.1 search test-pods.svc.cluster.local svc.cluster.local cluster.local c.k8s-infra-prow-build.internal google.internal options ndots:5]" + + + ==> container status <== + CONTAINER IMAGE CREATED STATE NAME ATTEMPT POD ID POD NAMESPACE + 5db718ff95e2b 7dd6aaa1717ab 5 seconds ago Running kube-scheduler 0 6f36d8ffcdc46 kube-scheduler-scheduled-stop-206205 kube-system + bfb168ef202c9 c3994bc696102 5 seconds ago Running kube-apiserver 0 d7b6e70bdcc89 kube-apiserver-scheduled-stop-206205 kube-system + 10cdff77fbe70 c80c8dbafe7dd 5 seconds ago Running kube-controller-manager 0 0a4584e48de87 kube-controller-manager-scheduled-stop-206205 kube-system + ee6b590db1694 5f1f5298c888d 5 seconds ago Running etcd 0 9f2a78be2847f etcd-scheduled-stop-206205 kube-system + + + ==> describe nodes <== + Name: scheduled-stop-206205 + Roles: control-plane + Labels: beta.kubernetes.io/arch=amd64 + beta.kubernetes.io/os=linux + kubernetes.io/arch=amd64 + kubernetes.io/hostname=scheduled-stop-206205 + kubernetes.io/os=linux + minikube.k8s.io/commit=e2222ae36f11d3515cb4a1cbfbc513a974c210e6 + minikube.k8s.io/name=scheduled-stop-206205 + minikube.k8s.io/primary=true + minikube.k8s.io/updated_at=2025_11_02T23_18_11_0700 + minikube.k8s.io/version=v1.37.0 + node-role.kubernetes.io/control-plane= + node.kubernetes.io/exclude-from-external-load-balancers= + Annotations: volumes.kubernetes.io/controller-managed-attach-detach: true + CreationTimestamp: Sun, 02 Nov 2025 23:18:08 +0000 + Taints: node.kubernetes.io/not-ready:NoSchedule + Unschedulable: false + Lease: + HolderIdentity: scheduled-stop-206205 + AcquireTime: + RenewTime: Sun, 02 Nov 2025 23:18:10 +0000 + Conditions: + Type Status LastHeartbeatTime LastTransitionTime Reason Message + ---- ------ ----------------- ------------------ ------ ------- + MemoryPressure False Sun, 02 Nov 2025 23:18:10 +0000 Sun, 02 Nov 2025 23:18:07 +0000 KubeletHasSufficientMemory kubelet has sufficient memory available + DiskPressure False Sun, 02 Nov 2025 23:18:10 +0000 Sun, 02 Nov 2025 23:18:07 +0000 KubeletHasNoDiskPressure kubelet 
has no disk pressure + PIDPressure False Sun, 02 Nov 2025 23:18:10 +0000 Sun, 02 Nov 2025 23:18:07 +0000 KubeletHasSufficientPID kubelet has sufficient PID available + Ready False Sun, 02 Nov 2025 23:18:10 +0000 Sun, 02 Nov 2025 23:18:07 +0000 KubeletNotReady container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized + Addresses: + InternalIP: 192.168.76.2 + Hostname: scheduled-stop-206205 + Capacity: + cpu: 8 + ephemeral-storage: 385926528Ki + hugepages-1Gi: 0 + hugepages-2Mi: 0 + memory: 63789124Ki + pods: 110 + Allocatable: + cpu: 8 + ephemeral-storage: 385926528Ki + hugepages-1Gi: 0 + hugepages-2Mi: 0 + memory: 63789124Ki + pods: 110 + System Info: + Machine ID: 98aac72b9abe9f06f1b9b38568f5cc96 + System UUID: 7a6fb796-ef04-49e4-b906-59d6d0acf104 + Boot ID: 239636f5-8285-461a-a1b0-1dff3163ae78 + Kernel Version: 6.6.97+ + OS Image: Debian GNU/Linux 12 (bookworm) + Operating System: linux + Architecture: amd64 + Container Runtime Version: docker://28.5.1 + Kubelet Version: v1.34.1 + Kube-Proxy Version: + Non-terminated Pods: (4 in total) + Namespace Name CPU Requests CPU Limits Memory Requests Memory Limits Age + --------- ---- ------------ ---------- --------------- ------------- --- + kube-system etcd-scheduled-stop-206205 100m (1%) 0 (0%) 100Mi (0%) 0 (0%) 2s + kube-system kube-apiserver-scheduled-stop-206205 250m (3%) 0 (0%) 0 (0%) 0 (0%) 2s + kube-system kube-controller-manager-scheduled-stop-206205 200m (2%) 0 (0%) 0 (0%) 0 (0%) 2s + kube-system kube-scheduler-scheduled-stop-206205 100m (1%) 0 (0%) 0 (0%) 0 (0%) 2s + Allocated resources: + (Total limits may be over 100 percent, i.e., overcommitted.) + Resource Requests Limits + -------- -------- ------ + cpu 650m (8%) 0 (0%) + memory 100Mi (0%) 0 (0%) + ephemeral-storage 0 (0%) 0 (0%) + hugepages-1Gi 0 (0%) 0 (0%) + hugepages-2Mi 0 (0%) 0 (0%) + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal Starting 2s kubelet Starting kubelet. 
+ Normal NodeAllocatableEnforced 2s kubelet Updated Node Allocatable limit across pods + Normal NodeHasSufficientMemory 2s kubelet Node scheduled-stop-206205 status is now: NodeHasSufficientMemory + Normal NodeHasNoDiskPressure 2s kubelet Node scheduled-stop-206205 status is now: NodeHasNoDiskPressure + Normal NodeHasSufficientPID 2s kubelet Node scheduled-stop-206205 status is now: NodeHasSufficientPID + + + ==> dmesg <== + [ +0.007925] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [ +1.969222] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.007882] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [ +0.055110] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.000000] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.000011] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [ +0.007861] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [Nov 2 23:15] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.007880] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [ +0.056105] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.000007] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.007875] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [ +0.007851] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [ +8.240272] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.000010] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.000008] IPv4: martian source 10.96.0.1 from 10.244.0.2, on dev br-1a04657d00b3 + [ +0.000002] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [ +0.007883] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [ +0.007894] ll header: 00000000: 76 0f 75 c5 83 0d 5a 17 a1 b2 6f fb 08 00 + [Nov 2 23:16] IPv4: martian source 10.244.0.1 from 10.244.0.2, on dev eth0 + [ +0.002729] IPv4: martian source 10.244.0.1 from 10.244.0.3, on dev eth0 + [ +0.004279] ll header: 00000000: ff ff ff ff ff ff f6 da 32 1e 36 f0 08 06 + [ +0.014177] ll header: 00000000: ff ff ff ff ff ff 0e 8f fb 63 f8 c5 08 06 + [Nov 2 23:17] IPv4: martian source 10.244.0.1 from 10.244.0.4, on dev eth0 + [ +0.007032] ll header: 00000000: ff ff ff ff ff ff 3e 52 8f c5 07 40 08 06 + + + ==> etcd [ee6b590db169] <== + {"level":"warn","ts":"2025-11-02T23:18:08.342428Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60338","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.347248Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60344","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.351186Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60350","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.354399Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60360","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.357580Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60384","server-name":"","error":"EOF"} + 
{"level":"warn","ts":"2025-11-02T23:18:08.360743Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60410","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.364728Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60432","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.367889Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60468","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.372142Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60474","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.375389Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60480","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.390957Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60512","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.394409Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60528","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.399792Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60568","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.402813Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60588","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.405858Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60598","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.409006Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60618","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.412203Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60648","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.415052Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60656","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.418142Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60668","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.421304Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60680","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.424343Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60698","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.435025Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60716","server-name":"","error":"EOF"} + 
{"level":"warn","ts":"2025-11-02T23:18:08.438022Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60740","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.441090Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60748","server-name":"","error":"EOF"} + {"level":"warn","ts":"2025-11-02T23:18:08.463684Z","caller":"embed/config_logging.go:188","msg":"rejected connection on client endpoint","remote-addr":"127.0.0.1:60780","server-name":"","error":"EOF"} + + + ==> kernel <== + 23:18:12 up 12 days, 17 min, 0 user, load average: 0.46, 0.78, 1.17 + Linux scheduled-stop-206205 6.6.97+ #1 SMP Fri Aug 22 11:53:37 UTC 2025 x86_64 GNU/Linux + PRETTY_NAME="Debian GNU/Linux 12 (bookworm)" + + + ==> kube-apiserver [bfb168ef202c] <== + I1102 23:18:08.753138 1 default_servicecidr_controller.go:166] Creating default ServiceCIDR with CIDRs: [10.96.0.0/12] + I1102 23:18:08.753051 1 apf_controller.go:382] Running API Priority and Fairness config worker + I1102 23:18:08.753172 1 apf_controller.go:385] Running API Priority and Fairness periodic rebalancing process + I1102 23:18:08.753192 1 handler_discovery.go:451] Starting ResourceDiscoveryManager + I1102 23:18:08.753244 1 cache.go:39] Caches are synced for LocalAvailability controller + I1102 23:18:08.753259 1 cache.go:39] Caches are synced for RemoteAvailability controller + I1102 23:18:08.753943 1 controller.go:667] quota admission added evaluator for: namespaces + I1102 23:18:08.755940 1 default_servicecidr_controller.go:228] Setting default ServiceCIDR condition Ready to True + I1102 23:18:08.755965 1 cidrallocator.go:301] created ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:18:08.760634 1 cidrallocator.go:277] updated ClusterIP allocator for Service CIDR 10.96.0.0/12 + I1102 23:18:08.760833 1 default_servicecidr_controller.go:137] Shutting down kubernetes-service-cidr-controller + I1102 23:18:08.770224 1 controller.go:667] quota admission added evaluator for: leases.coordination.k8s.io + I1102 23:18:09.655189 1 storage_scheduling.go:95] created PriorityClass system-node-critical with value 2000001000 + I1102 23:18:09.657367 1 storage_scheduling.go:95] created PriorityClass system-cluster-critical with value 2000000000 + I1102 23:18:09.657377 1 storage_scheduling.go:111] all system priority classes are created successfully or already exist. 
+ I1102 23:18:09.880746 1 controller.go:667] quota admission added evaluator for: roles.rbac.authorization.k8s.io + I1102 23:18:09.898007 1 controller.go:667] quota admission added evaluator for: rolebindings.rbac.authorization.k8s.io + I1102 23:18:09.956161 1 alloc.go:328] "allocated clusterIPs" service="default/kubernetes" clusterIPs={"IPv4":"10.96.0.1"} + W1102 23:18:09.958846 1 lease.go:265] Resetting endpoints for master service "kubernetes" to [192.168.76.2] + I1102 23:18:09.959340 1 controller.go:667] quota admission added evaluator for: endpoints + I1102 23:18:09.961256 1 controller.go:667] quota admission added evaluator for: endpointslices.discovery.k8s.io + I1102 23:18:10.674093 1 controller.go:667] quota admission added evaluator for: serviceaccounts + I1102 23:18:10.810122 1 controller.go:667] quota admission added evaluator for: deployments.apps + I1102 23:18:10.813893 1 alloc.go:328] "allocated clusterIPs" service="kube-system/kube-dns" clusterIPs={"IPv4":"10.96.0.10"} + I1102 23:18:10.818171 1 controller.go:667] quota admission added evaluator for: daemonsets.apps + + + ==> kube-controller-manager [10cdff77fbe7] <== + I1102 23:18:12.472703 1 controllermanager.go:781] "Started controller" controller="replicaset-controller" + I1102 23:18:12.472763 1 replica_set.go:243] "Starting controller" logger="replicaset-controller" name="replicaset" + I1102 23:18:12.472769 1 shared_informer.go:349] "Waiting for caches to sync" controller="ReplicaSet" + I1102 23:18:12.624073 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kubelet-serving" + I1102 23:18:12.624088 1 shared_informer.go:349] "Waiting for caches to sync" controller="certificate-csrsigning-kubelet-serving" + I1102 23:18:12.624104 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/var/lib/minikube/certs/ca.crt::/var/lib/minikube/certs/ca.key" + I1102 23:18:12.624237 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kubelet-client" + I1102 23:18:12.624254 1 shared_informer.go:349] "Waiting for caches to sync" controller="certificate-csrsigning-kubelet-client" + I1102 23:18:12.624269 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/var/lib/minikube/certs/ca.crt::/var/lib/minikube/certs/ca.key" + I1102 23:18:12.624538 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kube-apiserver-client" + I1102 23:18:12.624549 1 shared_informer.go:349] "Waiting for caches to sync" controller="certificate-csrsigning-kube-apiserver-client" + I1102 23:18:12.624560 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/var/lib/minikube/certs/ca.crt::/var/lib/minikube/certs/ca.key" + I1102 23:18:12.624700 1 controllermanager.go:781] "Started controller" controller="certificatesigningrequest-signing-controller" + I1102 23:18:12.624734 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-legacy-unknown" + I1102 23:18:12.624741 1 shared_informer.go:349] "Waiting for caches to sync" controller="certificate-csrsigning-legacy-unknown" + I1102 23:18:12.624752 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/var/lib/minikube/certs/ca.crt::/var/lib/minikube/certs/ca.key" + I1102 23:18:12.774962 1 
range_allocator.go:112] "No Secondary Service CIDR provided. Skipping filtering out secondary service addresses" logger="node-ipam-controller" + I1102 23:18:12.774993 1 controllermanager.go:781] "Started controller" controller="node-ipam-controller" + I1102 23:18:12.775065 1 node_ipam_controller.go:141] "Starting ipam controller" logger="node-ipam-controller" + I1102 23:18:12.775070 1 shared_informer.go:349] "Waiting for caches to sync" controller="node" + I1102 23:18:12.923282 1 controllermanager.go:781] "Started controller" controller="volumeattributesclass-protection-controller" + I1102 23:18:12.923296 1 controllermanager.go:733] "Controller is disabled by a feature gate" controller="kube-apiserver-serving-clustertrustbundle-publisher-controller" requiredFeatureGates=["ClusterTrustBundle"] + I1102 23:18:12.923311 1 controllermanager.go:733] "Controller is disabled by a feature gate" controller="storageversion-garbage-collector-controller" requiredFeatureGates=["APIServerIdentity","StorageVersionAPI"] + I1102 23:18:12.923338 1 vac_protection_controller.go:206] "Starting VAC protection controller" logger="volumeattributesclass-protection-controller" + I1102 23:18:12.923344 1 shared_informer.go:349] "Waiting for caches to sync" controller="VAC protection" + + + ==> kube-scheduler [5db718ff95e2] <== + E1102 23:18:08.674554 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Node: nodes is forbidden: User \"system:kube-scheduler\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" + E1102 23:18:08.674593 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet" + E1102 23:18:08.674407 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume" + E1102 23:18:08.674737 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity" + E1102 23:18:08.674784 1 reflector.go:205] "Failed to watch" err="failed to list *v1.VolumeAttachment: volumeattachments.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"volumeattachments\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.VolumeAttachment" + E1102 23:18:08.674898 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Pod: pods is forbidden: User \"system:kube-scheduler\" cannot list resource \"pods\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Pod" + E1102 23:18:08.674967 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User 
\"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" + E1102 23:18:08.674987 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice" + E1102 23:18:08.675001 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass" + E1102 23:18:08.675006 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csinodes\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSINode" + E1102 23:18:08.675038 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User \"system:kube-scheduler\" cannot list resource \"poddisruptionbudgets\" in API group \"policy\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PodDisruptionBudget" + E1102 23:18:08.675037 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumeclaims\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolumeClaim" + E1102 23:18:08.675073 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicasets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicaSet" + E1102 23:18:08.675086 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User \"system:kube-scheduler\" cannot list resource \"replicationcontrollers\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ReplicationController" + E1102 23:18:08.675091 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ResourceClaim: resourceclaims.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceclaims\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceClaim" + E1102 23:18:08.675133 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"storageclasses\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StorageClass" + E1102 23:18:09.498694 1 reflector.go:205] 
"Failed to watch" err="failed to list *v1.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"resourceslices\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.ResourceSlice" + E1102 23:18:09.518277 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csistoragecapacities\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIStorageCapacity" + E1102 23:18:09.583032 1 reflector.go:205] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"extension-apiserver-authentication\" is forbidden: User \"system:kube-scheduler\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"kube-system\"" logger="UnhandledError" reflector="runtime/asm_amd64.s:1700" type="*v1.ConfigMap" + E1102 23:18:09.630579 1 reflector.go:205] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" + E1102 23:18:09.656159 1 reflector.go:205] "Failed to watch" err="failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User \"system:kube-scheduler\" cannot list resource \"statefulsets\" in API group \"apps\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.StatefulSet" + E1102 23:18:09.734211 1 reflector.go:205] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:kube-scheduler\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" + E1102 23:18:09.749758 1 reflector.go:205] "Failed to watch" err="failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User \"system:kube-scheduler\" cannot list resource \"persistentvolumes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.PersistentVolume" + E1102 23:18:09.761325 1 reflector.go:205] "Failed to watch" err="failed to list *v1.DeviceClass: deviceclasses.resource.k8s.io is forbidden: User \"system:kube-scheduler\" cannot list resource \"deviceclasses\" in API group \"resource.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.DeviceClass" + I1102 23:18:12.573391 1 shared_informer.go:356] "Caches are synced" controller="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" + + + ==> kubelet <== + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760447 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-certs\" (UniqueName: \"kubernetes.io/host-path/13b7bafac6e39482382d174d83e53f4c-etcd-certs\") pod \"etcd-scheduled-stop-206205\" (UID: \"13b7bafac6e39482382d174d83e53f4c\") " pod="kube-system/etcd-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760455 2229 reconciler_common.go:251] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/4323dcc9d541be8dde00261aeb0f0cee-etc-ca-certificates\") pod \"kube-apiserver-scheduled-stop-206205\" (UID: \"4323dcc9d541be8dde00261aeb0f0cee\") " pod="kube-system/kube-apiserver-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760466 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubeconfig\" (UniqueName: \"kubernetes.io/host-path/e591286ccb0f48534698f161726ee9e8-kubeconfig\") pod \"kube-controller-manager-scheduled-stop-206205\" (UID: \"e591286ccb0f48534698f161726ee9e8\") " pod="kube-system/kube-controller-manager-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760508 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/4323dcc9d541be8dde00261aeb0f0cee-ca-certs\") pod \"kube-apiserver-scheduled-stop-206205\" (UID: \"4323dcc9d541be8dde00261aeb0f0cee\") " pod="kube-system/kube-apiserver-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760530 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"flexvolume-dir\" (UniqueName: \"kubernetes.io/host-path/e591286ccb0f48534698f161726ee9e8-flexvolume-dir\") pod \"kube-controller-manager-scheduled-stop-206205\" (UID: \"e591286ccb0f48534698f161726ee9e8\") " pod="kube-system/kube-controller-manager-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760545 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"k8s-certs\" (UniqueName: \"kubernetes.io/host-path/4323dcc9d541be8dde00261aeb0f0cee-k8s-certs\") pod \"kube-apiserver-scheduled-stop-206205\" (UID: \"4323dcc9d541be8dde00261aeb0f0cee\") " pod="kube-system/kube-apiserver-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760563 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/4323dcc9d541be8dde00261aeb0f0cee-usr-local-share-ca-certificates\") pod \"kube-apiserver-scheduled-stop-206205\" (UID: \"4323dcc9d541be8dde00261aeb0f0cee\") " pod="kube-system/kube-apiserver-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760589 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/host-path/e591286ccb0f48534698f161726ee9e8-ca-certs\") pod \"kube-controller-manager-scheduled-stop-206205\" (UID: \"e591286ccb0f48534698f161726ee9e8\") " pod="kube-system/kube-controller-manager-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760612 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/e591286ccb0f48534698f161726ee9e8-etc-ca-certificates\") pod \"kube-controller-manager-scheduled-stop-206205\" (UID: \"e591286ccb0f48534698f161726ee9e8\") " pod="kube-system/kube-controller-manager-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760621 2229 reconciler_common.go:251] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/e591286ccb0f48534698f161726ee9e8-usr-local-share-ca-certificates\") pod \"kube-controller-manager-scheduled-stop-206205\" (UID: \"e591286ccb0f48534698f161726ee9e8\") " pod="kube-system/kube-controller-manager-scheduled-stop-206205" + Nov 02 23:18:10 scheduled-stop-206205 kubelet[2229]: I1102 23:18:10.760633 2229 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-share-ca-certificates\" (UniqueName: \"kubernetes.io/host-path/e591286ccb0f48534698f161726ee9e8-usr-share-ca-certificates\") pod \"kube-controller-manager-scheduled-stop-206205\" (UID: \"e591286ccb0f48534698f161726ee9e8\") " pod="kube-system/kube-controller-manager-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.553524 2229 apiserver.go:52] "Watching apiserver" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.559667 2229 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.588855 2229 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-scheduler-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.588993 2229 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/etcd-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.589025 2229 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-apiserver-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.589043 2229 kubelet.go:3219] "Creating a mirror pod for static pod" pod="kube-system/kube-controller-manager-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: E1102 23:18:11.592134 2229 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-controller-manager-scheduled-stop-206205\" already exists" pod="kube-system/kube-controller-manager-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: E1102 23:18:11.592531 2229 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-apiserver-scheduled-stop-206205\" already exists" pod="kube-system/kube-apiserver-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: E1102 23:18:11.592563 2229 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"kube-scheduler-scheduled-stop-206205\" already exists" pod="kube-system/kube-scheduler-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: E1102 23:18:11.592723 2229 kubelet.go:3221] "Failed creating a mirror pod" err="pods \"etcd-scheduled-stop-206205\" already exists" pod="kube-system/etcd-scheduled-stop-206205" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.605300 2229 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/etcd-scheduled-stop-206205" podStartSLOduration=1.6052884139999999 podStartE2EDuration="1.605288414s" podCreationTimestamp="2025-11-02 23:18:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:18:11.600546288 +0000 UTC m=+1.083824257" watchObservedRunningTime="2025-11-02 23:18:11.605288414 +0000 UTC m=+1.088566378" + Nov 02 23:18:11 scheduled-stop-206205 
kubelet[2229]: I1102 23:18:11.610015 2229 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-controller-manager-scheduled-stop-206205" podStartSLOduration=1.610003192 podStartE2EDuration="1.610003192s" podCreationTimestamp="2025-11-02 23:18:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:18:11.60551876 +0000 UTC m=+1.088796727" watchObservedRunningTime="2025-11-02 23:18:11.610003192 +0000 UTC m=+1.093281161" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.616609 2229 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-apiserver-scheduled-stop-206205" podStartSLOduration=1.616455215 podStartE2EDuration="1.616455215s" podCreationTimestamp="2025-11-02 23:18:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:18:11.610111732 +0000 UTC m=+1.093389699" watchObservedRunningTime="2025-11-02 23:18:11.616455215 +0000 UTC m=+1.099733181" + Nov 02 23:18:11 scheduled-stop-206205 kubelet[2229]: I1102 23:18:11.616714 2229 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="kube-system/kube-scheduler-scheduled-stop-206205" podStartSLOduration=1.616707898 podStartE2EDuration="1.616707898s" podCreationTimestamp="2025-11-02 23:18:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-02 23:18:11.616318549 +0000 UTC m=+1.099596516" watchObservedRunningTime="2025-11-02 23:18:11.616707898 +0000 UTC m=+1.099985865" + + + -- /stdout -- + helpers_test.go:262: (dbg) Run: out/minikube-linux-amd64 status --format={{.APIServer}} -p scheduled-stop-206205 -n scheduled-stop-206205 + helpers_test.go:269: (dbg) Run: kubectl --context scheduled-stop-206205 get po -o=jsonpath={.items[*].metadata.name} -A --field-selector=status.phase!=Running + helpers_test.go:280: non-running pods: storage-provisioner + helpers_test.go:282: ======> post-mortem[TestScheduledStopUnix]: describe non-running pods <====== + helpers_test.go:285: (dbg) Run: kubectl --context scheduled-stop-206205 describe pod storage-provisioner + helpers_test.go:285: (dbg) Non-zero exit: kubectl --context scheduled-stop-206205 describe pod storage-provisioner: exit status 1 (34.067242ms) + + ** stderr ** + Error from server (NotFound): pods "storage-provisioner" not found + + ** /stderr ** + helpers_test.go:287: kubectl --context scheduled-stop-206205 describe pod storage-provisioner: exit status 1 + helpers_test.go:175: Cleaning up "scheduled-stop-206205" profile ... 
+ helpers_test.go:178: (dbg) Run: out/minikube-linux-amd64 delete -p scheduled-stop-206205 + helpers_test.go:178: (dbg) Done: out/minikube-linux-amd64 delete -p scheduled-stop-206205: (1.661455603s) + +DONE 417 tests, 23 skipped, 1 failure in 2599.008s +++ result=1 +++ set +x +>> out/e2e-linux-amd64 exited with 1 at Sun Nov 2 23:29:46 UTC 2025 + +Not saving to DB +{ + "NumberOfTests": 347, + "NumberOfFail": 1, + "NumberOfPass": 323, + "NumberOfSkip": 23, + "FailedTests": [ + "TestScheduledStopUnix" + ], + "PassedTests": [ + "TestDownloadOnly/v1.28.0/json-events", + "TestDownloadOnly/v1.28.0/preload-exists", + "TestDownloadOnly/v1.28.0/LogsDuration", + "TestDownloadOnly/v1.28.0/DeleteAll", + "TestDownloadOnly/v1.28.0/DeleteAlwaysSucceeds", + "TestDownloadOnly/v1.34.1/json-events", + "TestDownloadOnly/v1.34.1/preload-exists", + "TestDownloadOnly/v1.34.1/LogsDuration", + "TestDownloadOnly/v1.34.1/DeleteAll", + "TestDownloadOnly/v1.34.1/DeleteAlwaysSucceeds", + "TestDownloadOnlyKic", + "TestBinaryMirror", + "TestOffline", + "TestAddons/PreSetup/EnablingAddonOnNonExistingCluster", + "TestAddons/PreSetup/DisablingAddonOnNonExistingCluster", + "TestAddons/Setup", + "TestAddons/serial/Volcano", + "TestAddons/serial/GCPAuth/Namespaces", + "TestAddons/serial/GCPAuth/FakeCredentials", + "TestAddons/parallel/Registry", + "TestAddons/parallel/RegistryCreds", + "TestAddons/parallel/Ingress", + "TestAddons/parallel/InspektorGadget", + "TestAddons/parallel/MetricsServer", + "TestAddons/parallel/CSI", + "TestAddons/parallel/Headlamp", + "TestAddons/parallel/CloudSpanner", + "TestAddons/parallel/LocalPath", + "TestAddons/parallel/NvidiaDevicePlugin", + "TestAddons/parallel/Yakd", + "TestAddons/parallel/AmdGpuDevicePlugin", + "TestAddons/StoppedEnableDisable", + "TestCertOptions", + "TestCertExpiration", + "TestDockerFlags", + "TestForceSystemdFlag", + "TestForceSystemdEnv", + "TestErrorSpam/setup", + "TestErrorSpam/start", + "TestErrorSpam/status", + "TestErrorSpam/pause", + "TestErrorSpam/unpause", + "TestErrorSpam/stop", + "TestFunctional/serial/CopySyncFile", + "TestFunctional/serial/StartWithProxy", + "TestFunctional/serial/AuditLog", + "TestFunctional/serial/SoftStart", + "TestFunctional/serial/KubeContext", + "TestFunctional/serial/KubectlGetPods", + "TestFunctional/serial/CacheCmd/cache/add_remote", + "TestFunctional/serial/CacheCmd/cache/add_local", + "TestFunctional/serial/CacheCmd/cache/CacheDelete", + "TestFunctional/serial/CacheCmd/cache/list", + "TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node", + "TestFunctional/serial/CacheCmd/cache/cache_reload", + "TestFunctional/serial/CacheCmd/cache/delete", + "TestFunctional/serial/MinikubeKubectlCmd", + "TestFunctional/serial/MinikubeKubectlCmdDirectly", + "TestFunctional/serial/ExtraConfig", + "TestFunctional/serial/ComponentHealth", + "TestFunctional/serial/LogsCmd", + "TestFunctional/serial/LogsFileCmd", + "TestFunctional/serial/InvalidService", + "TestFunctional/parallel/ConfigCmd", + "TestFunctional/parallel/DashboardCmd", + "TestFunctional/parallel/DryRun", + "TestFunctional/parallel/InternationalLanguage", + "TestFunctional/parallel/StatusCmd", + "TestFunctional/parallel/ServiceCmdConnect", + "TestFunctional/parallel/AddonsCmd", + "TestFunctional/parallel/PersistentVolumeClaim", + "TestFunctional/parallel/SSHCmd", + "TestFunctional/parallel/CpCmd", + "TestFunctional/parallel/MySQL", + "TestFunctional/parallel/FileSync", + "TestFunctional/parallel/CertSync", + "TestFunctional/parallel/NodeLabels", + 
"TestFunctional/parallel/NonActiveRuntimeDisabled", + "TestFunctional/parallel/License", + "TestFunctional/parallel/ServiceCmd/DeployApp", + "TestFunctional/parallel/Version/short", + "TestFunctional/parallel/Version/components", + "TestFunctional/parallel/ImageCommands/ImageListShort", + "TestFunctional/parallel/ImageCommands/ImageListTable", + "TestFunctional/parallel/ImageCommands/ImageListJson", + "TestFunctional/parallel/ImageCommands/ImageListYaml", + "TestFunctional/parallel/ImageCommands/ImageBuild", + "TestFunctional/parallel/ImageCommands/Setup", + "TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel", + "TestFunctional/parallel/ImageCommands/ImageLoadDaemon", + "TestFunctional/parallel/TunnelCmd/serial/StartTunnel", + "TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup", + "TestFunctional/parallel/ImageCommands/ImageReloadDaemon", + "TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon", + "TestFunctional/parallel/ImageCommands/ImageSaveToFile", + "TestFunctional/parallel/ImageCommands/ImageRemove", + "TestFunctional/parallel/ImageCommands/ImageLoadFromFile", + "TestFunctional/parallel/ImageCommands/ImageSaveDaemon", + "TestFunctional/parallel/ProfileCmd/profile_not_create", + "TestFunctional/parallel/ProfileCmd/profile_list", + "TestFunctional/parallel/ProfileCmd/profile_json_output", + "TestFunctional/parallel/MountCmd/any-port", + "TestFunctional/parallel/ServiceCmd/List", + "TestFunctional/parallel/ServiceCmd/JSONOutput", + "TestFunctional/parallel/ServiceCmd/HTTPS", + "TestFunctional/parallel/ServiceCmd/Format", + "TestFunctional/parallel/ServiceCmd/URL", + "TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP", + "TestFunctional/parallel/TunnelCmd/serial/AccessDirect", + "TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel", + "TestFunctional/parallel/DockerEnv/bash", + "TestFunctional/parallel/UpdateContextCmd/no_changes", + "TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster", + "TestFunctional/parallel/UpdateContextCmd/no_clusters", + "TestFunctional/parallel/MountCmd/specific-port", + "TestFunctional/parallel/MountCmd/VerifyCleanup", + "TestFunctional/delete_echo-server_images", + "TestFunctional/delete_my-image_image", + "TestFunctional/delete_minikube_cached_images", + "TestMultiControlPlane/serial/StartCluster", + "TestMultiControlPlane/serial/DeployApp", + "TestMultiControlPlane/serial/PingHostFromPods", + "TestMultiControlPlane/serial/AddWorkerNode", + "TestMultiControlPlane/serial/NodeLabels", + "TestMultiControlPlane/serial/HAppyAfterClusterStart", + "TestMultiControlPlane/serial/CopyFile", + "TestMultiControlPlane/serial/StopSecondaryNode", + "TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop", + "TestMultiControlPlane/serial/RestartSecondaryNode", + "TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart", + "TestMultiControlPlane/serial/RestartClusterKeepsNodes", + "TestMultiControlPlane/serial/DeleteSecondaryNode", + "TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete", + "TestMultiControlPlane/serial/StopCluster", + "TestMultiControlPlane/serial/RestartCluster", + "TestMultiControlPlane/serial/DegradedAfterClusterRestart", + "TestMultiControlPlane/serial/AddSecondaryNode", + "TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd", + "TestImageBuild/serial/Setup", + "TestImageBuild/serial/NormalBuild", + "TestImageBuild/serial/BuildWithBuildArg", + "TestImageBuild/serial/BuildWithDockerIgnore", + "TestImageBuild/serial/BuildWithSpecifiedDockerfile", + "TestJSONOutput/start/Command", + 
"TestJSONOutput/start/Audit", + "TestJSONOutput/start/parallel/DistinctCurrentSteps", + "TestJSONOutput/start/parallel/IncreasingCurrentSteps", + "TestJSONOutput/pause/Command", + "TestJSONOutput/pause/Audit", + "TestJSONOutput/pause/parallel/DistinctCurrentSteps", + "TestJSONOutput/pause/parallel/IncreasingCurrentSteps", + "TestJSONOutput/unpause/Command", + "TestJSONOutput/unpause/Audit", + "TestJSONOutput/unpause/parallel/DistinctCurrentSteps", + "TestJSONOutput/unpause/parallel/IncreasingCurrentSteps", + "TestJSONOutput/stop/Command", + "TestJSONOutput/stop/Audit", + "TestJSONOutput/stop/parallel/DistinctCurrentSteps", + "TestJSONOutput/stop/parallel/IncreasingCurrentSteps", + "TestErrorJSONOutput", + "TestKicCustomNetwork/create_custom_network", + "TestKicCustomNetwork/use_default_bridge_network", + "TestKicExistingNetwork", + "TestKicCustomSubnet", + "TestKicStaticIP", + "TestMainNoArgs", + "TestMinikubeProfile", + "TestMountStart/serial/StartWithMountFirst", + "TestMountStart/serial/VerifyMountFirst", + "TestMountStart/serial/StartWithMountSecond", + "TestMountStart/serial/VerifyMountSecond", + "TestMountStart/serial/DeleteFirst", + "TestMountStart/serial/VerifyMountPostDelete", + "TestMountStart/serial/Stop", + "TestMountStart/serial/RestartStopped", + "TestMountStart/serial/VerifyMountPostStop", + "TestMultiNode/serial/FreshStart2Nodes", + "TestMultiNode/serial/DeployApp2Nodes", + "TestMultiNode/serial/PingHostFrom2Pods", + "TestMultiNode/serial/AddNode", + "TestMultiNode/serial/MultiNodeLabels", + "TestMultiNode/serial/ProfileList", + "TestMultiNode/serial/CopyFile", + "TestMultiNode/serial/StopNode", + "TestMultiNode/serial/StartAfterStop", + "TestMultiNode/serial/RestartKeepsNodes", + "TestMultiNode/serial/DeleteNode", + "TestMultiNode/serial/StopMultiNode", + "TestMultiNode/serial/RestartMultiNode", + "TestMultiNode/serial/ValidateNameConflict", + "TestPreload", + "TestSkaffold", + "TestInsufficientStorage", + "TestRunningBinaryUpgrade", + "TestKubernetesUpgrade", + "TestMissingContainerUpgrade", + "TestNoKubernetes/serial/StartNoK8sWithVersion", + "TestNoKubernetes/serial/StartWithK8s", + "TestNoKubernetes/serial/StartWithStopK8s", + "TestNoKubernetes/serial/Start", + "TestNoKubernetes/serial/VerifyK8sNotRunning", + "TestNoKubernetes/serial/ProfileList", + "TestStoppedBinaryUpgrade/Setup", + "TestStoppedBinaryUpgrade/Upgrade", + "TestNoKubernetes/serial/Stop", + "TestNoKubernetes/serial/StartNoArgs", + "TestNoKubernetes/serial/VerifyK8sNotRunningSecond", + "TestStoppedBinaryUpgrade/MinikubeLogs", + "TestPause/serial/Start", + "TestPause/serial/SecondStartNoReconfiguration", + "TestNetworkPlugins/group/auto/Start", + "TestNetworkPlugins/group/kindnet/Start", + "TestNetworkPlugins/group/calico/Start", + "TestPause/serial/Pause", + "TestNetworkPlugins/group/auto/KubeletFlags", + "TestNetworkPlugins/group/auto/NetCatPod", + "TestPause/serial/VerifyStatus", + "TestPause/serial/Unpause", + "TestPause/serial/PauseAgain", + "TestPause/serial/DeletePaused", + "TestPause/serial/VerifyDeletedResources", + "TestNetworkPlugins/group/custom-flannel/Start", + "TestNetworkPlugins/group/auto/DNS", + "TestNetworkPlugins/group/auto/Localhost", + "TestNetworkPlugins/group/auto/HairPin", + "TestNetworkPlugins/group/false/Start", + "TestNetworkPlugins/group/calico/ControllerPod", + "TestNetworkPlugins/group/calico/KubeletFlags", + "TestNetworkPlugins/group/calico/NetCatPod", + "TestNetworkPlugins/group/kindnet/ControllerPod", + "TestNetworkPlugins/group/custom-flannel/KubeletFlags", + 
"TestNetworkPlugins/group/custom-flannel/NetCatPod", + "TestNetworkPlugins/group/kindnet/KubeletFlags", + "TestNetworkPlugins/group/kindnet/NetCatPod", + "TestNetworkPlugins/group/calico/DNS", + "TestNetworkPlugins/group/calico/Localhost", + "TestNetworkPlugins/group/calico/HairPin", + "TestNetworkPlugins/group/custom-flannel/DNS", + "TestNetworkPlugins/group/custom-flannel/Localhost", + "TestNetworkPlugins/group/custom-flannel/HairPin", + "TestNetworkPlugins/group/false/KubeletFlags", + "TestNetworkPlugins/group/false/NetCatPod", + "TestNetworkPlugins/group/kindnet/DNS", + "TestNetworkPlugins/group/kindnet/Localhost", + "TestNetworkPlugins/group/kindnet/HairPin", + "TestNetworkPlugins/group/false/DNS", + "TestNetworkPlugins/group/false/Localhost", + "TestNetworkPlugins/group/false/HairPin", + "TestNetworkPlugins/group/bridge/Start", + "TestNetworkPlugins/group/flannel/Start", + "TestNetworkPlugins/group/kubenet/Start", + "TestNetworkPlugins/group/enable-default-cni/Start", + "TestNetworkPlugins/group/flannel/ControllerPod", + "TestNetworkPlugins/group/flannel/KubeletFlags", + "TestNetworkPlugins/group/flannel/NetCatPod", + "TestNetworkPlugins/group/bridge/KubeletFlags", + "TestNetworkPlugins/group/bridge/NetCatPod", + "TestNetworkPlugins/group/kubenet/KubeletFlags", + "TestNetworkPlugins/group/flannel/DNS", + "TestNetworkPlugins/group/kubenet/NetCatPod", + "TestNetworkPlugins/group/flannel/Localhost", + "TestNetworkPlugins/group/flannel/HairPin", + "TestNetworkPlugins/group/bridge/DNS", + "TestNetworkPlugins/group/bridge/Localhost", + "TestNetworkPlugins/group/bridge/HairPin", + "TestNetworkPlugins/group/kubenet/DNS", + "TestNetworkPlugins/group/kubenet/Localhost", + "TestNetworkPlugins/group/kubenet/HairPin", + "TestNetworkPlugins/group/enable-default-cni/KubeletFlags", + "TestNetworkPlugins/group/enable-default-cni/NetCatPod", + "TestStartStop/group/old-k8s-version/serial/FirstStart", + "TestStartStop/group/no-preload/serial/FirstStart", + "TestNetworkPlugins/group/enable-default-cni/DNS", + "TestNetworkPlugins/group/enable-default-cni/Localhost", + "TestNetworkPlugins/group/enable-default-cni/HairPin", + "TestStartStop/group/default-k8s-diff-port/serial/FirstStart", + "TestStartStop/group/newest-cni/serial/FirstStart", + "TestStartStop/group/old-k8s-version/serial/DeployApp", + "TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive", + "TestStartStop/group/old-k8s-version/serial/Stop", + "TestStartStop/group/newest-cni/serial/DeployApp", + "TestStartStop/group/newest-cni/serial/EnableAddonWhileActive", + "TestStartStop/group/newest-cni/serial/Stop", + "TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop", + "TestStartStop/group/old-k8s-version/serial/SecondStart", + "TestStartStop/group/newest-cni/serial/EnableAddonAfterStop", + "TestStartStop/group/newest-cni/serial/SecondStart", + "TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop", + "TestStartStop/group/newest-cni/serial/AddonExistsAfterStop", + "TestStartStop/group/newest-cni/serial/VerifyKubernetesImages", + "TestStartStop/group/no-preload/serial/DeployApp", + "TestStartStop/group/newest-cni/serial/Pause", + "TestStartStop/group/embed-certs/serial/FirstStart", + "TestStartStop/group/default-k8s-diff-port/serial/DeployApp", + "TestStartStop/group/no-preload/serial/EnableAddonWhileActive", + "TestStartStop/group/no-preload/serial/Stop", + "TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive", + "TestStartStop/group/default-k8s-diff-port/serial/Stop", + 
"TestStartStop/group/no-preload/serial/EnableAddonAfterStop", + "TestStartStop/group/no-preload/serial/SecondStart", + "TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop", + "TestStartStop/group/default-k8s-diff-port/serial/SecondStart", + "TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop", + "TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop", + "TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages", + "TestStartStop/group/old-k8s-version/serial/Pause", + "TestStartStop/group/embed-certs/serial/DeployApp", + "TestStartStop/group/no-preload/serial/UserAppExistsAfterStop", + "TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop", + "TestStartStop/group/no-preload/serial/AddonExistsAfterStop", + "TestStartStop/group/embed-certs/serial/EnableAddonWhileActive", + "TestStartStop/group/embed-certs/serial/Stop", + "TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop", + "TestStartStop/group/no-preload/serial/VerifyKubernetesImages", + "TestStartStop/group/no-preload/serial/Pause", + "TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages", + "TestStartStop/group/default-k8s-diff-port/serial/Pause", + "TestStartStop/group/embed-certs/serial/EnableAddonAfterStop", + "TestStartStop/group/embed-certs/serial/SecondStart", + "TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop", + "TestStartStop/group/embed-certs/serial/AddonExistsAfterStop", + "TestStartStop/group/embed-certs/serial/VerifyKubernetesImages", + "TestStartStop/group/embed-certs/serial/Pause" + ], + "SkippedTests": [ + "TestDownloadOnly/v1.28.0/cached-images", + "TestDownloadOnly/v1.28.0/binaries", + "TestDownloadOnly/v1.28.0/kubectl", + "TestDownloadOnly/v1.34.1/cached-images", + "TestDownloadOnly/v1.34.1/binaries", + "TestDownloadOnly/v1.34.1/kubectl", + "TestAddons/serial/GCPAuth/RealCredentials", + "TestAddons/parallel/Olm", + "TestDockerEnvContainerd", + "TestHyperKitDriverInstallOrUpdate", + "TestHyperkitDriverSkipUpgrade", + "TestFunctional/parallel/PodmanEnv", + "TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDig", + "TestFunctional/parallel/TunnelCmd/serial/DNSResolutionByDscacheutil", + "TestFunctional/parallel/TunnelCmd/serial/AccessThroughDNS", + "TestFunctionalNewestKubernetes", + "TestGvisorAddon", + "TestImageBuild/serial/validateImageBuildWithBuildEnv", + "TestISOImage", + "TestChangeNoneUser", + "TestScheduledStopWindows", + "TestNetworkPlugins/group/cilium", + "TestStartStop/group/disable-driver-mounts" + ], + "Durations": { + "TestAddons/PreSetup/DisablingAddonOnNonExistingCluster": 0.03, + "TestAddons/PreSetup/EnablingAddonOnNonExistingCluster": 0.03, + "TestAddons/Setup": 183.37, + "TestAddons/StoppedEnableDisable": 10.95, + "TestAddons/parallel/AmdGpuDevicePlugin": 5.32, + "TestAddons/parallel/CSI": 42.2, + "TestAddons/parallel/CloudSpanner": 5.32, + "TestAddons/parallel/Headlamp": 28.88, + "TestAddons/parallel/Ingress": 27.7, + "TestAddons/parallel/InspektorGadget": 6.16, + "TestAddons/parallel/LocalPath": 66.02, + "TestAddons/parallel/MetricsServer": 5.42, + "TestAddons/parallel/NvidiaDevicePlugin": 5.31, + "TestAddons/parallel/Registry": 24.52, + "TestAddons/parallel/RegistryCreds": 0.39, + "TestAddons/parallel/Yakd": 10.41, + "TestAddons/serial/GCPAuth/FakeCredentials": 8.3, + "TestAddons/serial/GCPAuth/Namespaces": 0.07, + "TestAddons/serial/Volcano": 44.73, + "TestBinaryMirror": 0.33, + "TestCertExpiration": 242.48, + "TestCertOptions": 21.13, + "TestDockerFlags": 20.73, + 
"TestDownloadOnly/v1.28.0/DeleteAll": 0.12, + "TestDownloadOnly/v1.28.0/DeleteAlwaysSucceeds": 0.07, + "TestDownloadOnly/v1.28.0/LogsDuration": 0.03, + "TestDownloadOnly/v1.28.0/json-events": 6.31, + "TestDownloadOnly/v1.28.0/preload-exists": 0, + "TestDownloadOnly/v1.34.1/DeleteAll": 0.12, + "TestDownloadOnly/v1.34.1/DeleteAlwaysSucceeds": 0.07, + "TestDownloadOnly/v1.34.1/LogsDuration": 0.03, + "TestDownloadOnly/v1.34.1/json-events": 3.87, + "TestDownloadOnly/v1.34.1/preload-exists": 0, + "TestDownloadOnlyKic": 0.19, + "TestErrorJSONOutput": 0.11, + "TestErrorSpam/pause": 0.79, + "TestErrorSpam/setup": 19.58, + "TestErrorSpam/start": 0.33, + "TestErrorSpam/status": 0.59, + "TestErrorSpam/stop": 10.58, + "TestErrorSpam/unpause": 0.8, + "TestForceSystemdEnv": 18.63, + "TestForceSystemdFlag": 29.59, + "TestFunctional/delete_echo-server_images": 0.02, + "TestFunctional/delete_minikube_cached_images": 0.01, + "TestFunctional/delete_my-image_image": 0.01, + "TestFunctional/parallel/AddonsCmd": 0.07, + "TestFunctional/parallel/CertSync": 1.04, + "TestFunctional/parallel/ConfigCmd": 0.18, + "TestFunctional/parallel/CpCmd": 1.1, + "TestFunctional/parallel/DashboardCmd": 29.84, + "TestFunctional/parallel/DockerEnv/bash": 0.61, + "TestFunctional/parallel/DryRun": 0.18, + "TestFunctional/parallel/FileSync": 0.17, + "TestFunctional/parallel/ImageCommands/ImageBuild": 9.09, + "TestFunctional/parallel/ImageCommands/ImageListJson": 0.15, + "TestFunctional/parallel/ImageCommands/ImageListShort": 0.15, + "TestFunctional/parallel/ImageCommands/ImageListTable": 0.15, + "TestFunctional/parallel/ImageCommands/ImageListYaml": 0.15, + "TestFunctional/parallel/ImageCommands/ImageLoadDaemon": 0.62, + "TestFunctional/parallel/ImageCommands/ImageLoadFromFile": 0.38, + "TestFunctional/parallel/ImageCommands/ImageReloadDaemon": 0.52, + "TestFunctional/parallel/ImageCommands/ImageRemove": 0.29, + "TestFunctional/parallel/ImageCommands/ImageSaveDaemon": 0.21, + "TestFunctional/parallel/ImageCommands/ImageSaveToFile": 0.19, + "TestFunctional/parallel/ImageCommands/ImageTagAndLoadDaemon": 0.85, + "TestFunctional/parallel/ImageCommands/Setup": 0.69, + "TestFunctional/parallel/InternationalLanguage": 0.08, + "TestFunctional/parallel/License": 0.3, + "TestFunctional/parallel/MountCmd/VerifyCleanup": 1.33, + "TestFunctional/parallel/MountCmd/any-port": 25.84, + "TestFunctional/parallel/MountCmd/specific-port": 1.46, + "TestFunctional/parallel/MySQL": 25.18, + "TestFunctional/parallel/NodeLabels": 0.04, + "TestFunctional/parallel/NonActiveRuntimeDisabled": 0.19, + "TestFunctional/parallel/PersistentVolumeClaim": 59.83, + "TestFunctional/parallel/ProfileCmd/profile_json_output": 0.25, + "TestFunctional/parallel/ProfileCmd/profile_list": 0.24, + "TestFunctional/parallel/ProfileCmd/profile_not_create": 0.25, + "TestFunctional/parallel/SSHCmd": 0.38, + "TestFunctional/parallel/ServiceCmd/DeployApp": 11.1, + "TestFunctional/parallel/ServiceCmd/Format": 0.21, + "TestFunctional/parallel/ServiceCmd/HTTPS": 0.22, + "TestFunctional/parallel/ServiceCmd/JSONOutput": 0.2, + "TestFunctional/parallel/ServiceCmd/List": 0.21, + "TestFunctional/parallel/ServiceCmd/URL": 0.21, + "TestFunctional/parallel/ServiceCmdConnect": 18.5, + "TestFunctional/parallel/StatusCmd": 0.61, + "TestFunctional/parallel/TunnelCmd/serial/AccessDirect": 0, + "TestFunctional/parallel/TunnelCmd/serial/DeleteTunnel": 0.11, + "TestFunctional/parallel/TunnelCmd/serial/RunSecondTunnel": 0.4, + "TestFunctional/parallel/TunnelCmd/serial/StartTunnel": 0, + 
"TestFunctional/parallel/TunnelCmd/serial/WaitService/IngressIP": 0.03, + "TestFunctional/parallel/TunnelCmd/serial/WaitService/Setup": 22.1, + "TestFunctional/parallel/UpdateContextCmd/no_changes": 0.07, + "TestFunctional/parallel/UpdateContextCmd/no_clusters": 0.07, + "TestFunctional/parallel/UpdateContextCmd/no_minikube_cluster": 0.07, + "TestFunctional/parallel/Version/components": 0.3, + "TestFunctional/parallel/Version/short": 0.03, + "TestFunctional/serial/AuditLog": 0, + "TestFunctional/serial/CacheCmd/cache/CacheDelete": 0.03, + "TestFunctional/serial/CacheCmd/cache/add_local": 0.69, + "TestFunctional/serial/CacheCmd/cache/add_remote": 1.86, + "TestFunctional/serial/CacheCmd/cache/cache_reload": 0.87, + "TestFunctional/serial/CacheCmd/cache/delete": 0.05, + "TestFunctional/serial/CacheCmd/cache/list": 0.03, + "TestFunctional/serial/CacheCmd/cache/verify_cache_inside_node": 0.19, + "TestFunctional/serial/ComponentHealth": 0.04, + "TestFunctional/serial/CopySyncFile": 0, + "TestFunctional/serial/ExtraConfig": 46.18, + "TestFunctional/serial/InvalidService": 4.23, + "TestFunctional/serial/KubeContext": 0.03, + "TestFunctional/serial/KubectlGetPods": 0.04, + "TestFunctional/serial/LogsCmd": 0.56, + "TestFunctional/serial/LogsFileCmd": 0.56, + "TestFunctional/serial/MinikubeKubectlCmd": 0.06, + "TestFunctional/serial/MinikubeKubectlCmdDirectly": 0.06, + "TestFunctional/serial/SoftStart": 55.24, + "TestFunctional/serial/StartWithProxy": 64.78, + "TestImageBuild/serial/BuildWithBuildArg": 0.51, + "TestImageBuild/serial/BuildWithDockerIgnore": 0.31, + "TestImageBuild/serial/BuildWithSpecifiedDockerfile": 0.28, + "TestImageBuild/serial/NormalBuild": 0.79, + "TestImageBuild/serial/Setup": 19.35, + "TestInsufficientStorage": 10.91, + "TestJSONOutput/pause/Audit": 0, + "TestJSONOutput/pause/Command": 0.34, + "TestJSONOutput/pause/parallel/DistinctCurrentSteps": 0, + "TestJSONOutput/pause/parallel/IncreasingCurrentSteps": 0, + "TestJSONOutput/start/Audit": 0, + "TestJSONOutput/start/Command": 67.1, + "TestJSONOutput/start/parallel/DistinctCurrentSteps": 0, + "TestJSONOutput/start/parallel/IncreasingCurrentSteps": 0, + "TestJSONOutput/stop/Audit": 0, + "TestJSONOutput/stop/Command": 10.54, + "TestJSONOutput/stop/parallel/DistinctCurrentSteps": 0, + "TestJSONOutput/stop/parallel/IncreasingCurrentSteps": 0, + "TestJSONOutput/unpause/Audit": 0, + "TestJSONOutput/unpause/Command": 0.3, + "TestJSONOutput/unpause/parallel/DistinctCurrentSteps": 0, + "TestJSONOutput/unpause/parallel/IncreasingCurrentSteps": 0, + "TestKicCustomNetwork/create_custom_network": 17.85, + "TestKicCustomNetwork/use_default_bridge_network": 21.11, + "TestKicCustomSubnet": 20.13, + "TestKicExistingNetwork": 20.43, + "TestKicStaticIP": 21.12, + "TestKubernetesUpgrade": 166.08, + "TestMainNoArgs": 0.03, + "TestMinikubeProfile": 39.1, + "TestMissingContainerUpgrade": 59.87, + "TestMountStart/serial/DeleteFirst": 1.3, + "TestMountStart/serial/RestartStopped": 7.08, + "TestMountStart/serial/StartWithMountFirst": 5.52, + "TestMountStart/serial/StartWithMountSecond": 4.93, + "TestMountStart/serial/Stop": 1.15, + "TestMountStart/serial/VerifyMountFirst": 0.18, + "TestMountStart/serial/VerifyMountPostDelete": 0.18, + "TestMountStart/serial/VerifyMountPostStop": 0.18, + "TestMountStart/serial/VerifyMountSecond": 0.18, + "TestMultiControlPlane/serial/AddSecondaryNode": 43.46, + "TestMultiControlPlane/serial/AddWorkerNode": 36.96, + "TestMultiControlPlane/serial/CopyFile": 11.14, + 
"TestMultiControlPlane/serial/DegradedAfterClusterRestart": 0.48, + "TestMultiControlPlane/serial/DegradedAfterControlPlaneNodeStop": 0.49, + "TestMultiControlPlane/serial/DegradedAfterSecondaryNodeDelete": 0.48, + "TestMultiControlPlane/serial/DeleteSecondaryNode": 8.09, + "TestMultiControlPlane/serial/DeployApp": 3.02, + "TestMultiControlPlane/serial/HAppyAfterClusterStart": 0.62, + "TestMultiControlPlane/serial/HAppyAfterSecondaryNodeAdd": 0.62, + "TestMultiControlPlane/serial/HAppyAfterSecondaryNodeRestart": 0.61, + "TestMultiControlPlane/serial/NodeLabels": 0.04, + "TestMultiControlPlane/serial/PingHostFromPods": 0.62, + "TestMultiControlPlane/serial/RestartCluster": 95.85, + "TestMultiControlPlane/serial/RestartClusterKeepsNodes": 167.16, + "TestMultiControlPlane/serial/RestartSecondaryNode": 33.68, + "TestMultiControlPlane/serial/StartCluster": 156.53, + "TestMultiControlPlane/serial/StopCluster": 31.53, + "TestMultiControlPlane/serial/StopSecondaryNode": 11, + "TestMultiNode/serial/AddNode": 28.17, + "TestMultiNode/serial/CopyFile": 6.25, + "TestMultiNode/serial/DeleteNode": 4.49, + "TestMultiNode/serial/DeployApp2Nodes": 2.75, + "TestMultiNode/serial/FreshStart2Nodes": 73.41, + "TestMultiNode/serial/MultiNodeLabels": 0.04, + "TestMultiNode/serial/PingHostFrom2Pods": 0.43, + "TestMultiNode/serial/ProfileList": 0.46, + "TestMultiNode/serial/RestartKeepsNodes": 67.93, + "TestMultiNode/serial/RestartMultiNode": 44.18, + "TestMultiNode/serial/StartAfterStop": 6.58, + "TestMultiNode/serial/StopMultiNode": 21.09, + "TestMultiNode/serial/StopNode": 1.81, + "TestMultiNode/serial/ValidateNameConflict": 20.92, + "TestNetworkPlugins/group/auto/DNS": 0.13, + "TestNetworkPlugins/group/auto/HairPin": 0.11, + "TestNetworkPlugins/group/auto/KubeletFlags": 0.21, + "TestNetworkPlugins/group/auto/Localhost": 0.11, + "TestNetworkPlugins/group/auto/NetCatPod": 10.31, + "TestNetworkPlugins/group/auto/Start": 55.69, + "TestNetworkPlugins/group/bridge/DNS": 0.1, + "TestNetworkPlugins/group/bridge/HairPin": 0.07, + "TestNetworkPlugins/group/bridge/KubeletFlags": 0.19, + "TestNetworkPlugins/group/bridge/Localhost": 0.08, + "TestNetworkPlugins/group/bridge/NetCatPod": 8.11, + "TestNetworkPlugins/group/bridge/Start": 57.65, + "TestNetworkPlugins/group/calico/ControllerPod": 6.01, + "TestNetworkPlugins/group/calico/DNS": 0.09, + "TestNetworkPlugins/group/calico/HairPin": 0.07, + "TestNetworkPlugins/group/calico/KubeletFlags": 0.19, + "TestNetworkPlugins/group/calico/Localhost": 0.06, + "TestNetworkPlugins/group/calico/NetCatPod": 9.1, + "TestNetworkPlugins/group/calico/Start": 42.53, + "TestNetworkPlugins/group/custom-flannel/DNS": 0.09, + "TestNetworkPlugins/group/custom-flannel/HairPin": 0.06, + "TestNetworkPlugins/group/custom-flannel/KubeletFlags": 0.22, + "TestNetworkPlugins/group/custom-flannel/Localhost": 0.07, + "TestNetworkPlugins/group/custom-flannel/NetCatPod": 8.13, + "TestNetworkPlugins/group/custom-flannel/Start": 46.16, + "TestNetworkPlugins/group/enable-default-cni/DNS": 0.09, + "TestNetworkPlugins/group/enable-default-cni/HairPin": 0.08, + "TestNetworkPlugins/group/enable-default-cni/KubeletFlags": 0.19, + "TestNetworkPlugins/group/enable-default-cni/Localhost": 0.07, + "TestNetworkPlugins/group/enable-default-cni/NetCatPod": 9.11, + "TestNetworkPlugins/group/enable-default-cni/Start": 59.43, + "TestNetworkPlugins/group/false/DNS": 0.09, + "TestNetworkPlugins/group/false/HairPin": 0.08, + "TestNetworkPlugins/group/false/KubeletFlags": 0.19, + "TestNetworkPlugins/group/false/Localhost": 0.08, + 
"TestNetworkPlugins/group/false/NetCatPod": 8.1, + "TestNetworkPlugins/group/false/Start": 35.39, + "TestNetworkPlugins/group/flannel/ControllerPod": 6, + "TestNetworkPlugins/group/flannel/DNS": 0.09, + "TestNetworkPlugins/group/flannel/HairPin": 0.07, + "TestNetworkPlugins/group/flannel/KubeletFlags": 0.2, + "TestNetworkPlugins/group/flannel/Localhost": 0.07, + "TestNetworkPlugins/group/flannel/NetCatPod": 9.09, + "TestNetworkPlugins/group/flannel/Start": 44.6, + "TestNetworkPlugins/group/kindnet/ControllerPod": 6, + "TestNetworkPlugins/group/kindnet/DNS": 0.1, + "TestNetworkPlugins/group/kindnet/HairPin": 0.08, + "TestNetworkPlugins/group/kindnet/KubeletFlags": 0.19, + "TestNetworkPlugins/group/kindnet/Localhost": 0.08, + "TestNetworkPlugins/group/kindnet/NetCatPod": 8.1, + "TestNetworkPlugins/group/kindnet/Start": 53.77, + "TestNetworkPlugins/group/kubenet/DNS": 0.09, + "TestNetworkPlugins/group/kubenet/HairPin": 0.07, + "TestNetworkPlugins/group/kubenet/KubeletFlags": 0.2, + "TestNetworkPlugins/group/kubenet/Localhost": 0.08, + "TestNetworkPlugins/group/kubenet/NetCatPod": 7.1, + "TestNetworkPlugins/group/kubenet/Start": 56.1, + "TestNoKubernetes/serial/ProfileList": 18.2, + "TestNoKubernetes/serial/Start": 4.61, + "TestNoKubernetes/serial/StartNoArgs": 8.35, + "TestNoKubernetes/serial/StartNoK8sWithVersion": 0.05, + "TestNoKubernetes/serial/StartWithK8s": 26.87, + "TestNoKubernetes/serial/StartWithStopK8s": 15.59, + "TestNoKubernetes/serial/Stop": 3.86, + "TestNoKubernetes/serial/VerifyK8sNotRunning": 0.19, + "TestNoKubernetes/serial/VerifyK8sNotRunningSecond": 0.21, + "TestOffline": 73.21, + "TestPause/serial/DeletePaused": 1.76, + "TestPause/serial/Pause": 0.35, + "TestPause/serial/PauseAgain": 0.42, + "TestPause/serial/SecondStartNoReconfiguration": 57.26, + "TestPause/serial/Start": 57.99, + "TestPause/serial/Unpause": 2.2, + "TestPause/serial/VerifyDeletedResources": 0.53, + "TestPause/serial/VerifyStatus": 0.21, + "TestPreload": 123.21, + "TestRunningBinaryUpgrade": 42.16, + "TestScheduledStopUnix": 21.4, + "TestSkaffold": 64.02, + "TestStartStop/group/default-k8s-diff-port/serial/AddonExistsAfterStop": 5.04, + "TestStartStop/group/default-k8s-diff-port/serial/DeployApp": 8.26, + "TestStartStop/group/default-k8s-diff-port/serial/EnableAddonAfterStop": 0.13, + "TestStartStop/group/default-k8s-diff-port/serial/EnableAddonWhileActive": 0.52, + "TestStartStop/group/default-k8s-diff-port/serial/FirstStart": 65.56, + "TestStartStop/group/default-k8s-diff-port/serial/Pause": 1.5, + "TestStartStop/group/default-k8s-diff-port/serial/SecondStart": 43.76, + "TestStartStop/group/default-k8s-diff-port/serial/Stop": 10.51, + "TestStartStop/group/default-k8s-diff-port/serial/UserAppExistsAfterStop": 6, + "TestStartStop/group/default-k8s-diff-port/serial/VerifyKubernetesImages": 0.16, + "TestStartStop/group/embed-certs/serial/AddonExistsAfterStop": 5.04, + "TestStartStop/group/embed-certs/serial/DeployApp": 8.14, + "TestStartStop/group/embed-certs/serial/EnableAddonAfterStop": 0.1, + "TestStartStop/group/embed-certs/serial/EnableAddonWhileActive": 0.5, + "TestStartStop/group/embed-certs/serial/FirstStart": 61.62, + "TestStartStop/group/embed-certs/serial/Pause": 1.49, + "TestStartStop/group/embed-certs/serial/SecondStart": 47.78, + "TestStartStop/group/embed-certs/serial/Stop": 10.63, + "TestStartStop/group/embed-certs/serial/UserAppExistsAfterStop": 6, + "TestStartStop/group/embed-certs/serial/VerifyKubernetesImages": 0.16, + "TestStartStop/group/newest-cni/serial/AddonExistsAfterStop": 0, + 
"TestStartStop/group/newest-cni/serial/DeployApp": 0, + "TestStartStop/group/newest-cni/serial/EnableAddonAfterStop": 0.1, + "TestStartStop/group/newest-cni/serial/EnableAddonWhileActive": 0.46, + "TestStartStop/group/newest-cni/serial/FirstStart": 25.31, + "TestStartStop/group/newest-cni/serial/Pause": 1.53, + "TestStartStop/group/newest-cni/serial/SecondStart": 9.54, + "TestStartStop/group/newest-cni/serial/Stop": 10.49, + "TestStartStop/group/newest-cni/serial/UserAppExistsAfterStop": 0, + "TestStartStop/group/newest-cni/serial/VerifyKubernetesImages": 0.16, + "TestStartStop/group/no-preload/serial/AddonExistsAfterStop": 5.04, + "TestStartStop/group/no-preload/serial/DeployApp": 8.17, + "TestStartStop/group/no-preload/serial/EnableAddonAfterStop": 0.1, + "TestStartStop/group/no-preload/serial/EnableAddonWhileActive": 0.56, + "TestStartStop/group/no-preload/serial/FirstStart": 68.75, + "TestStartStop/group/no-preload/serial/Pause": 1.52, + "TestStartStop/group/no-preload/serial/SecondStart": 45.82, + "TestStartStop/group/no-preload/serial/Stop": 10.49, + "TestStartStop/group/no-preload/serial/UserAppExistsAfterStop": 6, + "TestStartStop/group/no-preload/serial/VerifyKubernetesImages": 0.16, + "TestStartStop/group/old-k8s-version/serial/AddonExistsAfterStop": 5.05, + "TestStartStop/group/old-k8s-version/serial/DeployApp": 7.16, + "TestStartStop/group/old-k8s-version/serial/EnableAddonAfterStop": 0.1, + "TestStartStop/group/old-k8s-version/serial/EnableAddonWhileActive": 0.54, + "TestStartStop/group/old-k8s-version/serial/FirstStart": 38.02, + "TestStartStop/group/old-k8s-version/serial/Pause": 1.63, + "TestStartStop/group/old-k8s-version/serial/SecondStart": 47.12, + "TestStartStop/group/old-k8s-version/serial/Stop": 10.48, + "TestStartStop/group/old-k8s-version/serial/UserAppExistsAfterStop": 6, + "TestStartStop/group/old-k8s-version/serial/VerifyKubernetesImages": 0.55, + "TestStoppedBinaryUpgrade/MinikubeLogs": 0.5, + "TestStoppedBinaryUpgrade/Setup": 1.28, + "TestStoppedBinaryUpgrade/Upgrade": 55.38 + }, + "TotalDuration": 2598.98, + "GopoghVersion": "v0.0.0-unset", + "GopoghBuild": "", + "Detail": { + "Name": "Docker_Linux", + "Details": "e2222ae36f11d3515cb4a1cbfbc513a974c210e6:2025-11-02", + "PR": "21807", + "RepoName": "github.com/kubernetes/minikube/" + } +} +minikube: FAIL +make: *** [hack/prow/prow.mk:8: integration-prow-docker-docker-linux-x86-64] Error 1 ++ EXIT_VALUE=2 ++ set +o xtrace +Cleaning up after docker in docker. +================================================================================ +Cleaning up after docker +Stopping Docker: dockerProgram process in pidfile '/var/run/docker-ssd.pid', 1 process(es), refused to die. +================================================================================ +Done cleaning up after docker in docker. diff --git a/hack/prow/common.sh b/hack/prow/common.sh new file mode 100755 index 000000000000..db1d71dd0922 --- /dev/null +++ b/hack/prow/common.sh @@ -0,0 +1,212 @@ +#!/bin/bash + +# Copyright 2025 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# The script expects the following env variables: +# OS: The operating system +# ARCH: The architecture +# DRIVER: the driver to use for the test +# CONTAINER_RUNTIME: the container runtime to use for the test +# EXTRA_START_ARGS: additional flags to pass into minikube start +# EXTRA_TEST_ARGS: additional flags to pass into go test +# JOB_NAME: the name of the logfile and check name to update on github +# PULL_NUMBER: the PR number, if applicable + +function print_test_info() { + echo ">> Starting at $(date)" + echo "" + echo "user: $(whoami)" + echo "arch: ${OS_ARCH}" + echo "pr: ${PULL_NUMBER}" + echo "driver: ${DRIVER}" + echo "runtime: ${CONTAINER_RUNTIME}" + echo "job: ${JOB_NAME}" + echo "test home: ${TEST_HOME}" + echo "kernel: $(uname -v)" + echo "uptime: $(uptime)" + # Setting KUBECONFIG prevents the version check from erroring out due to permission issues + echo "kubectl: $(env KUBECONFIG=${TEST_HOME} kubectl version --client)" + echo "docker: $(docker version --format '{{ .Client.Version }}')" + echo "podman: $(sudo podman version --format '{{.Version}}' || true)" + echo "go: $(go version || true)" + + case "${DRIVER}" in + kvm2) + echo "virsh: $(virsh --version)" + ;; + virtualbox) + echo "vbox: $(vboxmanage --version)" + ;; + vfkit) + echo "vfkit: $(vfkit --version)" + ;; + krunkit) + echo "krunkit: $(krunkit --version)" + ;; + esac + + echo "" +} + +function install_dependencies() { + # We need pstree for the restart cronjobs + if [ "$(uname)" != "Darwin" ]; then + sudo apt-get -y install lsof psmisc dnsutils + else + brew install pstree coreutils pidof + ln -s /usr/local/bin/gtimeout /usr/local/bin/timeout || true + fi + # install golang if not present + sudo hack/prow/installer/check_install_golang.sh /usr/local 1.24.5 || true + # install gotestsum if not present + GOROOT="/usr/local/go" hack/prow/installer/check_install_gotestsum.sh || true + # install gopogh + hack/prow/installer/check_install_gopogh.sh || true + + # install jq + if ! type "jq" >/dev/null; then + echo ">> Installing jq" + if [ "${ARCH}" == "arm64" && "${OS}" == "linux" ]; then + sudo apt-get install jq -y + elif [ "${ARCH}" == "arm64" ]; then + echo "Unable to install 'jq' automatically for arm64 on Darwin, please install 'jq' manually." 
+ exit 5 + elif [ "${OS}" != "darwin" ]; then + curl -LO https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 && sudo install jq-linux64 /usr/local/bin/jq + else + curl -LO https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 && sudo install jq-osx-amd64 /usr/local/bin/jq + fi + fi + +} + +function docker_setup() { + + # clean all docker artifacts up + docker system prune -a --volumes -f || true + docker system df || true + docker rm -f -v $(docker ps -aq) >/dev/null 2>&1 || true + + # read only token, never expires + #todo: do we need this token + # docker login -u minikubebot -p "$DOCKERHUB_READONLY_TOKEN" +} + +function gvisor_image_build() { + # Build the gvisor image so that we can integration test changes to pkg/gvisor + chmod +x testdata/gvisor-addon + # skipping gvisor mac because ofg https://github.com/kubernetes/minikube/issues/5137 + if [ "$(uname)" != "Darwin" ]; then + # Should match GVISOR_IMAGE_VERSION in Makefile + docker build -t gcr.io/k8s-minikube/gvisor-addon:2 -f testdata/gvisor-addon-Dockerfile ./testdata + fi +} + +function run_gopogh() { + # todo: currently we do not save to gopogh db + echo "Not saving to DB" + gopogh -in "${JSON_OUT}" -out_html "${HTML_OUT}" -out_summary "${SUMMARY_OUT}" -name "${JOB_NAME}" -pr "${PULL_NUMBER}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}:$(date +%Y-%m-%d)" + +} + +# this is where the script starts +readonly OS_ARCH="${OS}-${ARCH}" +readonly TEST_ROOT="${HOME}/minikube-integration" +readonly TEST_HOME="${TEST_ROOT}/${MINIKUBE_LOCATION}-$$" + +export GOPATH="$HOME/go" +export KUBECONFIG="${TEST_HOME}/kubeconfig" +export PATH=$PATH:"/usr/local/bin/:/usr/local/go/bin/:$GOPATH/bin" +export MINIKUBE_SUPPRESS_DOCKER_PERFORMANCE=true + +readonly TIMEOUT=120m + +cp -r test/integration/testdata . + +# Add the out/ directory to the PATH, for using new drivers. +export PATH="$(pwd)/out/":$PATH +mkdir -p "${TEST_ROOT}" +mkdir -p "${TEST_HOME}" +export MINIKUBE_HOME="${TEST_HOME}/.minikube" +export MINIKUBE_BIN="out/minikube-${OS_ARCH}" +export E2E_BIN="out/e2e-${OS_ARCH}" + +install_dependencies +docker_setup + + +if [ "$CONTAINER_RUNTIME" == "containerd" ]; then + cp out/gvisor-addon testdata/ + gvisor_image_build +fi + +print_test_info + +readonly TEST_OUT="${TEST_HOME}/testout.txt" +readonly JSON_OUT="${TEST_HOME}/test.json" +readonly JUNIT_OUT="${TEST_HOME}/junit-unit.xml" +readonly HTML_OUT="${TEST_HOME}/test.html" +readonly SUMMARY_OUT="${TEST_HOME}/test_summary.json" + +touch "${TEST_OUT}" +touch "${JSON_OUT}" +touch "${JUNIT_OUT}" +touch "${HTML_OUT}" +touch "${SUMMARY_OUT}" + +e2e_start_time="$(date -u +%s)" +echo "" +echo ">> Starting ${E2E_BIN} at $(date)" +set -x + +EXTRA_START_ARGS="${EXTRA_START_ARGS} --container-runtime=${CONTAINER_RUNTIME}" +echo $PATH +gotestsum --jsonfile "${JSON_OUT}" --junitfile="${JUNIT_OUT}" -f standard-verbose --raw-command -- \ + go tool test2json -t \ + ${E2E_BIN} \ + -minikube-start-args="--driver=${DRIVER} ${EXTRA_START_ARGS}" \ + -test.timeout=${TIMEOUT} -test.v \ + ${EXTRA_TEST_ARGS} \ + -binary="${MINIKUBE_BIN}" 2>&1 | + tee "${TEST_OUT}" + +result=${PIPESTATUS[0]} # capture the exit code of the first cmd in pipe. +set +x +echo ">> ${E2E_BIN} exited with ${result} at $(date)" +echo "" + +# calculate the time took to finish running e2e binary test. 
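The elapsed-time formatting that follows converts the run time from seconds to minutes with two decimal places using only integer arithmetic and tail. A worked sketch of the same mechanism, using the 2599-second total reported earlier in this log (illustrative only, not part of the patch):

    #!/bin/bash
    # Worked example of common.sh's minute formatting, for a 2599 second run.
    e2e_start_time=0
    e2e_end_time=2599

    elapsed=$(($e2e_end_time - $e2e_start_time))   # 2599 seconds
    min=$(($elapsed / 60))                         # 43 whole minutes
    # ${elapsed}00 / 60 is "minutes times 100", truncated: 259900 / 60 = 4331.
    # tail -c 3 keeps the last two digits plus the herestring's trailing newline,
    # which is the fractional part, "31".
    sec=$(tail -c 3 <<<$((${elapsed}00 / 60)))
    echo "${min}.${sec} minutes"                   # prints "43.31 minutes"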
+e2e_end_time="$(date -u +%s)" +elapsed=$(($e2e_end_time - $e2e_start_time)) +min=$(($elapsed / 60)) +sec=$(tail -c 3 <<<$((${elapsed}00 / 60))) +elapsed=$min.$sec + +#todo: currently we skip gopogh upload , we shall add it back +run_gopogh + +# according to prow's requirement, upload the test report to $ARTIFACTS +cp ${TEST_OUT} . +cp ${JSON_OUT} . +cp ${JUNIT_OUT} . +cp ${HTML_OUT} . +cp ${SUMMARY_OUT} . +if [[ $result -eq 0 ]]; then + echo "minikube: SUCCESS" +else + echo "minikube: FAIL" +fi + +exit "$result" diff --git a/hack/prow/docker.json b/hack/prow/docker.json new file mode 100644 index 000000000000..ee4aa99dd2e9 --- /dev/null +++ b/hack/prow/docker.json @@ -0,0 +1,3 @@ +{ + "Image": "debian" +} \ No newline at end of file diff --git a/hack/prow/installer/check_install_docker.sh b/hack/prow/installer/check_install_docker.sh new file mode 100755 index 000000000000..632dbbfa6512 --- /dev/null +++ b/hack/prow/installer/check_install_docker.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eux -o pipefail + +ARCH=${ARCH:=amd64} + + +echo "Installing latest docker" +curl -fsSL https://get.docker.com -o get-docker.sh +sudo sh get-docker.sh +rm get-docker.sh + +sudo usermod -aG docker minitest || true + +echo "Installing latest kubectl" +curl -LO "https://dl.k8s.io/release/$(curl -sL https://dl.k8s.io/release/stable.txt)/bin/linux/${ARCH}/kubectl" +sudo install ./kubectl /usr/local/bin/kubectl diff --git a/hack/prow/installer/check_install_golang.sh b/hack/prow/installer/check_install_golang.sh new file mode 100755 index 000000000000..31937c60a212 --- /dev/null +++ b/hack/prow/installer/check_install_golang.sh @@ -0,0 +1,101 @@ +#!/bin/bash + +# Copyright 2025 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script requires two parameters: +# $1. INSTALL_PATH: The path to install the golang binary +# $2. GO_VERSION: The version of golang to install + + + +set -eux -o pipefail + +if (($# < 2)); then + echo "ERROR: given ! ($#) parameters but expected 2." + echo "USAGE: ./check_install_golang.sh INSTALL_PATH GO_VERSION" + exit 1 +fi + +VERSION_TO_INSTALL=${2} +INSTALL_PATH=${1} + +function current_arch() { + case $(arch) in + "x86_64" | "i386") + echo "amd64" + ;; + "aarch64" | "arm64") + echo "arm64" + ;; + *) + echo "unexpected arch: $(arch). 
use amd64" 1>&2 + echo "amd64" + ;; + esac +} + +ARCH=${ARCH:=$(current_arch)} + +# installs or updates golang if right version doesn't exists +function check_and_install_golang() { + if ! go version &>/dev/null; then + echo "WARNING: No golang installation found in your environment." + install_golang "$VERSION_TO_INSTALL" "$INSTALL_PATH" + return + fi + + sudo chown -R $USER:$USER "$INSTALL_PATH"/go + + # golang has been installed and check its version + if [[ $(go version | cut -d' ' -f 3) =~ go(([0-9]+)\.([0-9]+).([0-9]+)*) ]]; then + HOST_VERSION=${BASH_REMATCH[1]} + if [ $HOST_VERSION == $VERSION_TO_INSTALL ]; then + echo "go version on the host looks good : $HOST_VERSION" + else + echo "WARNING: expected go version to be $VERSION_TO_INSTALL but got $HOST_VERSION" + install_golang "$VERSION_TO_INSTALL" "$INSTALL_PATH" + fi + else + echo "ERROR: Failed to parse golang version: $(go version)" + return + fi +} + +# install_golang takes two parameters version and path to install. +function install_golang() { + local -r GO_VER="$1" + local -r GO_DIR="$2/go" + echo "Installing golang version: $GO_VER in $GO_DIR" + + INSTALLOS=linux + if [[ "$OSTYPE" == "darwin"* ]]; then + INSTALLOS=darwin + fi + + local -r GO_TGZ="go${GO_VER}.${INSTALLOS}-${ARCH}.tar.gz" + pushd /tmp + + # using sudo because previously installed versions might have been installed by a different user. + sudo rm -rf "$GO_TGZ" + curl -qL -O "https://storage.googleapis.com/golang/$GO_TGZ" + sudo rm -rf "$GO_DIR" + sudo mkdir -p "$GO_DIR" + sudo tar -C "$GO_DIR" --strip-components=1 -xzf "$GO_TGZ" + + popd >/dev/null + echo "installed in $GO_DIR: $($GO_DIR/bin/go version)" +} + +check_and_install_golang diff --git a/hack/prow/installer/check_install_gopogh.sh b/hack/prow/installer/check_install_gopogh.sh new file mode 100755 index 000000000000..054344f6e68c --- /dev/null +++ b/hack/prow/installer/check_install_gopogh.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Copyright 2025 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# this script requires golang to be installed + +set -eux -o pipefail + +go install github.com/medyagh/gopogh/cmd/gopogh@v0.29.0 diff --git a/hack/prow/installer/check_install_gotestsum.sh b/hack/prow/installer/check_install_gotestsum.sh new file mode 100755 index 000000000000..7a27ba67875f --- /dev/null +++ b/hack/prow/installer/check_install_gotestsum.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +# Copyright 2021 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
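check_install_golang.sh above decides whether to reinstall Go by parsing the "go version" output with a bash regex and comparing the result against the requested version (common.sh asks for 1.24.5). The comparison step on its own, a sketch assuming a modern goX.Y.Z toolchain string:

    #!/bin/bash
    # Sketch of the version comparison performed by check_and_install_golang.
    WANTED="1.24.5"   # the version common.sh passes to the installer

    # "go version" prints e.g. "go version go1.24.5 linux/amd64"; field 3 is the toolchain tag.
    if [[ $(go version | cut -d' ' -f 3) =~ go([0-9]+\.[0-9]+\.[0-9]+) ]]; then
      HOST_VERSION=${BASH_REMATCH[1]}
      if [[ "${HOST_VERSION}" == "${WANTED}" ]]; then
        echo "go version on the host looks good: ${HOST_VERSION}"
      else
        echo "expected go ${WANTED} but found ${HOST_VERSION}, reinstalling"
      fi
    else
      echo "failed to parse golang version: $(go version)"
    fi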
+# See the License for the specific language governing permissions and +# limitations under the License. + +set -eux -o pipefail + +function install_gotestsum() { + rm -f $(which gotestsum) + sudo env "GOBIN=$GOROOT/bin" "PATH=$PATH" go install gotest.tools/gotestsum@v1.12.3 +} +echo "Installing gotestsum" +which gotestsum || install_gotestsum diff --git a/hack/prow/integration_docker_docker_linux_x86-64.sh b/hack/prow/integration_docker_docker_linux_x86-64.sh new file mode 100755 index 000000000000..b82bba73874f --- /dev/null +++ b/hack/prow/integration_docker_docker_linux_x86-64.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Copyright 2025 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e +set -x + +OS="linux" +ARCH="amd64" +DRIVER="docker" +CONTAINER_RUNTIME="docker" +EXTRA_START_ARGS="" +EXTRA_TEST_ARGS="" +JOB_NAME="Docker_Linux" + +git config --global --add safe.directory '*' +COMMIT=$(git rev-parse HEAD) +MINIKUBE_LOCATION=$COMMIT + + +# when docker is the driver, we run integration tests directly in prow cluster +# by default, prow jobs run in root, so we must switch to a non-root user to run docker driver + + +source ./hack/prow/common.sh diff --git a/hack/prow/integration_kvm_docker_linux_x86-64.sh b/hack/prow/integration_kvm_docker_linux_x86-64.sh new file mode 100755 index 000000000000..031904345b65 --- /dev/null +++ b/hack/prow/integration_kvm_docker_linux_x86-64.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +# Copyright 2025 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
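integration_docker_docker_linux_x86-64.sh above is the whole per-job contract: export the variables that common.sh documents, record the current commit as MINIKUBE_LOCATION, and source the shared runner. A wrapper for another runtime would follow the same shape; the sketch below is hypothetical (the file name, JOB_NAME, and containerd runtime are assumptions, not files added by this change):

    #!/bin/bash
    # hypothetical: hack/prow/integration_docker_containerd_linux_x86-64.sh
    set -e
    set -x

    OS="linux"
    ARCH="amd64"
    DRIVER="docker"
    CONTAINER_RUNTIME="containerd"   # common.sh appends --container-runtime=${CONTAINER_RUNTIME} to the start args
    EXTRA_START_ARGS=""
    EXTRA_TEST_ARGS=""
    JOB_NAME="Docker_Linux_containerd"   # assumed job name

    git config --global --add safe.directory '*'
    COMMIT=$(git rev-parse HEAD)
    MINIKUBE_LOCATION=$COMMIT

    source ./hack/prow/common.sh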
+ +set -e +set -x + +OS="linux" +ARCH="amd64" +DRIVER="kvm2" +CONTAINER_RUNTIME="docker" +# in prow, if you want libvirtd to be run, you have to start a privileged container as root +EXTRA_START_ARGS="" +EXTRA_TEST_ARGS="" +JOB_NAME="KVM_Linux" +git config --global --add safe.directory '*' +COMMIT=$(git rev-parse HEAD) +MINIKUBE_LOCATION=$COMMIT +echo "running test in $(pwd)" + +set +e +sleep 5 # wait for libvirtd to be running +echo "=========libvirtd status==========" +sudo systemctl status libvirtd +echo "=========Check virtualization support==========" +grep -E -q 'vmx|svm' /proc/cpuinfo && echo yes || echo no +echo "=========virt-host-validate==========" +virt-host-validate + +set -e +source ./hack/prow/common.sh diff --git a/hack/prow/integration_kvm_docker_linux_x86-64_pre.sh b/hack/prow/integration_kvm_docker_linux_x86-64_pre.sh new file mode 100755 index 000000000000..6a4ea333133d --- /dev/null +++ b/hack/prow/integration_kvm_docker_linux_x86-64_pre.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Copyright 2025 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e +set -x + +OS="linux" +ARCH="amd64" +DRIVER="kvm2" +CONTAINER_RUNTIME="docker" +# in prow, if you want libvirtd to be run, you have to start a privileged container as root +EXTRA_START_ARGS="" +EXTRA_TEST_ARGS="" +JOB_NAME="KVM_Linux" + + +# install docker if not present +ARCH="$ARCH" hack/prow/installer/check_install_docker.sh || true +sudo adduser $(whoami) docker || true + +sudo apt-get update +sudo apt-get -y install qemu-system qemu-kvm libvirt-clients libvirt-daemon-system ebtables iptables dnsmasq +sudo adduser $(whoami) libvirt || true + +# start libvirtd +sudo systemctl start libvirtd diff --git a/hack/prow/kvm.json b/hack/prow/kvm.json new file mode 100644 index 000000000000..fba955648eaf --- /dev/null +++ b/hack/prow/kvm.json @@ -0,0 +1,9 @@ +{ + "GCPZone": "us-central1-b", + "InstanceImage": "ubuntu-os-cloud/ubuntu-2404-lts-amd64", + "InstanceType": "n2-standard-32", + "DiskGiB": 300, + "BoskosAcquireTimeoutSeconds": 180, + "BoskosHeartbeatIntervalSeconds": 10, + "BoskosLocation": "http://boskos.test-pods.svc.cluster.local" +} \ No newline at end of file diff --git a/hack/prow/minikube_cross_build.sh b/hack/prow/minikube_cross_build.sh new file mode 100755 index 000000000000..5571f0f00682 --- /dev/null +++ b/hack/prow/minikube_cross_build.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +# Copyright 2025 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
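The KVM job above installs libvirt in its _pre step and then prints several diagnostics (libvirtd status, vmx/svm CPU flags, virt-host-validate) before handing off to common.sh. Collected into a single gate, those same checks look roughly like this (sketch only, not part of the patch):

    #!/bin/bash
    # Sketch: fail fast if the Prow VM cannot actually run KVM guests.

    # The CPU must expose VT-x/AMD-V; the deployer creates the GCE VM with nested virtualization enabled.
    if ! grep -E -q 'vmx|svm' /proc/cpuinfo; then
      echo "no vmx/svm flag in /proc/cpuinfo, KVM tests cannot run" >&2
      exit 1
    fi

    # libvirtd must be running before minikube's kvm2 driver can create domains.
    if ! sudo systemctl is-active --quiet libvirtd; then
      echo "libvirtd is not running" >&2
      exit 1
    fi

    # virt-host-validate prints per-check PASS/FAIL/WARN; warnings alone are tolerated here.
    virt-host-validate || true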
+# See the License for the specific language governing permissions and +# limitations under the License. + +# This script requires 3 parameter: +# $1. GO_VERSION: The version of golang to install +# $2. OS: The operating system +# $3. ARCH: The architecture +set -eux -o pipefail +GO_VERSION=$1 +OS=$2 +ARCH=$3 +readonly OS_ARCH="${OS}-${ARCH}" + +echo "running build in $(pwd), current pr number: ${PULL_NUMBER:-none}" + +# hack/prow/installer/check_install_golang.sh /usr/local $GO_VERSION +# declare -rx GOPATH="$HOME/go" + +declare -rx BUILD_IN_DOCKER=y +make -j 16 \ + out/minikube-${OS_ARCH} \ + out/e2e-${OS_ARCH} \ + out/gvisor-addon \ +&& failed=$? || failed=$? + +export MINIKUBE_BIN="out/minikube-${OS_ARCH}" +export E2E_BIN="out/e2e-${OS_ARCH}" +chmod +x "${MINIKUBE_BIN}" "${E2E_BIN}" + +BUILT_VERSION=$("out/minikube-$(go env GOOS)-$(go env GOARCH)" version) +echo ${BUILT_VERSION} + +COMMIT=$(echo ${BUILT_VERSION} | grep 'commit:' | awk '{print $2}') +if (echo ${COMMIT} | grep -q dirty); then + echo "'minikube version' reports dirty commit: ${COMMIT}" + exit 1 +fi + +if [[ "${failed}" -ne 0 ]]; then + echo "build failed" + exit "${failed}" +fi diff --git a/hack/prow/minitest/deployer/boskos_config.go b/hack/prow/minitest/deployer/boskos_config.go new file mode 100644 index 000000000000..5af22518a465 --- /dev/null +++ b/hack/prow/minitest/deployer/boskos_config.go @@ -0,0 +1,28 @@ +/* +Copyright 2025 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployer + +type MiniTestBoskosConfig struct { + GCPZone string + InstanceImage string + InstanceType string + DiskGiB int + // Boskos flags correspond to https://github.com/kubernetes-sigs/kubetest2/blob/71238a9645df6fbd7eaac9a36f635c22f1566168/kubetest2-gce/deployer/deployer.go + BoskosAcquireTimeoutSeconds int + BoskosHeartbeatIntervalSeconds int + BoskosLocation string +} diff --git a/hack/prow/minitest/deployer/boskos_deployer.go b/hack/prow/minitest/deployer/boskos_deployer.go new file mode 100644 index 000000000000..61373c2c6054 --- /dev/null +++ b/hack/prow/minitest/deployer/boskos_deployer.go @@ -0,0 +1,273 @@ +/* +Copyright 2025 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
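minikube_cross_build.sh above cross-builds the minikube, e2e, and gvisor-addon binaries inside the kube-cross image and then refuses to continue if "minikube version" reports a dirty commit. The guard in isolation, a sketch with the binary path assumed to be the linux/amd64 output of the make targets above:

    #!/bin/bash
    # Sketch of the dirty-commit guard that runs after the cross build.
    set -eu

    MINIKUBE_BIN="out/minikube-linux-amd64"   # assumed OS/ARCH

    BUILT_VERSION=$("${MINIKUBE_BIN}" version)
    COMMIT=$(echo "${BUILT_VERSION}" | grep 'commit:' | awk '{print $2}')

    # The commit string contains "dirty" when the binary was built from a tree with local changes.
    if echo "${COMMIT}" | grep -q dirty; then
      echo "'minikube version' reports dirty commit: ${COMMIT}" >&2
      exit 1
    fi
    echo "clean build at commit ${COMMIT}"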
+*/
+
+package deployer
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/google/uuid"
+	"k8s.io/klog/v2"
+	"sigs.k8s.io/boskos/client"
+	"sigs.k8s.io/kubetest2/pkg/boskos"
+)
+
+const (
+	// gceProjectResourceType is called "gce" project in Boskos,
+	// while it is called "gcp" project in this CLI.
+	gceProjectResourceType = "gce-project"
+	runDirSSHKeys          = "gce-ssh-keys"
+	remoteUserName         = "minitest"
+)
+
+type MiniTestBosKosDeployer struct {
+	ctx    context.Context
+	config *MiniTestBoskosConfig
+	isUp   bool
+
+	id               string
+	gcpProject       string
+	remoteUserName   string
+	networkName      string
+	firewallRuleName string
+	instanceName     string
+	sshAddr          string
+
+	boskosClient *client.Client
+	// this channel serves as a signal channel for the heartbeat goroutine
+	// so that it can be explicitly closed
+	boskosHeartbeatClose chan struct{}
+}
+
+func NewMiniTestBosKosDeployerFromConfigFile(path string) MiniTestDeployer {
+	config := MiniTestBoskosConfig{}
+	data, err := os.ReadFile(path)
+	if err != nil {
+		klog.Fatalf("failed to read config file %s: %v", path, err)
+	}
+	if err := json.Unmarshal(data, &config); err != nil {
+		klog.Fatalf("failed to parse config file %s: %v", path, err)
+	}
+	return NewMiniTestBosKosDeployer(&config)
+}
+
+func NewMiniTestBosKosDeployer(config *MiniTestBoskosConfig) MiniTestDeployer {
+	boskosClient, err := boskos.NewClient(config.BoskosLocation)
+	if err != nil {
+		klog.Fatalf("failed to make boskos client: %v", err)
+	}
+	id := uuid.New().String()[:8]
+	return &MiniTestBosKosDeployer{
+		ctx:                  context.TODO(),
+		config:               config,
+		boskosClient:         boskosClient,
+		boskosHeartbeatClose: make(chan struct{}),
+		remoteUserName:       remoteUserName,
+		id:                   id,
+		networkName:          "minitest-network-" + id,
+		firewallRuleName:     "minitest-firewall-" + id,
+		instanceName:         "minitest-vm-" + id,
+	}
+}
+
+func (m *MiniTestBosKosDeployer) Up() error {
+
+	if err := m.requestGCPProject(); err != nil {
+		klog.Errorf("Failed to request gcp project from boskos: %v", err)
+		return err
+	}
+	if err := m.gcpVMSetUp(); err != nil {
+		klog.Errorf("Failed to start a vm in gcp: %v", err)
+		return err
+	}
+	if err := m.gcpSSHSetUp(); err != nil {
+		klog.Errorf("Failed to set up ssh: %v", err)
+		return err
+	}
+	if err := sshConnectionCheck(m.ctx, m.remoteUserName, m.sshAddr, nil); err != nil {
+		klog.Errorf("Failed to connect via ssh: %v", err)
+		return err
+	}
+
+	klog.Infof("Successfully started vm in gcp: %s", m.instanceName)
+	m.isUp = true
+	return nil
+
+}
+
+func (m *MiniTestBosKosDeployer) Down() error {
+	//todo: clean up the VM?
+
+	klog.Info("Releasing boskos project")
+	if m.boskosClient == nil {
+		return fmt.Errorf("m.boskosClient not set")
+	}
+	err := boskos.Release(
+		m.boskosClient,
+		[]string{m.gcpProject},
+		m.boskosHeartbeatClose,
+	)
+	if err != nil {
+		return fmt.Errorf("down failed to release boskos project: %v", err)
+	}
+	m.isUp = false
+	return nil
+
+}
+
+func (m *MiniTestBosKosDeployer) IsUp() (bool, error) {
+	return m.isUp, nil
+}
+func (m *MiniTestBosKosDeployer) Execute(args ...string) error {
+	return executeSSHCommand(m.ctx, m.remoteUserName, m.sshAddr, nil, args...)
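+	// Note: with the defaults above, executeSSHCommand (util.go) builds a command of roughly the form
+	//   ssh <instance>.<zone>.<project> -o StrictHostKeyChecking=no -o User=minitest -o UserKnownHostsFile=/dev/null -- <args...>
+	// (a sketch of the resulting invocation, not an exact transcript).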
+} + +func (m *MiniTestBosKosDeployer) SyncToRemote(src string, dst string, excludedPattern []string) error { + excludedArgs := make([]string, 0, len(excludedPattern)*2) + for _, pattern := range excludedPattern { + excludedArgs = append(excludedArgs, "--exclude", pattern) + } + dstRemote := fmt.Sprintf("%s@%s:%s", m.remoteUserName, m.sshAddr, dst) + return executeRsyncSSHCommand(m.ctx, nil, src, dstRemote, excludedArgs) +} + +func (m *MiniTestBosKosDeployer) SyncToHost(src string, dst string, excludedPattern []string) error { + excludedArgs := make([]string, 0, len(excludedPattern)*2) + for _, pattern := range excludedPattern { + excludedArgs = append(excludedArgs, "--exclude", pattern) + } + srcRemote := fmt.Sprintf("%s@%s:%s", m.remoteUserName, m.sshAddr, src) + return executeRsyncSSHCommand(m.ctx, nil, srcRemote, dst, excludedArgs) +} + +func (m *MiniTestBosKosDeployer) requestGCPProject() error { + + resource, err := boskos.Acquire( + m.boskosClient, + gceProjectResourceType, + time.Duration(m.config.BoskosAcquireTimeoutSeconds)*time.Second, + time.Duration(m.config.BoskosHeartbeatIntervalSeconds)*time.Second, + m.boskosHeartbeatClose, + ) + + if err != nil { + return fmt.Errorf("failed to get project from boskos: %v", err) + } + if resource.Name == "" { + return fmt.Errorf("boskos returned an empty resource name, resource: %v", resource) + } + + klog.Infof("Got project %q from boskos", resource.Name) + m.gcpProject = resource.Name + return nil +} + +func (m *MiniTestBosKosDeployer) gcpVMSetUp() error { + // ensure gcp zone is set to start a vm + if m.config.GCPZone == "" { + return fmt.Errorf("GCPZone is not set") + } + + // execute gcloud commands to set environment up a vm + if err := m.executeLocalGloudCommand("services", "enable", "compute.googleapis.com"); err != nil { + klog.Warningf("failed to enable service: %v", err) + } + if err := m.executeLocalGloudCommand("compute", "networks", "create", m.networkName); err != nil { + klog.Warningf("failed to set up network: %v", err) + } + if err := m.executeLocalGloudCommand("compute", "firewall-rules", "create", m.firewallRuleName, "--network="+m.networkName, "--allow=tcp:22"); err != nil { + klog.Warningf("failed to set up firewalls: %v", err) + } + + // create the vm + description := fmt.Sprintf("%s instance (login ID: %q)", m.instanceName, m.remoteUserName) + instImgPair := strings.SplitN(m.config.InstanceImage, "/", 2) + if err := m.executeLocalGloudCommand("compute", "instances", "create", + "--enable-nested-virtualization", + "--zone="+m.config.GCPZone, + "--description="+description, + "--network="+m.networkName, + "--image-project="+instImgPair[0], + "--image-family="+instImgPair[1], + "--machine-type="+m.config.InstanceType, + fmt.Sprintf("--boot-disk-size=%dGiB", m.config.DiskGiB), + "--boot-disk-type=pd-ssd", + "--metadata=block-project-ssh-keys=TRUE", + + m.instanceName, + ); err != nil { + klog.Errorf("failed to start a gcp vm: %v", err) + return err + } + + return nil + +} + +func (m *MiniTestBosKosDeployer) gcpSSHSetUp() error { + home, err := os.UserHomeDir() + if err != nil { + return fmt.Errorf("failed to find home dir: %v", err) + } + gcePubPath := filepath.Join(home, ".ssh/google_compute_engine.pub") + gcePubContent, err := os.ReadFile(gcePubPath) + if err != nil { + return fmt.Errorf("failed to read GCE public key: %v", err) + } + sshKeysContent := []byte(m.remoteUserName + ":" + string(gcePubContent)) + runDir, err := os.MkdirTemp("", "rundir") + if err != nil { + return fmt.Errorf("failed to create temp dir: 
%v", err)
+	}
+	sshKeysPath := filepath.Join(runDir, runDirSSHKeys)
+	_ = os.RemoveAll(sshKeysPath)
+	if err = os.WriteFile(sshKeysPath, sshKeysContent, 0400); err != nil {
+		return fmt.Errorf("failed to create new public key file: %v", err)
+	}
+
+	// set up ssh login with pub key
+	if err := m.executeLocalGloudCommand("compute", "instances", "add-metadata", m.instanceName, "--zone="+m.config.GCPZone, "--metadata-from-file=ssh-keys="+sshKeysPath); err != nil {
+		klog.Warningf("failed to add metadata: %v", err)
+		// continue anyway
+	}
+	// update the local ssh config
+	if err := m.executeLocalGloudCommand("compute", "config-ssh"); err != nil {
+		klog.Warningf("failed to run 'gcloud compute config-ssh': %v", err)
+		// continue anyway
+	}
+
+	// set sshAddr field
+	if m.instanceName == "" || m.config.GCPZone == "" || m.gcpProject == "" {
+		return fmt.Errorf("gcp project configuration incorrect: %s.%s.%s", m.instanceName, m.config.GCPZone, m.gcpProject)
+	}
+	m.sshAddr = fmt.Sprintf("%s.%s.%s", m.instanceName, m.config.GCPZone, m.gcpProject)
+
+	return nil
+}
+
+func (m *MiniTestBosKosDeployer) executeLocalGloudCommand(args ...string) error {
+
+	if err := executeLocalCommand(m.ctx, "gcloud", append([]string{"--project=" + m.gcpProject}, args...)...); err != nil {
+		return fmt.Errorf("failed to run %v: %v", args, err)
+	}
+	return nil
+}
diff --git a/hack/prow/minitest/deployer/deployer.go b/hack/prow/minitest/deployer/deployer.go
new file mode 100644
index 000000000000..b97bd8158c8b
--- /dev/null
+++ b/hack/prow/minitest/deployer/deployer.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2025 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package deployer
+
+type MiniTestDeployer interface {
+	// Up should provision the environment for testing
+	Up() error
+	// Down should tear down the environment if any
+	Down() error
+	// IsUp should return true if a test cluster is successfully provisioned
+	IsUp() (bool, error)
+	// Execute executes a command in the deployed environment
+	Execute(args ...string) error
+	// SyncToRemote syncs a file or folder from src on the host to dst in the deployed environment
+	SyncToRemote(src string, dst string, excludedPattern []string) error
+	// SyncToHost syncs a file or folder from src in the deployed environment to dst on the host
+	SyncToHost(src string, dst string, excludedPattern []string) error
+}
diff --git a/hack/prow/minitest/deployer/docker_config.go b/hack/prow/minitest/deployer/docker_config.go
new file mode 100644
index 000000000000..f7d4e8b8de7b
--- /dev/null
+++ b/hack/prow/minitest/deployer/docker_config.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2025 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployer + +type MiniTestDockerConfig struct { + Image string +} diff --git a/hack/prow/minitest/deployer/docker_deployer.go b/hack/prow/minitest/deployer/docker_deployer.go new file mode 100644 index 000000000000..a33813792fce --- /dev/null +++ b/hack/prow/minitest/deployer/docker_deployer.go @@ -0,0 +1,314 @@ +/* +Copyright 2025 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployer + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "fmt" + "io" + "os" + "strconv" + + "github.com/docker/go-connections/nat" + "github.com/google/uuid" + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/client" + "github.com/phayes/freeport" + gossh "golang.org/x/crypto/ssh" + "k8s.io/klog/v2" +) + +var sshSetupScript = ` +#!/bin/bash +USERNAME=%s +apt update +apt install -y openssh-server rsync +service ssh start +useradd -m -s /bin/bash ${USERNAME} +passwd -d ${USERNAME} + +USER_HOME=$(eval echo "~${USERNAME}") +mkdir -p $USER_HOME/.ssh +chmod 700 $USER_HOME/.ssh +echo "%s" >> $USER_HOME/.ssh/authorized_keys +chmod 600 $USER_HOME/.ssh/authorized_keys +chown -R $USERNAME:$USERNAME "$USER_HOME/.ssh" +` + +type MiniTestDockerDeployer struct { + ctx context.Context + config *MiniTestDockerConfig + isUp bool + + dockerClient *client.Client + containerSHA string + + sshPrivateKeyFile string + sshPublicKeyFile string + sshPublicKeyContent string + sshPort string + + sshTempDir string + remoteUserName string +} + +func NewMiniTestDockerDeployerFromConfigFile(path string) MiniTestDeployer { + config := MiniTestDockerConfig{} + data, err := os.ReadFile(path) + if err != nil { + klog.Fatalf("failed to read config file %s: %v", path, err) + } + if err := json.Unmarshal(data, &config); err != nil { + klog.Fatalf("failed to parse config file %s: %v", path, err) + } + return NewMiniTestDockerDeployer(&config) + +} + +func NewMiniTestDockerDeployer(config *MiniTestDockerConfig) MiniTestDeployer { + return &MiniTestDockerDeployer{ + ctx: context.TODO(), + config: config, + isUp: false, + remoteUserName: remoteUserName, + } +} + +func (m *MiniTestDockerDeployer) Up() error { + dockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return fmt.Errorf("failed to create docker api client: %v", err) + } + m.dockerClient = dockerClient + + // pull the image + reader, err := dockerClient.ImagePull(m.ctx, m.config.Image, client.ImagePullOptions{}) + if err != nil { + return fmt.Errorf("failed to pull image %s: %v", m.config.Image, err) + } + defer 
reader.Close()
+	_, err = io.Copy(os.Stdout, reader)
+	if err != nil {
+		return fmt.Errorf("failed to read image pull response: %v", err)
+	}
+	// find a free port for ssh
+	port, err := freeport.GetFreePort()
+	if err != nil {
+		return fmt.Errorf("failed to get a free port for ssh: %v", err)
+	}
+	m.sshPort = strconv.Itoa(port)
+	klog.Infof("Using port %s for ssh", m.sshPort)
+
+	// start the container
+	id := uuid.New().String()[:8]
+	//todo: remove hard coded ports
+	exposedPorts, portBindings, _ := nat.ParsePortSpecs([]string{m.sshPort + ":22"})
+	response, err := dockerClient.ContainerCreate(
+		m.ctx,
+		&container.Config{
+			Image:     m.config.Image,
+			Tty:       true, //-t
+			OpenStdin: true, // -i
+			//Entrypoint: []string{"/usr/sbin/init"},
+			ExposedPorts: exposedPorts,
+		},
+
+		&container.HostConfig{
+			Privileged:   true,
+			PortBindings: portBindings,
+		}, nil, nil, "minitest-"+id)
+	if err != nil {
+		klog.Errorf("failed to create container from image %s: %v", m.config.Image, err)
+		return fmt.Errorf("failed to create container from image %s: %v", m.config.Image, err)
+	}
+	m.containerSHA = response.ID
+
+	err = dockerClient.ContainerStart(m.ctx, m.containerSHA,
+		client.ContainerStartOptions{})
+	if err != nil {
+		klog.Errorf("failed to start container %s: %v", m.containerSHA, err)
+		return fmt.Errorf("failed to start container %s: %v", m.containerSHA, err)
+	}
+
+	// set up ssh keys
+	if err := m.sshSetUp(); err != nil {
+		klog.Errorf("failed to set up ssh: %v", err)
+		return fmt.Errorf("failed to set up ssh: %v", err)
+	}
+
+	// set up sshd server
+	err = m.executeDockerShellCommand("root", fmt.Sprintf(sshSetupScript, m.remoteUserName, m.sshPublicKeyContent))
+	if err != nil {
+		klog.Errorf("failed to set up sshd server in container %s: %v", m.containerSHA, err)
+		return fmt.Errorf("failed to set up sshd server in container %s: %v", m.containerSHA, err)
+	}
+
+	// check ssh connectivity
+	if err := sshConnectionCheck(m.ctx, m.remoteUserName, "localhost", m.sshAdditionalArgs()); err != nil {
+		klog.Errorf("Failed to connect via ssh: %v", err)
+		return fmt.Errorf("failed to connect via ssh: %v", err)
+	}
+
+	m.isUp = true
+	return nil
+}
+
+func (m *MiniTestDockerDeployer) Down() error {
+	os.RemoveAll(m.sshTempDir)
+
+	if m.dockerClient == nil {
+		klog.Errorf("m.dockerClient not set")
+		return fmt.Errorf("m.dockerClient not set")
+	}
+	if err := m.dockerClient.ContainerRemove(m.ctx, m.containerSHA, client.ContainerRemoveOptions{Force: true}); err != nil {
+		klog.Errorf("failed to remove container %s: %v", m.containerSHA, err)
+		return fmt.Errorf("failed to remove container %s: %v", m.containerSHA, err)
+	}
+	// close the docker client
+	m.dockerClient.Close()
+	m.isUp = false
+	klog.Infof("Successfully removed container %s", m.containerSHA)
+	return nil
+}
+func (m *MiniTestDockerDeployer) IsUp() (bool, error) {
+	return m.isUp, nil
+}
+
+func (m *MiniTestDockerDeployer) Execute(args ...string) error {
+	return executeSSHCommand(m.ctx, m.remoteUserName, "localhost", m.sshAdditionalArgs(), args...)
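+	// Note: here the command runs against the container's sshd, which is published on a host port, so
+	// the resulting invocation is roughly
+	//   ssh localhost -o StrictHostKeyChecking=no -o User=minitest -o UserKnownHostsFile=/dev/null -i <sshPrivateKeyFile> -p <sshPort> -- <args...>
+	// (a sketch only; the exact argument order comes from executeSSHCommand in util.go).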
+}
+
+func (m *MiniTestDockerDeployer) SyncToRemote(src string, dst string, excludedPattern []string) error {
+	excludedArgs := make([]string, 0, len(excludedPattern)*2)
+	for _, pattern := range excludedPattern {
+		excludedArgs = append(excludedArgs, "--exclude", pattern)
+	}
+	dstRemote := fmt.Sprintf("%s@%s:%s", m.remoteUserName, "localhost", dst)
+	return executeRsyncSSHCommand(m.ctx, m.sshAdditionalArgs(), src, dstRemote, excludedArgs)
+}
+
+func (m *MiniTestDockerDeployer) SyncToHost(src string, dst string, excludedPattern []string) error {
+	excludedArgs := make([]string, 0, len(excludedPattern)*2)
+	for _, pattern := range excludedPattern {
+		excludedArgs = append(excludedArgs, "--exclude", pattern)
+	}
+	srcRemote := fmt.Sprintf("%s@%s:%s", m.remoteUserName, "localhost", src)
+	return executeRsyncSSHCommand(m.ctx, m.sshAdditionalArgs(), srcRemote, dst, excludedArgs)
+	//return executeScpCommand(m.ctx, m.remoteUserName, "localhost", m.scpAdditionalArgs(), src, dst)
+}
+
+func (m *MiniTestDockerDeployer) executeDockerShellCommand(user string, args ...string) error {
+	execResp, err := m.dockerClient.ContainerExecCreate(m.ctx, m.containerSHA, container.ExecOptions{
+		Cmd:          append([]string{"/bin/bash", "-c"}, args...),
+		AttachStdout: true,
+		AttachStderr: true,
+		Tty:          true,
+		User:         user,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to create exec instance: %v", err)
+	}
+
+	attachResp, err := m.dockerClient.ContainerExecAttach(
+		m.ctx, execResp.ID,
+		container.ExecAttachOptions{Tty: true})
+	if err != nil {
+		return fmt.Errorf("failed to attach to exec instance: %v", err)
+	}
+	defer attachResp.Close()
+	if _, err = io.Copy(os.Stdout, attachResp.Reader); err != nil {
+		return fmt.Errorf("failed to read exec output: %v", err)
+	}
+	return nil
+
+}
+
+func (m *MiniTestDockerDeployer) sshSetUp() error {
+	// we generate an ssh key pair and add the public key to the container's authorized_keys
+
+	sshTempDir, err := os.MkdirTemp("", "minitest-ssh-")
+	if err != nil {
+		return fmt.Errorf("failed to create temp dir for ssh keys: %v", err)
+	}
+	m.sshTempDir = sshTempDir
+	klog.Info("Created temp dir for ssh keys: ", sshTempDir)
+
+	// create private key file
+	sshPrivateKeyFile, err := os.CreateTemp(sshTempDir, "id_rsa")
+	if err != nil {
+		return fmt.Errorf("failed to create temp file for private key: %v", err)
+	}
+	if err := sshPrivateKeyFile.Chmod(0600); err != nil {
+		return fmt.Errorf("failed to chmod private key file: %v", err)
+	}
+	m.sshPrivateKeyFile = sshPrivateKeyFile.Name()
+	klog.Info("Created temp file for private key: ", m.sshPrivateKeyFile)
+
+	// create public key file
+	sshPublicKeyFile, err := os.CreateTemp(sshTempDir, "id_rsa.pub")
+	if err != nil {
+		return fmt.Errorf("failed to create temp file for public key: %v", err)
+	}
+	if err := sshPublicKeyFile.Chmod(0644); err != nil {
+		return fmt.Errorf("failed to chmod public key file: %v", err)
+	}
+	m.sshPublicKeyFile = sshPublicKeyFile.Name()
+	klog.Info("Created temp file for public key: ", m.sshPublicKeyFile)
+
+	// generate private key and convert to PEM format
+	privateKey, err := rsa.GenerateKey(rand.Reader, 4096)
+	if err != nil {
+		return fmt.Errorf("failed to generate private key: %v", err)
+	}
+	privDER := x509.MarshalPKCS1PrivateKey(privateKey)
+	privBlock := &pem.Block{
+		Type:  "RSA PRIVATE KEY",
+		Bytes: privDER,
+	}
+
+	if err := pem.Encode(sshPrivateKeyFile, privBlock); err != nil {
+		sshPrivateKeyFile.Close()
+		return fmt.Errorf("failed to write private key to file: %v", err)
+	}
+
+	pub, err :=
gossh.NewPublicKey(&privateKey.PublicKey) + if err != nil { + return fmt.Errorf("failed to generate public key: %v", err) + } + pubBytes := gossh.MarshalAuthorizedKey(pub) + + if _, err := sshPublicKeyFile.Write(pubBytes); err != nil { + return fmt.Errorf("failed to write public key to file: %v", err) + } + sshPublicKeyFile.Close() + m.sshPublicKeyContent = string(pubBytes) + klog.Infof("Generated ssh public key:%s ", m.sshPublicKeyContent) + return nil +} + +func (m *MiniTestDockerDeployer) sshAdditionalArgs() []string { + return []string{"-i", m.sshPrivateKeyFile, "-p", m.sshPort} +} + +func (m *MiniTestDockerDeployer) scpAdditionalArgs() []string { + return []string{"-i", m.sshPrivateKeyFile, "-P", m.sshPort} +} diff --git a/hack/prow/minitest/deployer/util.go b/hack/prow/minitest/deployer/util.go new file mode 100644 index 000000000000..61de5a224e49 --- /dev/null +++ b/hack/prow/minitest/deployer/util.go @@ -0,0 +1,92 @@ +/* +Copyright 2025 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployer + +import ( + "context" + "fmt" + "os" + "os/exec" + "strings" + "time" + + "k8s.io/klog/v2" +) + +const ssh = "ssh" +const rsync = "rsync" + +func executeLocalCommand(ctx context.Context, name string, args ...string) error { + cmd := exec.CommandContext(ctx, name, args...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + klog.Infof("Executing: %v", cmd.Args) + return cmd.Run() +} + +func executeSSHCommand(ctx context.Context, user string, addr string, sshArguments []string, args ...string) error { + allArgs := []string{addr, "-o", "StrictHostKeyChecking=no", + "-o", "User=" + user, "-o", "UserKnownHostsFile=/dev/null"} + allArgs = append(allArgs, sshArguments...) + allArgs = append(allArgs, "--") + allArgs = append(allArgs, args...) + cmd := exec.CommandContext(ctx, "ssh", allArgs...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + klog.Infof("Executing: %v", cmd.Args) + return cmd.Run() +} + +func sshConnectionCheck(ctx context.Context, user string, addr string, sshArguments []string) error { + var err error + for i := range 10 { + // cmd cannot be reused after its failure + err = executeSSHCommand(ctx, user, addr, sshArguments, "uname", "-a") + if err == nil { + return nil + } + klog.Infof("[%d/10]ssh command failed with error: %v", i, err) + time.Sleep(10 * time.Second) + } + return fmt.Errorf("failed to connect to vm: %v", err) +} + +func executeRsyncSSHCommand(ctx context.Context, sshArguments []string, src string, dst string, rsyncArgs []string) error { + sshArgs := []string{ssh, "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null"} + sshArgs = append(sshArgs, sshArguments...) + + allArgs := []string{"-e", strings.Join(sshArgs, " "), "-avz"} + allArgs = append(allArgs, rsyncArgs...) + allArgs = append(allArgs, src, dst) + cmd := exec.CommandContext(ctx, rsync, allArgs...) 
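+	// For reference, the assembled command is roughly
+	//   rsync -e "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null <extra ssh args>" -avz [--exclude <pattern>]... <src> <dst>
+	// where one of src/dst is a user@host:path remote spec (a sketch, not an exact transcript).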
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + klog.Infof("Executing: %v", cmd.Args) + return cmd.Run() +} + +func executeScpCommand(ctx context.Context, user string, addr string, sshArguments []string, src string, dst string) error { + allArgs := []string{"-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null"} + allArgs = append(allArgs, sshArguments...) + + allArgs = append(allArgs, fmt.Sprintf("%s@%s:%s", user, addr, src), dst) + cmd := exec.CommandContext(ctx, "scp", allArgs...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + klog.Infof("Executing: %v", cmd.Args) + return cmd.Run() +} diff --git a/hack/prow/minitest/go.mod b/hack/prow/minitest/go.mod new file mode 100644 index 000000000000..c7990443ec02 --- /dev/null +++ b/hack/prow/minitest/go.mod @@ -0,0 +1,73 @@ +module minitest + +go 1.25.0 + +replace github.com/spf13/cobra => github.com/spf13/cobra v1.9.1 + +require ( + github.com/docker/go-connections v0.6.0 + github.com/google/uuid v1.6.0 + github.com/moby/moby/api v1.52.0-beta.1 + github.com/moby/moby/client v0.1.0-beta.0 + github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 + golang.org/x/crypto v0.41.0 + k8s.io/klog/v2 v2.130.1 + sigs.k8s.io/boskos v0.0.0-20250918103144-9ad89b7dd38e + sigs.k8s.io/kubetest2 v0.0.0-20250905115125-63caf6a17555 +) + +require ( + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bombsimon/logrusr/v4 v4.1.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/hcl v1.0.1-vault-5 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/pflag v1.0.9 // indirect + github.com/spf13/viper v1.19.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect + go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go4.org v0.0.0-20230225012048-214862532bf5 // indirect + golang.org/x/exp 
v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.28.0 // indirect + google.golang.org/protobuf v1.36.4 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apimachinery v0.32.2 // indirect + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect + sigs.k8s.io/controller-runtime v0.16.5 // indirect + sigs.k8s.io/prow v0.0.0-20240619181241-cfb8754e0459 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/hack/prow/minitest/go.sum b/hack/prow/minitest/go.sum new file mode 100644 index 000000000000..410d5e958056 --- /dev/null +++ b/hack/prow/minitest/go.sum @@ -0,0 +1,416 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bombsimon/logrusr/v4 v4.1.0 h1:uZNPbwusB0eUXlO8hIUwStE6Lr5bLN6IgYgG+75kuh4= +github.com/bombsimon/logrusr/v4 v4.1.0/go.mod h1:pjfHC5e59CvjTBIU3V3sGhFWFAnsnhOR03TRc6im0l8= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod 
h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod 
h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/moby/api v1.52.0-beta.1 h1:r5U4U72E7xSHh4zX72ndY1mA/FOGiAPiGiz2a8rBW+w= +github.com/moby/moby/api v1.52.0-beta.1/go.mod h1:8sBV0soUREiudtow4vqJGOxa4GyHI5vLQmvgKdHq5Ok= +github.com/moby/moby/client v0.1.0-beta.0 h1:eXzrwi0YkzLvezOBKHafvAWNmH1B9HFh4n13yb2QgFE= +github.com/moby/moby/client v0.1.0-beta.0/go.mod h1:irAv8jRi4yKKBeND96Y+3AM9ers+KaJYk9Vmcm7loxs= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= 
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify 
v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= +go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= +golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM= +google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+k8s.io/apimachinery v0.32.2 h1:yoQBR9ZGkA6Rgmhbp/yuT9/g+4lxtsGYwW6dR6BDPLQ=
+k8s.io/apimachinery v0.32.2/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
+k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk=
+pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/boskos v0.0.0-20250918103144-9ad89b7dd38e h1:OthwV3adPAnbNmaz/vkTHdeF27shWyY+wfd1ItyWLag=
+sigs.k8s.io/boskos v0.0.0-20250918103144-9ad89b7dd38e/go.mod h1:v2jnRiOFvt2Ank4LbddEAFjwfj/+bv7uvrOfcNENTM8=
+sigs.k8s.io/controller-runtime v0.16.5 h1:yr1cEJbX08xsTW6XEIzT13KHHmIyX8Umvme2cULvFZw=
+sigs.k8s.io/controller-runtime v0.16.5/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0=
+sigs.k8s.io/kubetest2 v0.0.0-20250905115125-63caf6a17555 h1:WL7/VL7GfsYTe2FUrhB5FE+B3c0rHzhFCUD2l98MHNU=
+sigs.k8s.io/kubetest2 v0.0.0-20250905115125-63caf6a17555/go.mod h1:pBd0cFaT0hDqmwQg+TIhyLgPMYaH66QMLcKd09XnKTI=
+sigs.k8s.io/prow v0.0.0-20240619181241-cfb8754e0459 h1:t8nFAgqf4A53NMuaML7xbBkaKcQtN3aqPPHDsVLfWWs=
+sigs.k8s.io/prow v0.0.0-20240619181241-cfb8754e0459/go.mod h1:B6hUZArrw0kjY/Q4I5qBJd6lQoZP0nloG0ot1cTMhaA=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/hack/prow/minitest/main.go b/hack/prow/minitest/main.go
new file mode 100644
index 000000000000..fafa77dd4fdb
--- /dev/null
+++ b/hack/prow/minitest/main.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2025 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+    "minitest/deployer"
+    "minitest/tester"
+
+    "flag"
+    "os"
+
+    "k8s.io/klog/v2"
+)
+
+var deployers = map[string]func(string) deployer.MiniTestDeployer{
+    // the boskos deployer requires a GCP project and starts instances in it to run the tests;
+    // the whole GCP project is cleaned up after the tests are done
+    "boskos": deployer.NewMiniTestBosKosDeployerFromConfigFile,
+    // the docker deployer is for testing minitest itself; it should never be used for testing minikube
+    "docker": deployer.NewMiniTestDockerDeployerFromConfigFile,
+}
+var testers = map[string]tester.MiniTestTester{
+    "kvm-docker-linux-amd64-integration": &tester.KVMDockerLinuxAmd64IntegrationTester{},
+}
+
+func main() {
+    flagSet := flag.CommandLine
+    deployerName := flagSet.String("deployer", "boskos", "deployer to use. Options: [boskos, docker]")
+    config := flagSet.String("config", "", "path to deployer config file")
+    testerName := flagSet.String("tester", "kvm-docker-linux-amd64-integration", "tester to use. Options: [kvm-docker-linux-amd64-integration]")
+    klog.InitFlags(flagSet)
+    flagSet.Parse(os.Args[1:])
+
+    dep := getDeployer(*deployerName)(*config)
+    tester := getTester(*testerName)
+
+    if err := dep.Up(); err != nil {
+        klog.Fatalf("failed to start deployer: %v", err)
+    }
+    var testErr error
+    if testErr = tester.Run(dep); testErr != nil {
+        klog.Errorf("failed to run tests: %v", testErr)
+    }
+
+    if err := dep.Down(); err != nil {
+        klog.Fatalf("failed to stop deployer: %v", err)
+    }
+    if testErr != nil {
+        os.Exit(1)
+    }
+}
+
+func getDeployer(name string) func(string) deployer.MiniTestDeployer {
+    d, ok := deployers[name]
+    if !ok {
+        klog.Fatalf("deployer %s not found. Available deployers: %v", name, deployers)
+    }
+    return d
+}
+
+func getTester(name string) tester.MiniTestTester {
+    t, ok := testers[name]
+    if !ok {
+        klog.Fatalf("tester %s not found. Available testers: %v", name, testers)
+    }
+    return t
+}
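The control flow in main.go is the part worth internalizing: bring the environment up, run the selected tester, always tear the environment down, and only then turn a test failure into a non-zero exit. A minimal, self-contained sketch of that pattern follows; the Env interface, fakeEnv, and runTests below are illustrative stand-ins, not code from this PR.

// lifecycle sketch: Up, Run, Down, then exit non-zero only if the tests failed
package main

import (
    "fmt"
    "os"
)

// Env is an illustrative stand-in for the deployer main.go drives via Up/Down.
type Env interface {
    Up() error
    Down() error
}

type fakeEnv struct{}

func (fakeEnv) Up() error   { fmt.Println("provisioning test environment"); return nil }
func (fakeEnv) Down() error { fmt.Println("tearing down test environment"); return nil }

// runTests stands in for tester.Run; it fails on purpose to show the exit path.
func runTests(Env) error { return fmt.Errorf("simulated test failure") }

func main() {
    var env Env = fakeEnv{}
    if err := env.Up(); err != nil {
        fmt.Fprintln(os.Stderr, "up failed:", err)
        os.Exit(1)
    }
    // keep the test error instead of returning early so Down always runs
    testErr := runTests(env)
    if err := env.Down(); err != nil {
        fmt.Fprintln(os.Stderr, "down failed:", err)
        os.Exit(1)
    }
    if testErr != nil {
        fmt.Fprintln(os.Stderr, "tests failed:", testErr)
        os.Exit(1)
    }
}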
diff --git a/hack/prow/minitest/tester/kvm_docker_linux_amd64_integration.go b/hack/prow/minitest/tester/kvm_docker_linux_amd64_integration.go
new file mode 100644
index 000000000000..0751cf280f9d
--- /dev/null
+++ b/hack/prow/minitest/tester/kvm_docker_linux_amd64_integration.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2025 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tester
+
+import (
+    "fmt"
+    "os"
+
+    "k8s.io/klog/v2"
+)
+
+var _ MiniTestTester = &KVMDockerLinuxAmd64IntegrationTester{}
+
+// KVMDockerLinuxAmd64IntegrationTester runs the integration tests with the kvm2 driver and the docker container runtime.
+type KVMDockerLinuxAmd64IntegrationTester struct {
+}
+
+// Run implements MiniTestTester.
+func (k *KVMDockerLinuxAmd64IntegrationTester) Run(runner MiniTestRunner) error {
+    if up, err := runner.IsUp(); err != nil || !up {
+        klog.Errorf("tester: deployed environment is not up: %v", err)
+    }
+    if err := runner.SyncToRemote(".", "~/minikube", []string{".cache"}); err != nil {
+        klog.Errorf("failed to sync files to the deployed environment: %v", err)
+    }
+    pr := os.Getenv("PULL_NUMBER")
+
+    var testErr error
+    // install docker and libvirtd first, then run the tests in a new shell
+    if err := runner.Execute("cd minikube && ./hack/prow/integration_kvm_docker_linux_x86-64_pre.sh"); err != nil {
+        klog.Errorf("failed to install docker in env: %v", err)
+        return err
+    }
+    if testErr = runner.Execute(fmt.Sprintf("cd minikube && PULL_NUMBER=\"%s\" ./hack/prow/integration_kvm_docker_linux_x86-64.sh", pr)); testErr != nil {
+        klog.Errorf("failed to execute command in env: %v", testErr)
+        // don't return here; we still want to collect the test reports
+    }
+
+    // prow requires result files to be copied to $ARTIFACTS; all other files will not be persisted
+    if err := copyFileToArtifact(runner); err != nil {
+        return err
+    }
+    return testErr
+}
diff --git a/hack/prow/minitest/tester/tester.go b/hack/prow/minitest/tester/tester.go
new file mode 100644
index 000000000000..fa64569e3e31
--- /dev/null
+++ b/hack/prow/minitest/tester/tester.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2025 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tester
+
+import (
+    "os"
+
+    "k8s.io/klog/v2"
+)
+
+type MiniTestRunner interface {
+    // IsUp should return true if a test cluster is successfully provisioned
+    IsUp() (bool, error)
+    // Execute runs a command in the deployed environment
+    Execute(args ...string) error
+    // SyncToRemote copies files from src on the host to dst in the deployed environment
+    SyncToRemote(src string, dst string, excludedPattern []string) error
+    // SyncToHost copies files from src in the deployed environment to dst on the host
+    SyncToHost(src string, dst string, excludedPattern []string) error
+}
+
+type MiniTestTester interface {
+    // Run should run the actual tests
+    Run(MiniTestRunner) error
+}
+
+func copyFileToArtifact(runner MiniTestRunner) error {
+    artifactLocation := os.Getenv("ARTIFACTS")
+    klog.Infof("copying to %s", artifactLocation)
+
+    if err := runner.SyncToHost("~/minikube/testout.txt", artifactLocation, nil); err != nil {
+        klog.Errorf("failed to sync testout.txt from deployer: %v", err)
+        return err
+    }
+    if err := runner.SyncToHost("~/minikube/test.json", artifactLocation, nil); err != nil {
+        klog.Errorf("failed to sync test.json from deployer: %v", err)
+        return err
+    }
+    if err := runner.SyncToHost("~/minikube/junit-unit.xml", artifactLocation, nil); err != nil {
+        klog.Errorf("failed to sync junit-unit.xml from deployer: %v", err)
+        return err
+    }
+    if err := runner.SyncToHost("~/minikube/test.html", artifactLocation, nil); err != nil {
+        klog.Errorf("failed to sync test.html from deployer: %v", err)
+        return err
+    }
+    if err := runner.SyncToHost("~/minikube/test_summary.json", artifactLocation, nil); err != nil {
+        klog.Errorf("failed to sync test_summary.json from deployer: %v", err)
+        return err
+    }
+    return nil
+}
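These two interfaces are the extension point: a deployer supplies a MiniTestRunner, and every tester is written only against that contract, which also makes testers easy to exercise without real infrastructure. A self-contained sketch under that assumption follows; the interface definitions are repeated only to keep the example compilable on its own, and fakeRunner and echoTester are illustrative, not part of this PR.

// sketch: an in-memory MiniTestRunner fake and a trivial MiniTestTester
package main

import "fmt"

type MiniTestRunner interface {
    IsUp() (bool, error)
    Execute(args ...string) error
    SyncToRemote(src, dst string, excludedPattern []string) error
    SyncToHost(src, dst string, excludedPattern []string) error
}

type MiniTestTester interface {
    Run(MiniTestRunner) error
}

// fakeRunner pretends every operation succeeds and just logs it.
type fakeRunner struct{}

func (fakeRunner) IsUp() (bool, error) { return true, nil }
func (fakeRunner) Execute(args ...string) error {
    fmt.Println("execute:", args)
    return nil
}
func (fakeRunner) SyncToRemote(src, dst string, _ []string) error {
    fmt.Printf("sync %s -> remote:%s\n", src, dst)
    return nil
}
func (fakeRunner) SyncToHost(src, dst string, _ []string) error {
    fmt.Printf("sync remote:%s -> %s\n", src, dst)
    return nil
}

// echoTester runs a single command in the deployed environment.
type echoTester struct{}

func (echoTester) Run(r MiniTestRunner) error {
    if up, err := r.IsUp(); err != nil || !up {
        return fmt.Errorf("environment not up: %v", err)
    }
    return r.Execute("echo hello from the test environment")
}

func main() {
    var t MiniTestTester = echoTester{}
    if err := t.Run(fakeRunner{}); err != nil {
        fmt.Println("tester failed:", err)
    }
}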
diff --git a/hack/prow/prow.mk b/hack/prow/prow.mk
new file mode 100644
index 000000000000..47004ad925c8
--- /dev/null
+++ b/hack/prow/prow.mk
@@ -0,0 +1,19 @@
+.PHONY: integration-prow-docker-docker-linux-x86-64 integration-prow-kvm-docker-linux-x86-64
+
+
+integration-prow-docker-docker-linux-x86-64:
+# build first
+# container-runtime=docker driver=docker on linux/amd64
+	./hack/prow/minikube_cross_build.sh $(GO_VERSION) linux amd64
+	./hack/prow/util/integration_prow_wrapper.sh ./hack/prow/integration_docker_docker_linux_x86-64.sh
+
+integration-prow-kvm-docker-linux-x86-64:
+# build first
+# container-runtime=docker driver=kvm on linux/amd64
+	./hack/prow/minikube_cross_build.sh $(GO_VERSION) linux amd64
+# set up ssh keys for the gcloud cli. These env vars are set by test-infra
+	mkdir -p -m 0700 ~/.ssh
+	cp -f "${GCE_SSH_PRIVATE_KEY_FILE}" ~/.ssh/google_compute_engine
+	cp -f "${GCE_SSH_PUBLIC_KEY_FILE}" ~/.ssh/google_compute_engine.pub
+	GOTOOLCHAIN=auto go build -C ./hack/prow/minitest -o $(PWD)/out/minitest .
+	./out/minitest --deployer boskos --tester kvm-docker-linux-amd64-integration --config hack/prow/kvm.json
\ No newline at end of file
diff --git a/hack/prow/util/integration_prow_wrapper.sh b/hack/prow/util/integration_prow_wrapper.sh
new file mode 100755
index 000000000000..27b6bb720d17
--- /dev/null
+++ b/hack/prow/util/integration_prow_wrapper.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Copyright 2025 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -x
+TARGET_SCRIPT=$1
+# run the target script as the minikube user and remember its exit code
+./hack/prow/util/run_with_minikube_user.sh "$TARGET_SCRIPT"
+result=$?
+# collect the logs as root user
+echo "test finished with exit code $result"
+
+items=("testout.txt" "test.json" "junit-unit.xml" "test.html" "test_summary.json")
+for item in "${items[@]}"; do
+	echo "Collecting ${item} to ${ARTIFACTS}/${item}"
+	cp "${item}" "${ARTIFACTS}/${item}"
+done
+exit $result
diff --git a/hack/prow/util/run_with_minikube_user.sh b/hack/prow/util/run_with_minikube_user.sh
new file mode 100755
index 000000000000..4d16a0675ff7
--- /dev/null
+++ b/hack/prow/util/run_with_minikube_user.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+# Copyright 2025 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+set -x
+
+# when docker is the driver, we run the integration tests directly in the prow cluster;
+# by default, prow jobs run as root, so we must switch to a non-root user to use the docker driver
+NEW_USER="minikube"
+TARGET_SCRIPT=$1
+
+if [ "$(whoami)" == "root" ]; then
+	useradd -m -s /bin/bash "$NEW_USER"
+fi
+chown -R "$NEW_USER":"$NEW_USER" .
+# install sudo if it is not present
+apt-get update && apt-get install -y sudo
+# give the new user passwordless sudo
+echo "$NEW_USER ALL=(ALL) NOPASSWD:ALL" > "/etc/sudoers.d/$NEW_USER"
+chmod 440 "/etc/sudoers.d/$NEW_USER"
+# add the new user to the docker group
+usermod -aG docker "$NEW_USER"
+# run the target script as the new user
+su "$NEW_USER" -c "$TARGET_SCRIPT"
\ No newline at end of file
diff --git a/test/integration/no_kubernetes_test.go b/test/integration/no_kubernetes_test.go
index 71fbd8fbff03..516df7d43670 100644
--- a/test/integration/no_kubernetes_test.go
+++ b/test/integration/no_kubernetes_test.go
@@ -39,7 +39,7 @@ func TestNoKubernetes(t *testing.T) {
 	}
 	type validateFunc func(context.Context, *testing.T, string)
 	profile := UniqueProfileName("NoKubernetes")
-	ctx, cancel := context.WithTimeout(context.Background(), Minutes(5))
+	ctx, cancel := context.WithTimeout(context.Background(), Minutes(60))
 	defer Cleanup(t, profile, cancel)
 
 	// Serial tests
diff --git a/test/integration/scheduled_stop_test.go b/test/integration/scheduled_stop_test.go
index 2b0181be2ea4..d3a1f71ee073 100644
--- a/test/integration/scheduled_stop_test.go
+++ b/test/integration/scheduled_stop_test.go
@@ -79,7 +79,7 @@ func TestScheduledStopUnix(t *testing.T) {
 		t.Skip("--schedule does not work with the none driver")
 	}
 	profile := UniqueProfileName("scheduled-stop")
-	ctx, cancel := context.WithTimeout(context.Background(), Minutes(5))
+	ctx, cancel := context.WithTimeout(context.Background(), Minutes(20))
 	defer CleanupWithLogs(t, profile, cancel)
 	startMinikube(ctx, t, profile)