Merge pull request #16200 from spowelljr/removeOldRegistry

Remove remaining references to k8s.gcr.io
Steven Powell 2023-03-30 13:36:17 -07:00 committed by GitHub
commit 9e661c8cf9
21 changed files with 69 additions and 79 deletions

@@ -77,5 +77,5 @@ spec:
   - name: root-mount
     mountPath: /root
   containers:
-  - image: "{{default "k8s.gcr.io" .ImageRepository}}/{{.Images.Pause}}"
+  - image: "{{default "registry.k8s.io" .ImageRepository}}/{{.Images.Pause}}"
     name: pause
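Note: the `default` template function is what keeps this rename backward compatible — a user-supplied `--image-repository` still wins, and `registry.k8s.io` is only the fallback. A minimal sketch of that resolution (illustrative only; it uses a sprig-style `default` helper rather than minikube's actual template setup):

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Illustrative stand-in for the data minikube renders this manifest with.
	data := struct {
		ImageRepository string
		Images          struct{ Pause string }
	}{}
	data.Images.Pause = "pause:3.6"

	// "default" returns the fallback when the supplied value is empty.
	funcs := template.FuncMap{
		"default": func(fallback, value string) string {
			if value == "" {
				return fallback
			}
			return value
		},
	}

	tmpl := template.Must(template.New("img").Funcs(funcs).Parse(
		`{{default "registry.k8s.io" .ImageRepository}}/{{.Images.Pause}}`))

	// Prints "registry.k8s.io/pause:3.6"; setting ImageRepository to a
	// mirror would print "<mirror>/pause:3.6" instead.
	_ = tmpl.Execute(os.Stdout, data)
}
```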

@@ -29,7 +29,7 @@ oom_score = 0
 stream_server_address = ""
 stream_server_port = "10010"
 enable_selinux = false
-sandbox_image = "{{default "k8s.gcr.io" .ImageRepository}}/pause:3.1"
+sandbox_image = "{{default "registry.k8s.io" .ImageRepository}}/pause:3.1"
 stats_collect_period = 10
 systemd_cgroup = false
 enable_tls_streaming = false

@@ -31,7 +31,7 @@ oom_score = 0
 stream_server_address = ""
 stream_server_port = "10010"
 enable_selinux = false
-sandbox_image = "k8s.gcr.io/pause:3.6"
+sandbox_image = "registry.k8s.io/pause:3.6"
 stats_collect_period = 10
 enable_tls_streaming = false
 max_container_log_line_size = 16384

@@ -31,7 +31,7 @@ oom_score = 0
 stream_server_address = ""
 stream_server_port = "10010"
 enable_selinux = false
-sandbox_image = "k8s.gcr.io/pause:3.6"
+sandbox_image = "registry.k8s.io/pause:3.6"
 stats_collect_period = 10
 enable_tls_streaming = false
 max_container_log_line_size = 16384

@@ -339,7 +339,7 @@ global_auth_file = ""
 # The image used to instantiate infra containers.
 # This option supports live configuration reload.
-pause_image = "k8s.gcr.io/pause:3.2"
+pause_image = "registry.k8s.io/pause:3.2"
 # The path to a file containing credentials specific for pulling the pause_image from
 # above. The file is similar to that of /var/lib/kubelet/config.json

@@ -339,7 +339,7 @@ global_auth_file = ""
 # The image used to instantiate infra containers.
 # This option supports live configuration reload.
-pause_image = "k8s.gcr.io/pause:3.2"
+pause_image = "registry.k8s.io/pause:3.2"
 # The path to a file containing credentials specific for pulling the pause_image from
 # above. The file is similar to that of /var/lib/kubelet/config.json

@@ -31,7 +31,7 @@ oom_score = 0
 stream_server_address = ""
 stream_server_port = "10010"
 enable_selinux = false
-sandbox_image = "k8s.gcr.io/pause:3.6"
+sandbox_image = "registry.k8s.io/pause:3.6"
 stats_collect_period = 10
 enable_tls_streaming = false
 max_container_log_line_size = 16384

@@ -382,7 +382,7 @@ func supportLegacyIngress(addon *assets.Addon, cc config.ClusterConfig) error {
 		"KubeWebhookCertgenPatch": "docker.io/jettech/kube-webhook-certgen:v1.5.1@sha256:950833e19ade18cd389d647efb88992a7cc077abedef343fa59e012d376d79b7",
 	}
 	addon.Registries = map[string]string{
-		"IngressController": "k8s.gcr.io",
+		"IngressController": "registry.k8s.io",
 	}
 	return nil
 }

@@ -246,8 +246,8 @@ var Addons = map[string]*Addon{
 		"Alpine": "alpine:3.6@sha256:66790a2b79e1ea3e1dabac43990c54aca5d1ddf268d9a5a0285e4167c8b24475",
 		"Kibana": "kibana/kibana:5.6.2@sha256:cd948a9bda4622f1437afc4a3e78be6c8c25fc62f40aa0376f3d690f2436568f",
 	}, map[string]string{
-		"Elasticsearch":        "k8s.gcr.io",
-		"FluentdElasticsearch": "k8s.gcr.io",
+		"Elasticsearch":        "registry.k8s.io",
+		"FluentdElasticsearch": "registry.k8s.io",
 		"Kibana":               "docker.elastic.co",
 		"Alpine":               "docker.io",
 	}),
@@ -446,8 +446,8 @@ var Addons = map[string]*Addon{
 		"NvidiaDriverInstaller": "minikube-nvidia-driver-installer:e2d9b43228decf5d6f7dce3f0a85d390f138fa01",
 		"Pause":                 "pause:2.0@sha256:9ce5316f9752b8347484ab0f6778573af15524124d52b93230b9a0dcc987e73e",
 	}, map[string]string{
-		"NvidiaDriverInstaller": "k8s.gcr.io",
-		"Pause":                 "k8s.gcr.io",
+		"NvidiaDriverInstaller": "registry.k8s.io",
+		"Pause":                 "registry.k8s.io",
 	}),
 	"nvidia-gpu-device-plugin": NewAddon([]*BinAsset{
 		MustBinAsset(addons.NvidiaGpuDevicePluginAssets,
@@ -458,7 +458,7 @@ var Addons = map[string]*Addon{
 	}, false, "nvidia-gpu-device-plugin", "3rd party (Nvidia)", "", "https://minikube.sigs.k8s.io/docs/tutorials/nvidia_gpu/", map[string]string{
 		"NvidiaDevicePlugin": "nvidia-gpu-device-plugin@sha256:4b036e8844920336fa48f36edeb7d4398f426d6a934ba022848deed2edbf09aa",
 	}, map[string]string{
-		"NvidiaDevicePlugin": "k8s.gcr.io",
+		"NvidiaDevicePlugin": "registry.k8s.io",
 	}),
 	"logviewer": NewAddon([]*BinAsset{
 		MustBinAsset(addons.LogviewerAssets,
@@ -591,7 +591,7 @@ var Addons = map[string]*Addon{
 		"GCPAuthWebhook": "k8s-minikube/gcp-auth-webhook:v0.0.14@sha256:60fc3f336083dcd0a472caa51edfbf497d4df37115bb65e2d12739ed461db925",
 	}, map[string]string{
 		"GCPAuthWebhook":     "gcr.io",
-		"KubeWebhookCertgen": "k8s.gcr.io",
+		"KubeWebhookCertgen": "registry.k8s.io",
 	}),
 	"volumesnapshots": NewAddon([]*BinAsset{
 		// make sure the order of apply. `csi-hostpath-snapshotclass` must be the first position, because it depends on `snapshot.storage.k8s.io_volumesnapshotclasses`
@@ -948,8 +948,8 @@ func GenerateTemplateData(addon *Addon, cc *config.ClusterConfig, netInfo Networ
 	// tl;dr If the user specified a custom image remove the default registry
 	// Without the line below, if you try to overwrite an image the default registry is still used in the templating
 	// Example - image name: MetricsScraper, default registry: docker.io, default image: kubernetesui/metrics-scraper
-	// Passed on addon enable: --images=MetricsScraper=k8s.gcr.io/echoserver:1.4
-	// Without this line the resulting image would be docker.io/k8s.gcr.io/echoserver:1.4
+	// Passed on addon enable: --images=MetricsScraper=registry.k8s.io/echoserver:1.4
+	// Without this line the resulting image would be docker.io/registry.k8s.io/echoserver:1.4
 	if _, ok := cc.CustomAddonImages[name]; ok {
 		opts.Registries[name] = ""
 	}
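Note: the registry override in the last hunk matters because addon images are rendered as registry plus image during templating. A hypothetical, condensed sketch of that resolution (not the actual `GenerateTemplateData` body; the helper name and signature are illustrative):

```go
package main

import "fmt"

// resolveAddonImage condenses the logic the comment above describes: a
// custom image is treated as fully qualified, so the default registry is
// cleared to avoid double-prefixing.
func resolveAddonImage(name, defaultRegistry, defaultImage string, custom map[string]string) string {
	image, registry := defaultImage, defaultRegistry
	if ci, ok := custom[name]; ok {
		image, registry = ci, ""
	}
	if registry == "" {
		return image
	}
	return registry + "/" + image
}

func main() {
	custom := map[string]string{"MetricsScraper": "registry.k8s.io/echoserver:1.4"}
	// Prints "registry.k8s.io/echoserver:1.4",
	// not the broken "docker.io/registry.k8s.io/echoserver:1.4".
	fmt.Println(resolveAddonImage("MetricsScraper", "docker.io", "kubernetesui/metrics-scraper", custom))
}
```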

@@ -34,7 +34,7 @@ import (
 )
 
 const (
-	// builds a docker v2 repository API call in the format https://k8s.gcr.io/v2/coredns/coredns/tags/list
+	// builds a docker v2 repository API call in the format https://registry.k8s.io/v2/coredns/coredns/tags/list
 	tagURLTemplate = "https://%s/v2/%s/tags/list"
 )
@@ -70,7 +70,7 @@ func componentImage(name string, v semver.Version, mirror string) string {
 	return fmt.Sprintf("%s:v%s", path.Join(kubernetesRepo(mirror), name), v)
 }
 
-// fixes 13136 by getting the latest image version from the k8s.gcr.io repository instead of hardcoded
+// fixes 13136 by getting the latest image version from the registry.k8s.io repository instead of hardcoded
 func findLatestTagFromRepository(url string, lastKnownGood string) string {
 	client := &http.Client{}
 	errorMsg := fmt.Sprintf("Failed to get latest image version for %s, reverting to version %s.", url, lastKnownGood)
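Note: the tags endpoint queried here is the standard Docker Registry v2 API, which registry.k8s.io also serves. A condensed sketch of the lookup with the fallback behavior the surrounding code implies (the JSON shape follows the v2 spec; the helper name and the naive last-tag pick are illustrative — the real code settles on a concrete version rather than blindly taking the final entry):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

const tagURLTemplate = "https://%s/v2/%s/tags/list"

// latestTag fetches the tag list and falls back to lastKnownGood on any failure.
func latestTag(registry, repo, lastKnownGood string) string {
	url := fmt.Sprintf(tagURLTemplate, registry, repo)
	resp, err := http.Get(url)
	if err != nil {
		return lastKnownGood
	}
	defer resp.Body.Close()

	// Docker Registry v2 response shape: {"name": "...", "tags": ["...", ...]}
	var payload struct {
		Tags []string `json:"tags"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil || len(payload.Tags) == 0 {
		return lastKnownGood
	}
	return payload.Tags[len(payload.Tags)-1] // naive pick; real code compares versions
}

func main() {
	// e.g. https://registry.k8s.io/v2/coredns/coredns/tags/list
	fmt.Println(latestTag("registry.k8s.io", "coredns/coredns", "v1.9.3"))
}
```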

@@ -76,7 +76,7 @@ type ClusterConfig struct {
 	KubernetesConfig      KubernetesConfig
 	Nodes                 []Node
 	Addons                map[string]bool
-	CustomAddonImages     map[string]string // Maps image names to the image to use for addons. e.g. Dashboard -> k8s.gcr.io/echoserver:1.4 makes dashboard addon use echoserver for its Dashboard deployment.
+	CustomAddonImages     map[string]string // Maps image names to the image to use for addons. e.g. Dashboard -> registry.k8s.io/echoserver:1.4 makes dashboard addon use echoserver for its Dashboard deployment.
 	CustomAddonRegistries map[string]string // Maps image names to the registry to use for addons. See CustomAddonImages for example.
 	VerifyComponents      map[string]bool   // map of components to verify and wait for after start.
 	StartHostTimeout      time.Duration

@@ -61,8 +61,8 @@ func DeleteFromCacheDir(images []string) error {
 // SaveToDir will cache images on the host
 //
 // The cache directory currently caches images using the imagename_tag
-// For example, k8s.gcr.io/kube-addon-manager:v6.5 would be
-// stored at $CACHE_DIR/k8s.gcr.io/kube-addon-manager_v6.5
+// For example, registry.k8s.io/kube-addon-manager:v6.5 would be
+// stored at $CACHE_DIR/registry.k8s.io/kube-addon-manager_v6.5
 func SaveToDir(images []string, cacheDir string, overwrite bool) error {
 	var g errgroup.Group
 	for _, image := range images {
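Note: the imagename_tag scheme described above is just the image reference with the tag separator swapped for an underscore so it can live on disk. A small sketch of that mapping (illustrative helper, not minikube's exact code; digest references using `@sha256:` would need extra care):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// cachePath maps an image reference to its on-disk location by replacing
// the final ":" (the tag separator) with "_".
func cachePath(cacheDir, image string) string {
	file := image
	if i := strings.LastIndex(image, ":"); i != -1 {
		file = image[:i] + "_" + image[i+1:]
	}
	return filepath.Join(cacheDir, file)
}

func main() {
	// Prints /home/me/.minikube/cache/images/registry.k8s.io/kube-addon-manager_v6.5
	fmt.Println(cachePath("/home/me/.minikube/cache/images", "registry.k8s.io/kube-addon-manager:v6.5"))
}
```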

@@ -53,7 +53,7 @@ VBoxManage.exe: error: Details: code E_FAIL (0x80004005), component MachineWrap,
 	{4913, "linux", "PR_KVM_CREATE_BUSY", `Unable to start VM: create: Error creating machine: Error in driver during machine creation: error creating VM: virError(Code=1, Domain=10, Message='internal error: process exited while connecting to monitor: ioctl(KVM_CREATE_VM) failed: 16 Device or resource busy`},
 	{5950, "linux", "PR_KVM_ISO_PERMISSION", `Retriable failure: create: Error creating machine: Error in driver during machine creation: error creating VM: virError(Code=1, Domain=10, Message='internal error: qemu unexpectedly closed the monitor: 2019-11-19T16:08:16.757609Z qemu-kvm: -drive file=/home/lnicotra/.minikube/machines/minikube/boot2docker.iso,format=raw,if=none,id=drive-scsi0-0-0-2,readonly=on: could not open disk image /home/lnicotra/.minikube/machines/minikube/boot2docker.iso: Could not open '/home/lnicotra/.minikube/machines/minikube/boot2docker.iso': Permission denied'`},
 	{5836, "", "SVC_OPEN_NOT_FOUND", `Error opening service: Service kubernetes-bootcamp was not found in "default" namespace. You may select another namespace by using 'minikube service kubernetes-bootcamp -n : Temporary Error: Error getting service kubernetes-bootcamp: services "kubernetes-bootcamp" not found`},
-	{3898, "", "INET_PULL_TIMEOUT", `[ERROR ImagePull]: failed to pull image k8s.gcr.io/kube-controller-manager:v1.17.0: output: Error response from daemon: Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)`},
+	{3898, "", "INET_PULL_TIMEOUT", `[ERROR ImagePull]: failed to pull image registry.k8s.io/kube-controller-manager:v1.17.0: output: Error response from daemon: Get https://registry.k8s.io/v2/: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)`},
 	{6079, "darwin", "PR_HYPERKIT_CRASHED", `Error creating machine: Error in driver during machine creation: hyperkit crashed! command line:`},
 	{5636, "linux", "INET_DEFAULT_ROUTE", `Unable to get VM IP address: unable to select an IP from default routes.`},
 	{6087, "", "GUEST_DOES_NOT_EXIST", `Error getting host status: state: machine does not exist`},

@@ -294,7 +294,7 @@ Steps:
 asserts basic "service" command functionality
 
 #### validateServiceCmdDeployApp
-Create a new `k8s.gcr.io/echoserver` deployment
+Create a new `registry.k8s.io/echoserver` deployment
 
 #### validateServiceCmdList
 Run `minikube service list` to make sure the newly created service is correctly listed in the output
@@ -314,7 +314,7 @@ Run `minikube service` with a regular `--url` to make sure the HTTP endpoint URL
 #### validateServiceCmdConnect
 Steps:
-- Create a new `k8s.gcr.io/echoserver` deployment
+- Create a new `registry.k8s.io/echoserver` deployment
 - Run `minikube service` with a regular `--url` to make sure the HTTP endpoint URL of the service is printed
 - Make sure we can hit the endpoint URL with an HTTP GET request

@@ -19,8 +19,8 @@ minikube addons images efk
 |----------------------|------------------------------|-------------------|
 |      IMAGE NAME      |        DEFAULT IMAGE         | DEFAULT REGISTRY  |
 |----------------------|------------------------------|-------------------|
-| Elasticsearch        | elasticsearch:v5.6.2         | k8s.gcr.io        |
-| FluentdElasticsearch | fluentd-elasticsearch:v2.0.2 | k8s.gcr.io        |
+| Elasticsearch        | elasticsearch:v5.6.2         | registry.k8s.io   |
+| FluentdElasticsearch | fluentd-elasticsearch:v2.0.2 | registry.k8s.io   |
 | Alpine               | alpine:3.6                   |                   |
 | Kibana               | kibana/kibana:5.6.2          | docker.elastic.co |
 |----------------------|------------------------------|-------------------|
@@ -31,7 +31,7 @@ An empty registry means the image is stored locally or default registry `docker.
 The `IMAGE NAME` column is used to customize the corresponding image and registry.
 
-Assume we have a private registry at `192.168.10.2:5555` to replace `k8s.gcr.io` and a locally built Kibana called `kibana/kibana:5.6.2-custom`.
+Assume we have a private registry at `192.168.10.2:5555` to replace `registry.k8s.io` and a locally built Kibana called `kibana/kibana:5.6.2-custom`.
 
 We could load local images to minikube by:
@@ -54,4 +54,4 @@ minikube addons enable efk --images="Kibana=kibana/kibana:5.6.2-custom" --regist
 🌟 The 'efk' addon is enabled
 ```
 Now the `efk` addon is using the custom registry and images.

@@ -10,9 +10,11 @@ description: >
 minikube has built-in support for caching downloaded resources into `$MINIKUBE_HOME/cache`. Here are the important file locations:
 
 * `~/.minikube/cache` - Top-level folder
-* `~/.minikube/cache/iso` - VM ISO image. Typically updated once per major minikube release.
-* `~/.minikube/cache/images` - Docker images used by Kubernetes.
-* `~/.minikube/cache/<version>` - Kubernetes binaries, such as `kubeadm` and `kubelet`
+* `~/.minikube/cache/iso/<arch>` - VM ISO image. Typically updated once per major minikube release.
+* `~/.minikube/cache/kic/<arch>` - Docker base image. Typically updated once per major minikube release.
+* `~/.minikube/cache/images/<arch>` - Images used by Kubernetes, only exists if preload doesn't exist.
+* `~/.minikube/cache/<os>/<arch>/<version>` - Kubernetes binaries, such as `kubeadm` and `kubelet`
+* `~/.minikube/cache/preloaded-tarball` - Tarball of preloaded images to improve start time
 
 ## Kubernetes image cache
@@ -22,25 +24,13 @@ NOTE: the `none` driver caches images directly into Docker rather than a separat
 ## Sharing the minikube cache
 
-For offline use on other hosts, one can copy the contents of `~/.minikube/cache`. As of the v1.0 release, this directory contains 685MB of data:
+For offline use on other hosts, one can copy the contents of `~/.minikube/cache`.
 
 ```text
-cache/iso/minikube-v1.0.0.iso
-cache/images/gcr.io/k8s-minikube/storage-provisioner_v1.8.1
-cache/images/k8s.gcr.io/k8s-dns-sidecar-amd64_1.14.13
-cache/images/k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64_1.14.13
-cache/images/k8s.gcr.io/kubernetes-dashboard-amd64_v1.10.1
-cache/images/k8s.gcr.io/kube-scheduler_v1.14.0
-cache/images/k8s.gcr.io/coredns_1.3.1
-cache/images/k8s.gcr.io/kube-controller-manager_v1.14.0
-cache/images/k8s.gcr.io/kube-apiserver_v1.14.0
-cache/images/k8s.gcr.io/pause_3.1
-cache/images/k8s.gcr.io/etcd_3.3.10
-cache/images/k8s.gcr.io/kube-addon-manager_v9.0
-cache/images/k8s.gcr.io/k8s-dns-kube-dns-amd64_1.14.13
-cache/images/k8s.gcr.io/kube-proxy_v1.14.0
-cache/v1.14.0/kubeadm
-cache/v1.14.0/kubelet
+cache/linux/amd64/v1.26.1/kubectl
+cache/kic/amd64/kicbase_v0.0.37@sha256_8bf7a0e8a062bc5e2b71d28b35bfa9cc862d9220e234e86176b3785f685d8b15.tar
+cache/preloaded-tarball/preloaded-images-k8s-v18-v1.26.1-docker-overlay2-amd64.tar.lz4
+cache/preloaded-tarball/preloaded-images-k8s-v18-v1.26.1-docker-overlay2-amd64.tar.lz4.checksum
 ```
 
 If any of these files exist, minikube will copy them into the VM directly rather than pulling them from the internet.

@@ -392,7 +392,7 @@ buildctl --addr unix://buildkitd.sock build \
 	--frontend=dockerfile.v0 \
 	--local context=. \
 	--local dockerfile=. \
-	--output type=image,name=k8s.gcr.io/username/imagename:latest
+	--output type=image,name=registry.k8s.io/username/imagename:latest
 ```
 
 Now you can 'build' against the storage inside minikube, which is instantly accessible to the kubernetes cluster.

@@ -72,8 +72,8 @@ This error indicates that the host:port combination defined by HTTPS_PROXY or HT
 ```text
 Unable to pull images, which may be OK:
-failed to pull image "k8s.gcr.io/kube-apiserver:v1.13.3": output: Error response from daemon:
-Get https://k8s.gcr.io/v2/: net/http: request canceled while waiting for connection
+failed to pull image "registry.k8s.io/kube-apiserver:v1.13.3": output: Error response from daemon:
+Get https://registry.k8s.io/v2/: net/http: request canceled while waiting for connection
 (Client.Timeout exceeded while awaiting headers)
 ```
@@ -82,9 +82,9 @@ This error indicates that the container runtime running within the VM does not h
 #### x509: certificate signed by unknown authority
 
 ```text
-[ERROR ImagePull]: failed to pull image k8s.gcr.io/kube-apiserver:v1.13.3:
+[ERROR ImagePull]: failed to pull image registry.k8s.io/kube-apiserver:v1.13.3:
 output: Error response from daemon:
-Get https://k8s.gcr.io/v2/: x509: certificate signed by unknown authority
+Get https://registry.k8s.io/v2/: x509: certificate signed by unknown authority
 ```
 
 This is because the minikube VM is stuck behind a proxy that rewrites HTTPS responses to contain its own TLS certificate. The [solution](https://github.com/kubernetes/minikube/issues/3613#issuecomment-461034222) is to install the proxy certificate into a location that is copied to the VM at startup, so that it can be validated.

@@ -278,7 +278,7 @@ func runImageList(ctx context.Context, t *testing.T, profile, testName, format,
 func expectedImageFormat(format string) []string {
 	return []string{
-		fmt.Sprintf(format, "k8s.gcr.io/pause"),
+		fmt.Sprintf(format, "registry.k8s.io/pause"),
 		fmt.Sprintf(format, "registry.k8s.io/kube-apiserver"),
 	}
 }
@@ -1040,7 +1040,7 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
 	// docs: Run `minikube cache add` and make sure we can add a remote image to the cache
 	t.Run("add_remote", func(t *testing.T) {
-		for _, img := range []string{"k8s.gcr.io/pause:3.1", "k8s.gcr.io/pause:3.3", "k8s.gcr.io/pause:latest"} {
+		for _, img := range []string{"registry.k8s.io/pause:3.1", "registry.k8s.io/pause:3.3", "registry.k8s.io/pause:latest"} {
 			rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img))
 			if err != nil {
 				t.Errorf("failed to 'cache add' remote image %q. args %q err %v", img, rr.Command(), err)
@@ -1093,10 +1093,10 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
 	})
 
 	// docs: Run `minikube cache delete` and make sure we can delete an image from the cache
-	t.Run("delete_k8s.gcr.io/pause:3.3", func(t *testing.T) {
-		rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "k8s.gcr.io/pause:3.3"))
+	t.Run("CacheDelete", func(t *testing.T) {
+		rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "registry.k8s.io/pause:3.3"))
 		if err != nil {
-			t.Errorf("failed to delete image k8s.gcr.io/pause:3.3 from cache. args %q: %v", rr.Command(), err)
+			t.Errorf("failed to delete image registry.k8s.io/pause:3.3 from cache. args %q: %v", rr.Command(), err)
 		}
 	})
@@ -1106,11 +1106,11 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
 		if err != nil {
 			t.Errorf("failed to do cache list. args %q: %v", rr.Command(), err)
 		}
-		if !strings.Contains(rr.Output(), "k8s.gcr.io/pause") {
-			t.Errorf("expected 'cache list' output to include 'k8s.gcr.io/pause' but got: ***%s***", rr.Output())
+		if !strings.Contains(rr.Output(), "registry.k8s.io/pause") {
+			t.Errorf("expected 'cache list' output to include 'registry.k8s.io/pause' but got: ***%s***", rr.Output())
 		}
-		if strings.Contains(rr.Output(), "k8s.gcr.io/pause:3.3") {
-			t.Errorf("expected 'cache list' output not to include k8s.gcr.io/pause:3.3 but got: ***%s***", rr.Output())
+		if strings.Contains(rr.Output(), "registry.k8s.io/pause:3.3") {
+			t.Errorf("expected 'cache list' output not to include registry.k8s.io/pause:3.3 but got: ***%s***", rr.Output())
 		}
 	})
@@ -1128,7 +1128,7 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
 	// docs: Delete an image from minikube node and run `minikube cache reload` to make sure the image is brought back correctly
 	t.Run("cache_reload", func(t *testing.T) { // deleting image inside minikube node manually and expecting reload to bring it back
-		img := "k8s.gcr.io/pause:latest"
+		img := "registry.k8s.io/pause:latest"
 
 		// deleting image inside minikube node manually
 		var binary string
@@ -1163,7 +1163,7 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
 	// delete will clean up the cached images since they are global and all other tests will load it for no reason
 	t.Run("delete", func(t *testing.T) {
-		for _, img := range []string{"k8s.gcr.io/pause:3.1", "k8s.gcr.io/pause:latest"} {
+		for _, img := range []string{"registry.k8s.io/pause:3.1", "registry.k8s.io/pause:latest"} {
 			rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", img))
 			if err != nil {
 				t.Errorf("failed to delete %s from cache. args %q: %v", img, rr.Command(), err)
@@ -1425,16 +1425,16 @@ func validateServiceCmd(ctx context.Context, t *testing.T, profile string) {
 	validateServiceCmdURL(ctx, t, profile)
 }
 
-// validateServiceCmdDeployApp Create a new `k8s.gcr.io/echoserver` deployment
+// validateServiceCmdDeployApp Create a new `registry.k8s.io/echoserver` deployment
 func validateServiceCmdDeployApp(ctx context.Context, t *testing.T, profile string) {
 	t.Run("DeployApp", func(t *testing.T) {
 		var rr *RunResult
 		var err error
-		// k8s.gcr.io/echoserver is not multi-arch
+		// registry.k8s.io/echoserver is not multi-arch
 		if arm64Platform() {
-			rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node", "--image=k8s.gcr.io/echoserver-arm:1.8"))
+			rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node", "--image=registry.k8s.io/echoserver-arm:1.8"))
 		} else {
-			rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node", "--image=k8s.gcr.io/echoserver:1.8"))
+			rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node", "--image=registry.k8s.io/echoserver:1.8"))
 		}
 
 		if err != nil {
@@ -1619,12 +1619,12 @@ func validateServiceCmdConnect(ctx context.Context, t *testing.T, profile string
 		var rr *RunResult
 		var err error
 
-		// docs: Create a new `k8s.gcr.io/echoserver` deployment
-		// k8s.gcr.io/echoserver is not multi-arch
+		// docs: Create a new `registry.k8s.io/echoserver` deployment
+		// registry.k8s.io/echoserver is not multi-arch
 		if arm64Platform() {
-			rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node-connect", "--image=k8s.gcr.io/echoserver-arm:1.8"))
+			rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node-connect", "--image=registry.k8s.io/echoserver-arm:1.8"))
 		} else {
-			rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node-connect", "--image=k8s.gcr.io/echoserver:1.8"))
+			rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "deployment", "hello-node-connect", "--image=registry.k8s.io/echoserver:1.8"))
 		}
 
 		if err != nil {

@@ -202,7 +202,7 @@ func validateEnableAddonWhileActive(ctx context.Context, t *testing.T, profile,
 	defer PostMortemLogs(t, profile)
 
 	// Enable an addon to assert it requests the correct image.
-	rr, err := Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "metrics-server", "-p", profile, "--images=MetricsServer=k8s.gcr.io/echoserver:1.4", "--registries=MetricsServer=fake.domain"))
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "metrics-server", "-p", profile, "--images=MetricsServer=registry.k8s.io/echoserver:1.4", "--registries=MetricsServer=fake.domain"))
 	if err != nil {
 		t.Errorf("failed to enable an addon post-stop. args %q: %v", rr.Command(), err)
 	}
@@ -217,8 +217,8 @@ func validateEnableAddonWhileActive(ctx context.Context, t *testing.T, profile,
 		t.Errorf("failed to get info on auto-pause deployments. args %q: %v", rr.Command(), err)
 	}
 	deploymentInfo := rr.Stdout.String()
-	if !strings.Contains(deploymentInfo, " fake.domain/k8s.gcr.io/echoserver:1.4") {
-		t.Errorf("addon did not load correct image. Expected to contain \" fake.domain/k8s.gcr.io/echoserver:1.4\". Addon deployment info: %s", deploymentInfo)
+	if !strings.Contains(deploymentInfo, " fake.domain/registry.k8s.io/echoserver:1.4") {
+		t.Errorf("addon did not load correct image. Expected to contain \" fake.domain/registry.k8s.io/echoserver:1.4\". Addon deployment info: %s", deploymentInfo)
 	}
 }
@@ -243,7 +243,7 @@ func validateEnableAddonAfterStop(ctx context.Context, t *testing.T, profile, _,
 	}
 
 	// Enable an addon to assert it comes up afterwards
-	rr, err := Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile, "--images=MetricsScraper=k8s.gcr.io/echoserver:1.4"))
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile, "--images=MetricsScraper=registry.k8s.io/echoserver:1.4"))
 	if err != nil {
 		t.Errorf("failed to enable an addon post-stop. args %q: %v", rr.Command(), err)
 	}
@@ -293,8 +293,8 @@ func validateAddonAfterStop(ctx context.Context, t *testing.T, profile, tcName,
 		t.Errorf("failed to get info on kubernetes-dashboard deployments. args %q: %v", rr.Command(), err)
 	}
 	deploymentInfo := rr.Stdout.String()
-	if !strings.Contains(deploymentInfo, " k8s.gcr.io/echoserver:1.4") {
-		t.Errorf("addon did not load correct image. Expected to contain \" k8s.gcr.io/echoserver:1.4\". Addon deployment info: %s", deploymentInfo)
+	if !strings.Contains(deploymentInfo, " registry.k8s.io/echoserver:1.4") {
+		t.Errorf("addon did not load correct image. Expected to contain \" registry.k8s.io/echoserver:1.4\". Addon deployment info: %s", deploymentInfo)
 	}
 }

@@ -18,7 +18,7 @@ spec:
   - name: busybox
     # flaky nslookup in busybox versions newer than 1.28:
    # https://github.com/docker-library/busybox/issues/48
-    # note: k8s.gcr.io/e2e-test-images/agnhost:2.32
+    # note: registry.k8s.io/e2e-test-images/agnhost:2.32
     # has similar issues (ie, resolves but returns exit code 1)
     image: gcr.io/k8s-minikube/busybox:1.28
     command: