Merge branch 'master' of github.com:kubernetes/minikube into restart

pull/7973/head
Sharif Elgamal 2020-05-27 09:58:22 -07:00
commit d0753347b5
9 changed files with 11366 additions and 14 deletions


@@ -24,9 +24,11 @@ import (
    "io"
    "net"
    "os"
    "os/exec"
    "strconv"
    "strings"

    "github.com/golang/glog"
    "github.com/spf13/cobra"
    "k8s.io/minikube/pkg/drivers/kic/oci"
    "k8s.io/minikube/pkg/minikube/command"
@@ -119,6 +121,12 @@ func isDockerActive(r command.Runner) bool {
    return sysinit.New(r).Active("docker")
}

func mustRestartDocker(name string, runner command.Runner) {
    if err := sysinit.New(runner).Restart("docker"); err != nil {
        exit.WithCodeT(exit.Unavailable, `The Docker service within '{{.name}}' is not active`, out.V{"name": name})
    }
}

// dockerEnvCmd represents the docker-env command
var dockerEnvCmd = &cobra.Command{
    Use:   "docker-env",
@@ -138,14 +146,15 @@ var dockerEnvCmd = &cobra.Command{
            out.V{"runtime": co.Config.KubernetesConfig.ContainerRuntime})
        }

        if ok := isDockerActive(co.CP.Runner); !ok {
            exit.WithCodeT(exit.Unavailable, `The docker service within '{{.name}}' is not active`, out.V{"name": cname})
        }

        sh := shell.EnvConfig{
            Shell: shell.ForceShell,
        }

        if ok := isDockerActive(co.CP.Runner); !ok {
            glog.Warningf("dockerd is not active, will try to restart it...")
            mustRestartDocker(cname, co.CP.Runner)
        }

        var err error
        port := constants.DockerDaemonPort
        if driver.NeedsPortForward(driverName) {
@@ -172,6 +181,13 @@ var dockerEnvCmd = &cobra.Command{
            }
        }

        out, err := tryDockerConnectivity("docker", ec)
        if err != nil { // docker might be up but loaded with the wrong certs/config
            // restarting helps with issues like #8185
            glog.Warningf("couldn't connect to docker inside minikube; will try to restart the dockerd service... output: %s error: %v", string(out), err)
            mustRestartDocker(cname, co.CP.Runner)
        }

        if dockerUnset {
            if err := dockerUnsetScript(ec, os.Stdout); err != nil {
                exit.WithError("Error generating unset output", err)
@@ -238,6 +254,23 @@ func dockerEnvVars(ec DockerEnvConfig) map[string]string {
    return env
}

// dockerEnvVarsList gets the docker env variables needed to use minikube's docker daemon in an exec.Command
func dockerEnvVarsList(ec DockerEnvConfig) []string {
    return []string{
        fmt.Sprintf("%s=%s", constants.DockerTLSVerifyEnv, "1"),
        fmt.Sprintf("%s=%s", constants.DockerHostEnv, dockerURL(ec.hostIP, ec.port)),
        fmt.Sprintf("%s=%s", constants.DockerCertPathEnv, ec.certsDir),
        fmt.Sprintf("%s=%s", constants.MinikubeActiveDockerdEnv, ec.profile),
    }
}

// tryDockerConnectivity tries to connect to the docker daemon from the user's point of view, to detect whether it needs a restart
func tryDockerConnectivity(bin string, ec DockerEnvConfig) ([]byte, error) {
    c := exec.Command(bin, "version", "--format={{.Server}}")
    c.Env = append(os.Environ(), dockerEnvVarsList(ec)...)
    return c.CombinedOutput()
}

func init() {
    defaultNoProxyGetter = &EnvNoProxyGetter{}
    dockerEnvCmd.Flags().BoolVar(&noProxy, "no-proxy", false, "Add machine IP to NO_PROXY environment variable")
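
Taken together, the new pieces give docker-env a self-healing path: probe the daemon from the user's point of view with tryDockerConnectivity, and restart it inside the node via mustRestartDocker when the probe fails. A minimal standalone sketch of that probe-then-restart flow; the helper name and the literal env values here are hypothetical stand-ins for what the real command derives from the cluster config:

package main

import (
    "fmt"
    "os"
    "os/exec"
)

// probeDocker mirrors tryDockerConnectivity: ask the docker CLI for the
// server version using the given environment, returning combined output.
func probeDocker(env []string) ([]byte, error) {
    c := exec.Command("docker", "version", "--format={{.Server}}")
    c.Env = append(os.Environ(), env...)
    return c.CombinedOutput()
}

func main() {
    // Hypothetical values; docker-env builds these from the running cluster.
    env := []string{
        "DOCKER_TLS_VERIFY=1",
        "DOCKER_HOST=tcp://192.168.64.2:2376",
        "DOCKER_CERT_PATH=/home/user/.minikube/certs",
    }
    if out, err := probeDocker(env); err != nil {
        // This is the point where the real command logs a warning and
        // calls mustRestartDocker before handing the env to the user.
        fmt.Printf("docker probe failed: %v\n%s", err, out)
    }
}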


@@ -479,14 +479,7 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
    }

    if cmd.Flags().Changed(kubernetesVersion) {
        switch viper.GetString(kubernetesVersion) {
        case "latest":
            existing.KubernetesConfig.KubernetesVersion = constants.NewestKubernetesVersion
        case "stable":
            existing.KubernetesConfig.KubernetesVersion = constants.DefaultKubernetesVersion
        default:
            existing.KubernetesConfig.KubernetesVersion = viper.GetString(kubernetesVersion)
        }
        cc.KubernetesConfig.KubernetesVersion = getKubernetesVersion(existing)
    }

    if cmd.Flags().Changed(apiServerName) {
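
The alias resolution the deleted switch performed presumably moves into getKubernetesVersion, whose body is not shown in this diff. A plausible sketch of such a helper, assuming it keeps the same "latest"/"stable" handling and falls back to the existing cluster's version when the flag is empty:

// Sketch only: resolve the --kubernetes-version flag the way the deleted
// switch did, falling back to the existing cluster's version when unset.
func getKubernetesVersion(old *config.ClusterConfig) string {
    paramVersion := viper.GetString(kubernetesVersion)
    if paramVersion == "" && old != nil {
        paramVersion = old.KubernetesConfig.KubernetesVersion
    }
    switch paramVersion {
    case "", "stable":
        return constants.DefaultKubernetesVersion
    case "latest":
        return constants.NewestKubernetesVersion
    default:
        return paramVersion
    }
}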

deploy/addons/olm/crds.yaml (new file, 10848 lines)

File diff suppressed because it is too large

deploy/addons/olm/olm.yaml (new file, 337 lines)

@@ -0,0 +1,337 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: olm
---
apiVersion: v1
kind: Namespace
metadata:
  name: operators
---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: olm-operator-serviceaccount
  namespace: olm
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:controller:operator-lifecycle-manager
rules:
- apiGroups: ["*"]
  resources: ["*"]
  verbs: ["*"]
- nonResourceURLs: ["*"]
  verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: olm-operator-binding-olm
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:controller:operator-lifecycle-manager
subjects:
- kind: ServiceAccount
  name: olm-operator-serviceaccount
  namespace: olm
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: olm-operator
  namespace: olm
  labels:
    app: olm-operator
spec:
  strategy:
    type: RollingUpdate
  replicas: 1
  selector:
    matchLabels:
      app: olm-operator
  template:
    metadata:
      labels:
        app: olm-operator
    spec:
      serviceAccountName: olm-operator-serviceaccount
      containers:
      - name: olm-operator
        command:
        - /bin/olm
        args:
        - -namespace
        - $(OPERATOR_NAMESPACE)
        - -writeStatusName
        - ""
        image: quay.io/operator-framework/olm@sha256:0d15ffb5d10a176ef6e831d7865f98d51255ea5b0d16403618c94a004d049373
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8080
        - containerPort: 8081
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
        readinessProbe:
          httpGet:
            path: /healthz
            port: 8080
        terminationMessagePolicy: FallbackToLogsOnError
        env:
        - name: OPERATOR_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: OPERATOR_NAME
          value: olm-operator
        resources:
          requests:
            cpu: 10m
            memory: 160Mi
      nodeSelector:
        beta.kubernetes.io/os: linux
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: catalog-operator
  namespace: olm
  labels:
    app: catalog-operator
spec:
  strategy:
    type: RollingUpdate
  replicas: 1
  selector:
    matchLabels:
      app: catalog-operator
  template:
    metadata:
      labels:
        app: catalog-operator
    spec:
      serviceAccountName: olm-operator-serviceaccount
      containers:
      - name: catalog-operator
        command:
        - /bin/catalog
        args:
        - '-namespace'
        - olm
        - -configmapServerImage=quay.io/operator-framework/configmap-operator-registry:latest
        image: quay.io/operator-framework/olm@sha256:0d15ffb5d10a176ef6e831d7865f98d51255ea5b0d16403618c94a004d049373
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8080
        - containerPort: 8081
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
        readinessProbe:
          httpGet:
            path: /healthz
            port: 8080
        terminationMessagePolicy: FallbackToLogsOnError
        env:
        resources:
          requests:
            cpu: 10m
            memory: 80Mi
      nodeSelector:
        beta.kubernetes.io/os: linux
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: aggregate-olm-edit
  labels:
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
rules:
- apiGroups: ["operators.coreos.com"]
  resources: ["subscriptions"]
  verbs: ["create", "update", "patch", "delete"]
- apiGroups: ["operators.coreos.com"]
  resources: ["clusterserviceversions", "catalogsources", "installplans", "subscriptions"]
  verbs: ["delete"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: aggregate-olm-view
  labels:
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups: ["operators.coreos.com"]
  resources: ["clusterserviceversions", "catalogsources", "installplans", "subscriptions", "operatorgroups"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["packages.operators.coreos.com"]
  resources: ["packagemanifests", "packagemanifests/icon"]
  verbs: ["get", "list", "watch"]
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: global-operators
  namespace: operators
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: olm-operators
  namespace: olm
spec:
  targetNamespaces:
  - olm
---
apiVersion: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
metadata:
  name: packageserver
  namespace: olm
  labels:
    olm.version: 0.14.1
spec:
  displayName: Package Server
  description: Represents an Operator package that is available from a given CatalogSource which will resolve to a ClusterServiceVersion.
  minKubeVersion: 1.11.0
  keywords: ['packagemanifests', 'olm', 'packages']
  maintainers:
  - name: Red Hat
    email: openshift-operators@redhat.com
  provider:
    name: Red Hat
  links:
  - name: Package Server
    url: https://github.com/operator-framework/operator-lifecycle-manager/tree/master/pkg/package-server
  installModes:
  - type: OwnNamespace
    supported: true
  - type: SingleNamespace
    supported: true
  - type: MultiNamespace
    supported: true
  - type: AllNamespaces
    supported: true
  install:
    strategy: deployment
    spec:
      clusterPermissions:
      - serviceAccountName: olm-operator-serviceaccount
        rules:
        - apiGroups:
          - authorization.k8s.io
          resources:
          - subjectaccessreviews
          verbs:
          - create
          - get
        - apiGroups:
          - ""
          resources:
          - configmaps
          verbs:
          - get
          - list
          - watch
        - apiGroups:
          - "operators.coreos.com"
          resources:
          - catalogsources
          verbs:
          - get
          - list
          - watch
        - apiGroups:
          - "packages.operators.coreos.com"
          resources:
          - packagemanifests
          verbs:
          - get
          - list
      deployments:
      - name: packageserver
        spec:
          strategy:
            type: RollingUpdate
          replicas: 2
          selector:
            matchLabels:
              app: packageserver
          template:
            metadata:
              labels:
                app: packageserver
            spec:
              serviceAccountName: olm-operator-serviceaccount
              nodeSelector:
                beta.kubernetes.io/os: linux
              containers:
              - name: packageserver
                command:
                - /bin/package-server
                - -v=4
                - --secure-port
                - "5443"
                - --global-namespace
                - olm
                image: quay.io/operator-framework/olm@sha256:0d15ffb5d10a176ef6e831d7865f98d51255ea5b0d16403618c94a004d049373
                imagePullPolicy: Always
                ports:
                - containerPort: 5443
                livenessProbe:
                  httpGet:
                    scheme: HTTPS
                    path: /healthz
                    port: 5443
                readinessProbe:
                  httpGet:
                    scheme: HTTPS
                    path: /healthz
                    port: 5443
                terminationMessagePolicy: FallbackToLogsOnError
                resources:
                  requests:
                    cpu: 10m
                    memory: 50Mi
  maturity: alpha
  version: 0.14.1
  apiservicedefinitions:
    owned:
    - group: packages.operators.coreos.com
      version: v1
      kind: PackageManifest
      name: packagemanifests
      displayName: PackageManifest
      description: A PackageManifest is a resource generated from existing CatalogSources and their ConfigMaps
      deploymentName: packageserver
      containerPort: 5443
---
apiVersion: operators.coreos.com/v1alpha1
kind: CatalogSource
metadata:
  name: operatorhubio-catalog
  namespace: olm
spec:
  sourceType: grpc
  image: quay.io/operator-framework/upstream-community-operators:latest
  displayName: Community Operators
  publisher: OperatorHub.io
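
Taken as a whole, the manifest bootstraps OLM end to end: the olm and operators namespaces, a broadly privileged service account for the operators, the olm-operator and catalog-operator deployments, aggregated edit/view ClusterRoles for working with subscriptions and CSVs, OperatorGroups for both namespaces, the packageserver ClusterServiceVersion that OLM itself manages, and an operatorhubio-catalog CatalogSource pointing at the upstream community-operators index.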


@@ -101,7 +101,11 @@ var Addons = []*Addon{
        set:       SetBool,
        callbacks: []setFn{enableOrDisableAddon},
    },
    {
        name:      "olm",
        set:       SetBool,
        callbacks: []setFn{enableOrDisableAddon},
    },
    {
        name:      "registry",
        set:       SetBool,


@@ -209,6 +209,20 @@ var Addons = map[string]*Addon{
        "0640",
        false),
    }, false, "metrics-server"),
    "olm": NewAddon([]*BinAsset{
        MustBinAsset(
            "deploy/addons/olm/crds.yaml",
            vmpath.GuestAddonsDir,
            "crds.yaml",
            "0640",
            false),
        MustBinAsset(
            "deploy/addons/olm/olm.yaml",
            vmpath.GuestAddonsDir,
            "olm.yaml",
            "0640",
            false),
    }, false, "olm"),
    "registry": NewAddon([]*BinAsset{
        MustBinAsset(
            "deploy/addons/registry/registry-rc.yaml.tmpl",


@@ -72,6 +72,25 @@ func HostIP(host *host.Host) (net.IP, error) {
        re = regexp.MustCompile(`(?s)Name:\s*` + iface + `.+IPAddress:\s*(\S+)`)
        ip := re.FindStringSubmatch(string(ipList))[1]
        return net.ParseIP(ip), nil
    case driver.Parallels:
        bin := "prlsrvctl"
        var binPath string
        // Prefer the absolute path if prlsrvctl is on PATH; otherwise fall back to the bare name.
        if fullPath, err := exec.LookPath(bin); err == nil {
            binPath = fullPath
        } else {
            binPath = bin
        }
        out, err := exec.Command(binPath, "net", "info", "Shared").Output()
        if err != nil {
            return []byte{}, errors.Wrap(err, "Error reading the info of Parallels Shared network interface")
        }
        re := regexp.MustCompile(`IPv4 address: (.*)`)
        ipMatch := re.FindStringSubmatch(string(out))
        if len(ipMatch) < 2 {
            return []byte{}, errors.New("Error getting the IP address of Parallels Shared network interface")
        }
        ip := ipMatch[1]
        return net.ParseIP(ip), nil
    case driver.HyperKit:
        return net.ParseIP("192.168.64.1"), nil
    case driver.VMware:
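
The Parallels branch shells out to prlsrvctl and scrapes the shared network's IPv4 address with the regex above. A tiny self-contained sketch of just that parsing step; the sample output is an assumption about prlsrvctl's format:

package main

import (
    "fmt"
    "net"
    "regexp"
)

func main() {
    // Hypothetical excerpt of `prlsrvctl net info Shared` output.
    out := "Network ID: Shared\nType: shared\nIPv4 address: 10.211.55.1\nIPv4 subnet mask: 255.255.255.0\n"
    re := regexp.MustCompile(`IPv4 address: (.*)`)
    m := re.FindStringSubmatch(out)
    if len(m) < 2 {
        fmt.Println("no IPv4 address found")
        return
    }
    fmt.Println(net.ParseIP(m[1])) // 10.211.55.1
}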


@@ -23,7 +23,6 @@ import (
// supportedDrivers is a list of supported drivers on Linux.
var supportedDrivers = []string{
    VirtualBox,
    Parallels,
    VMwareFusion,
    KVM2,
    VMware,


@@ -328,3 +328,108 @@ func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string)
        t.Errorf("failed disabling helm-tiller addon. args %q: %v", rr.Command(), err)
    }
}

func TestOlmAddon(t *testing.T) {
    profile := UniqueProfileName("addons")
    ctx, cancel := context.WithTimeout(context.Background(), Minutes(40))
    defer Cleanup(t, profile, cancel)

    args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "--addons=olm"}, StartArgs()...)
    rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
    if err != nil {
        t.Fatalf("%s failed: %v", rr.Command(), err)
    }

    // Parallelized tests
    t.Run("parallel", func(t *testing.T) {
        tests := []struct {
            name      string
            validator validateFunc
        }{
            {"Olm", validateOlmAddon},
        }
        for _, tc := range tests {
            tc := tc
            t.Run(tc.name, func(t *testing.T) {
                MaybeParallel(t)
                tc.validator(ctx, t, profile)
            })
        }
    })

    // Assert that disable/enable works offline
    rr, err = Run(t, exec.CommandContext(ctx, Target(), "stop", "-p", profile))
    if err != nil {
        t.Errorf("failed to stop minikube. args %q: %v", rr.Command(), err)
    }
    rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "enable", "dashboard", "-p", profile))
    if err != nil {
        t.Errorf("failed to enable dashboard addon: args %q: %v", rr.Command(), err)
    }
    rr, err = Run(t, exec.CommandContext(ctx, Target(), "addons", "disable", "dashboard", "-p", profile))
    if err != nil {
        t.Errorf("failed to disable dashboard addon: args %q: %v", rr.Command(), err)
    }
}

func validateOlmAddon(ctx context.Context, t *testing.T, profile string) {
    defer PostMortemLogs(t, profile)
    client, err := kapi.Client(profile)
    if err != nil {
        t.Fatalf("failed to get Kubernetes client for %s: %v", profile, err)
    }

    start := time.Now()
    if err := kapi.WaitForDeploymentToStabilize(client, "olm", "catalog-operator", Minutes(6)); err != nil {
        t.Errorf("failed waiting for catalog-operator deployment to stabilize: %v", err)
    }
    t.Logf("catalog-operator stabilized in %s", time.Since(start))

    if err := kapi.WaitForDeploymentToStabilize(client, "olm", "olm-operator", Minutes(6)); err != nil {
        t.Errorf("failed waiting for olm-operator deployment to stabilize: %v", err)
    }
    t.Logf("olm-operator stabilized in %s", time.Since(start))

    if err := kapi.WaitForDeploymentToStabilize(client, "olm", "packageserver", Minutes(6)); err != nil {
        t.Errorf("failed waiting for packageserver deployment to stabilize: %v", err)
    }
    t.Logf("packageserver stabilized in %s", time.Since(start))

    if _, err := PodWait(ctx, t, profile, "olm", "app=catalog-operator", Minutes(6)); err != nil {
        t.Fatalf("failed waiting for pod catalog-operator: %v", err)
    }
    if _, err := PodWait(ctx, t, profile, "olm", "app=olm-operator", Minutes(6)); err != nil {
        t.Fatalf("failed waiting for pod olm-operator: %v", err)
    }
    if _, err := PodWait(ctx, t, profile, "olm", "app=packageserver", Minutes(6)); err != nil {
        t.Fatalf("failed waiting for pod packageserver: %v", err)
    }
    if _, err := PodWait(ctx, t, profile, "olm", "olm.catalogSource=operatorhubio-catalog", Minutes(6)); err != nil {
        t.Fatalf("failed waiting for pod operatorhubio-catalog: %v", err)
    }

    // Install one sample Operator such as etcd
    rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", "https://operatorhub.io/install/etcd.yaml"))
    if err != nil {
        t.Logf("etcd operator installation with %s failed: %v", rr.Command(), err)
    }

    want := "Succeeded"
    checkOperatorInstalled := func() error {
        rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "csv", "-n", "my-etcd"))
        if err != nil {
            return err
        }
        if rr.Stderr.String() != "" {
            t.Logf("%v: unexpected stderr: %s", rr.Command(), rr.Stderr)
        }
        if !strings.Contains(rr.Stdout.String(), want) {
            return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), rr.Stdout, want)
        }
        return nil
    }

    // Operator installation takes a while
    if err := retry.Expo(checkOperatorInstalled, time.Second*3, Minutes(6)); err != nil {
        t.Errorf("failed checking operator installed: %v", err)
    }
}