Add placeholder bootstrapper for kicbs

pull/6112/head
Medya Gh 2019-12-18 16:11:50 -08:00
parent 83374f4371
commit 31ad90d3cb
4 changed files with 54 additions and 24 deletions

View File

@@ -32,6 +32,7 @@ import (
 	"k8s.io/kubectl/pkg/util/templates"
 	configCmd "k8s.io/minikube/cmd/minikube/cmd/config"
 	"k8s.io/minikube/pkg/minikube/bootstrapper"
+	"k8s.io/minikube/pkg/minikube/bootstrapper/kicbs"
 	"k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/constants"
@@ -272,6 +273,12 @@ func getClusterBootstrapper(api libmachine.API, bootstrapperName string) (bootst
 		if err != nil {
 			return nil, errors.Wrap(err, "getting kubeadm bootstrapper")
 		}
+	case bootstrapper.BootstrapperTypeKICBS:
+		b, err = kicbs.NewKICBSBootstrapper(api)
+		if err != nil {
+			return nil, errors.Wrap(err, "getting kicbs bootstrapper")
+		}
 	default:
 		return nil, fmt.Errorf("unknown bootstrapper: %s", bootstrapperName)
 	}
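
For orientation, a minimal sketch of the dispatch this hunk extends, pulled out into a standalone helper so it can be read in isolation. It is illustrative only and not part of the diff: the helper name bootstrapperFor is hypothetical, and the kubeadm constant and constructor names are assumed from the imports above rather than shown in this hunk.

package main

import (
	"fmt"

	"github.com/docker/machine/libmachine"

	"k8s.io/minikube/pkg/minikube/bootstrapper"
	"k8s.io/minikube/pkg/minikube/bootstrapper/kicbs"
	"k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm"
)

// bootstrapperFor mirrors the switch in getClusterBootstrapper above.
func bootstrapperFor(api libmachine.API, name string) (bootstrapper.Bootstrapper, error) {
	switch name {
	case bootstrapper.BootstrapperTypeKubeadm:
		// existing kubeadm path (constant and constructor names assumed)
		return kubeadm.NewKubeadmBootstrapper(api)
	case bootstrapper.BootstrapperTypeKICBS:
		// placeholder path added by this commit
		return kicbs.NewKICBSBootstrapper(api)
	default:
		return nil, fmt.Errorf("unknown bootstrapper: %s", name)
	}
}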

View File

@@ -340,7 +340,6 @@ func runStart(cmd *cobra.Command, args []string) {
 	// exits here in case of --download-only option.
 	handleDownloadOnly(&cacheGroup, k8sVersion)
 	mRunner, preExists, machineAPI, host := startMachine(&config)
-	fmt.Println("After startMachine &config")
 	defer machineAPI.Close()
 	// configure the runtime (docker, containerd, crio)
 	cr := configureRuntimes(mRunner, driverName, config.KubernetesConfig)
@@ -484,7 +483,6 @@ func startMachine(config *cfg.MachineConfig) (runner command.Runner, preExists b
 	}
 	ip := validateNetwork(host, runner)
-	fmt.Println("after validateNetwork")
 	// Bypass proxy for minikube's vm host ip
 	err = proxy.ExcludeIP(ip)

View File

@@ -18,39 +18,63 @@ limitations under the License.
 package kicbs
 
 import (
+	"fmt"
 	"net"
 	"time"
 
+	"github.com/docker/machine/libmachine"
+	"github.com/pkg/errors"
+	"github.com/spf13/viper"
+
 	"k8s.io/minikube/pkg/minikube/bootstrapper"
+	"k8s.io/minikube/pkg/minikube/command"
 	"k8s.io/minikube/pkg/minikube/config"
+	"k8s.io/minikube/pkg/minikube/machine"
 )
 
-func PullImages(config.KubernetesConfig) error {
-	return nil
+// Bootstrapper is a bootstrapper using kicbs
+type Bootstrapper struct {
+	c           command.Runner
+	contextName string
+}
+
+// NewKICBSBootstrapper creates a new kicbs.Bootstrapper
+func NewKICBSBootstrapper(api libmachine.API) (*Bootstrapper, error) {
+	name := viper.GetString(config.MachineProfile)
+	h, err := api.Load(name)
+	if err != nil {
+		return nil, errors.Wrap(err, "getting api client")
+	}
+	runner, err := machine.CommandRunner(h)
+	if err != nil {
+		return nil, errors.Wrap(err, "command runner")
+	}
+	return &Bootstrapper{c: runner, contextName: name}, nil
 }
 
-func StartCluster(config.KubernetesConfig) error {
-	return nil
+func (k *Bootstrapper) PullImages(config.KubernetesConfig) error {
+	return fmt.Errorf("the PullImages is not implemented in kicbs yet")
 }
 
-func UpdateCluster(config.MachineConfig) error {
-	return nil
+func (k *Bootstrapper) StartCluster(config.KubernetesConfig) error {
+	return fmt.Errorf("the StartCluster is not implemented in kicbs yet")
 }
 
-func DeleteCluster(config.KubernetesConfig) error {
-	return nil
+func (k *Bootstrapper) UpdateCluster(config.MachineConfig) error {
+	return fmt.Errorf("the UpdateCluster is not implemented in kicbs yet")
 }
 
-func WaitForCluster(config.KubernetesConfig, time.Duration) error {
-	return nil
+func (k *Bootstrapper) DeleteCluster(config.KubernetesConfig) error {
+	return fmt.Errorf("the DeleteCluster is not implemented in kicbs yet")
 }
 
-func LogCommands(bootstrapper.LogOptions) map[string]string {
-	return nil
+func (k *Bootstrapper) WaitForCluster(config.KubernetesConfig, time.Duration) error {
+	return fmt.Errorf("the WaitForCluster is not implemented in kicbs yet")
 }
 
-func SetupCerts(cfg config.KubernetesConfig) error {
-	return nil
+func (k *Bootstrapper) LogCommands(bootstrapper.LogOptions) map[string]string {
+	return map[string]string{}
 }
 
-func GetKubeletStatus() (string, error) {
-	return "", nil
+func (k *Bootstrapper) SetupCerts(cfg config.KubernetesConfig) error {
+	return fmt.Errorf("the SetupCerts is not implemented in kicbs yet")
 }
 
-func GetAPIServerStatus(net.IP, int) (string, error) {
-	return "", nil
+func (k *Bootstrapper) GetKubeletStatus() (string, error) {
+	return "", fmt.Errorf("the GetKubeletStatus is not implemented in kicbs yet")
 }
+
+func (k *Bootstrapper) GetAPIServerStatus(net.IP, int) (string, error) {
+	return "", fmt.Errorf("the GetAPIServerStatus is not implemented in kicbs yet")
+}
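
A minimal usage sketch for the placeholder (not part of the diff): it assumes an already-connected libmachine API client and a populated config.KubernetesConfig, and only demonstrates that every kicbs method currently surfaces a "not implemented" error. The caller name tryKICBS is hypothetical.

package main

import (
	"fmt"

	"github.com/docker/machine/libmachine"

	"k8s.io/minikube/pkg/minikube/bootstrapper/kicbs"
	"k8s.io/minikube/pkg/minikube/config"
)

// tryKICBS is a hypothetical caller used for illustration only.
func tryKICBS(api libmachine.API, k8s config.KubernetesConfig) error {
	b, err := kicbs.NewKICBSBootstrapper(api)
	if err != nil {
		return fmt.Errorf("building kicbs bootstrapper: %v", err)
	}
	// Every placeholder method returns an error until the real KIC
	// bootstrapper is implemented, e.g.:
	if err := b.StartCluster(k8s); err != nil {
		fmt.Println(err) // "the StartCluster is not implemented in kicbs yet"
	}
	return nil
}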

View File

@@ -465,16 +465,17 @@ func createHost(api libmachine.API, config cfg.MachineConfig) (*host.Host, error
 		return nil, errors.Wrap(err, "create")
 	}
 
-	if !driver.BareMetal(config.VMDriver) && !driver.IsKIC(config.VMDriver) {
+	if driver.BareMetal(config.VMDriver) {
+		showLocalOsRelease()
+	} else if !driver.BareMetal(config.VMDriver) && !driver.IsKIC(config.VMDriver) {
 		showRemoteOsRelease(h.Driver)
 		// Ensure that even new VM's have proper time synchronization up front
 		// It's 2019, and I can't believe I am still dealing with time desync as a problem.
 		if err := ensureSyncedGuestClock(h); err != nil {
 			return h, err
 		}
-	} else {
-		showLocalOsRelease() // TODO:medyagh for kic show docker or podman version
-	}
+	} // TODO:medyagh add show-os release for kic
+
 	if err := api.Save(h); err != nil {
 		return nil, errors.Wrap(err, "save")
 	}