it works
parent d4639c52b9
commit 335379dc59

@@ -884,16 +884,26 @@ func validateRegistryMirror() {
 	}
 }
 
-func createNode(cc config.ClusterConfig, kubeNodeName string) (config.ClusterConfig, config.Node, error) {
+func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.ClusterConfig) (config.ClusterConfig, config.Node, error) {
 	// Create the initial node, which will necessarily be a control plane
-	cp := config.Node{
-		Port:              cc.KubernetesConfig.NodePort,
-		KubernetesVersion: getKubernetesVersion(&cc),
-		Name:              kubeNodeName,
-		ControlPlane:      true,
-		Worker:            true,
+	var cp config.Node
+	var err error
+	if existing == nil {
+		cp = config.Node{
+			Port:              cc.KubernetesConfig.NodePort,
+			KubernetesVersion: getKubernetesVersion(&cc),
+			Name:              kubeNodeName,
+			ControlPlane:      true,
+			Worker:            true,
+		}
+		cc.Nodes = []config.Node{cp}
+	} else {
+		cp, err = config.PrimaryControlPlane(existing)
+		if err != nil {
+			return cc, config.Node{}, err
+		}
+		cc.Nodes = existing.Nodes
 	}
-	cc.Nodes = []config.Node{cp}
 	return cc, cp, nil
 }

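The shape of this change is reuse-or-create: a fresh start synthesizes the control-plane node, while a restart carries over the node list from the existing config so previously added workers survive. A minimal runnable sketch of the same pattern, with hypothetical Node/ClusterConfig types and a primaryControlPlane helper standing in for minikube's config package:

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for minikube's config.Node and config.ClusterConfig.
type Node struct {
	Name         string
	ControlPlane bool
}

type ClusterConfig struct {
	Nodes []Node
}

// primaryControlPlane mirrors config.PrimaryControlPlane: it returns the
// first control-plane node of an existing cluster, or an error.
func primaryControlPlane(cc *ClusterConfig) (Node, error) {
	for _, n := range cc.Nodes {
		if n.ControlPlane {
			return n, nil
		}
	}
	return Node{}, errors.New("no control-plane node found")
}

// createNode reuses the existing node list on a restart instead of
// clobbering it with a fresh single-node slice.
func createNode(cc ClusterConfig, name string, existing *ClusterConfig) (ClusterConfig, Node, error) {
	if existing == nil {
		cp := Node{Name: name, ControlPlane: true}
		cc.Nodes = []Node{cp}
		return cc, cp, nil
	}
	cp, err := primaryControlPlane(existing)
	if err != nil {
		return cc, Node{}, err
	}
	cc.Nodes = existing.Nodes // keep workers that were added on a prior run
	return cc, cp, nil
}

func main() {
	existing := &ClusterConfig{Nodes: []Node{
		{Name: "m01", ControlPlane: true},
		{Name: "m02"},
	}}
	cc, cp, err := createNode(ClusterConfig{}, "m01", existing)
	if err != nil {
		panic(err)
	}
	fmt.Printf("control plane: %s, nodes kept: %d\n", cp.Name, len(cc.Nodes))
}
```
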
@@ -348,7 +348,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
 	if driver.BareMetal(cc.Driver) {
 		kubeNodeName = "m01"
 	}
-	return createNode(cc, kubeNodeName)
+	return createNode(cc, kubeNodeName, existing)
 }
 
 // updateExistingConfigFromFlags will update the existing config from the flags - used on a second start

@@ -44,7 +44,7 @@ type Bootstrapper interface {
 	GenerateToken(*config.ClusterConfig, *config.Node) (string, error)
 	// LogCommands returns a map of log type to a command which will display that log.
 	LogCommands(config.ClusterConfig, LogOptions) map[string]string
-	SetupCerts(config.KubernetesConfig, config.Node, bool) error
+	SetupCerts(config.KubernetesConfig, config.Node) error
 	GetAPIServerStatus(string, int) (string, error)
 }
 
@@ -105,7 +105,7 @@ func WaitForHealthyAPIServer(r cruntime.Manager, bs bootstrapper.Bootstrapper, c
 	}
 
 	if err := wait.PollImmediate(kconst.APICallRetryInterval, kconst.DefaultControlPlaneTimeout, healthz); err != nil {
-		return fmt.Errorf("apiserver healthz never reported healthy")
+		return fmt.Errorf("apiserver healthz never reported healthy: %v", err)
 	}
 
 	vcheck := func() (bool, error) {

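The one-line change in this hunk stops discarding the poll error, so a timeout reports the root cause instead of an opaque message. A small standalone illustration of the difference, using a plain error in place of the wait package:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-in for wait.PollImmediate failing with the last probe error.
func pollAPIServer() error {
	return errors.New(`Get "https://localhost:8443/healthz": connection refused`)
}

func main() {
	if err := pollAPIServer(); err != nil {
		// Before: the root cause is discarded.
		fmt.Println(fmt.Errorf("apiserver healthz never reported healthy"))
		// After: the underlying error rides along for debugging.
		fmt.Println(fmt.Errorf("apiserver healthz never reported healthy: %v", err))
	}
}
```
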
@@ -47,7 +47,7 @@ import (
 )
 
 // SetupCerts gets the generated credentials required to talk to the APIServer.
-func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node, keepContext bool) ([]assets.CopyableFile, error) {
+func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) ([]assets.CopyableFile, error) {
 	localPath := localpath.Profile(k8s.ClusterName)
 	glog.Infof("Setting up %s for IP: %s\n", localPath, n.IP)
 
@@ -56,9 +56,12 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node,
 		return nil, errors.Wrap(err, "shared CA certs")
 	}
 
-	xfer, err := generateProfileCerts(k8s, n, ccs)
-	if err != nil {
-		return nil, errors.Wrap(err, "profile certs")
+	var xfer []string
+	if n.ControlPlane {
+		xfer, err = generateProfileCerts(k8s, n, ccs)
+		if err != nil {
+			return nil, errors.Wrap(err, "profile certs")
+		}
 	}
 
 	xfer = append(xfer, ccs.caCert)

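Only the control plane serves the API, so profile certificates are now generated just for control-plane nodes; every node still receives the shared CA. A hedged sketch of that control flow, with generateProfileCerts stubbed out and plain strings standing in for the cert types:

```go
package main

import "fmt"

// Node is a hypothetical stand-in for minikube's config.Node.
type Node struct {
	Name         string
	ControlPlane bool
}

// generateProfileCerts is a stub; the real helper signs apiserver certs
// for the profile and returns the file names to copy to the machine.
func generateProfileCerts(n Node) ([]string, error) {
	return []string{"apiserver.crt", "apiserver.key"}, nil
}

// certsToTransfer mirrors the hunk's control flow: profile certs only
// for control-plane nodes, the shared CA for everyone.
func certsToTransfer(n Node, caCert string) ([]string, error) {
	var xfer []string
	if n.ControlPlane {
		profile, err := generateProfileCerts(n)
		if err != nil {
			return nil, fmt.Errorf("profile certs: %v", err)
		}
		xfer = profile
	}
	return append(xfer, caCert), nil
}

func main() {
	for _, n := range []Node{{Name: "m01", ControlPlane: true}, {Name: "m02"}} {
		files, _ := certsToTransfer(n, "ca.crt")
		fmt.Println(n.Name, files)
	}
}
```
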
@@ -99,7 +102,7 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node,
 		ClientCertificate:    path.Join(vmpath.GuestKubernetesCertsDir, "apiserver.crt"),
 		ClientKey:            path.Join(vmpath.GuestKubernetesCertsDir, "apiserver.key"),
 		CertificateAuthority: path.Join(vmpath.GuestKubernetesCertsDir, "ca.crt"),
-		KeepContext:          keepContext,
+		KeepContext:          false,
 	}
 
 	kubeCfg := api.NewConfig()

@@ -57,7 +57,7 @@ func TestSetupCerts(t *testing.T) {
 	f := command.NewFakeCommandRunner()
 	f.SetCommandToOutput(expected)
 
-	_, err := SetupCerts(f, k8s, config.Node{}, false)
+	_, err := SetupCerts(f, k8s, config.Node{})
 	if err != nil {
 		t.Fatalf("Error starting cluster: %v", err)
 	}

@@ -623,10 +623,6 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC
 
 func (k *Bootstrapper) restartWorker(cc config.ClusterConfig, n config.Node) error {
 	if kverify.KubeletStatus(k.c) == state.Running {
 		return nil
 	}
 
-	if err := k.clearStaleConfigs(cc); err != nil {
-		return errors.Wrap(err, "clearing stale configs")
-	}
-
@@ -640,11 +636,12 @@ func (k *Bootstrapper) restartWorker(cc config.ClusterConfig, n config.Node) err
 		return errors.Wrap(err, "getting control plane endpoint")
 	}
 
+	// Make sure to account for if n.Token doesn't exist for older configs
 	cmd := fmt.Sprintf("%s join phase kubelet-start %s --token %s --discovery-token-unsafe-skip-ca-verification", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), net.JoinHostPort(host, strconv.Itoa(port)), n.Token)
 	_, err = k.c.RunCmd(exec.Command("/bin/bash", "-c", cmd))
 	if err != nil {
-		return errors.Wrap(err, "running join phase kubelet-start")
+		if !strings.Contains(err.Error(), "status \"Ready\" already exists in the cluster") {
+			return errors.Wrap(err, "running join phase kubelet-start")
+		}
 	}
 
 	// This can fail during upgrades if the old pods have not shut down yet

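Re-running the kubeadm join phase on a node that is already registered fails with a 'status "Ready" already exists in the cluster' message, so the hunk treats that specific error as success. A runnable sketch of the guard, using fmt.Errorf in place of errors.Wrap:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// Stand-in for running the kubeadm join phase over the command runner.
func runJoinPhase() error {
	return errors.New(`a Node with name "m02" and status "Ready" already exists in the cluster`)
}

func restartWorker() error {
	if err := runJoinPhase(); err != nil {
		// A node that already joined is not a failure; anything else is.
		if !strings.Contains(err.Error(), `status "Ready" already exists in the cluster`) {
			return fmt.Errorf("running join phase kubelet-start: %v", err)
		}
	}
	return nil
}

func main() {
	fmt.Println("restartWorker error:", restartWorker())
}
```

Matching on error text is brittle, but since the failure arrives as command output from the guest, a substring check is the pragmatic option here.
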
@@ -665,6 +662,10 @@ func (k *Bootstrapper) restartWorker(cc config.ClusterConfig, n config.Node) err
 
 // GenerateToken creates a token and returns the appropriate kubeadm join command to run, or the already existing token
 func (k *Bootstrapper) GenerateToken(cc *config.ClusterConfig, n *config.Node) (string, error) {
+	if n.Token != "" {
+		return "", nil
+	}
+
 	// Generate the token so we can store it
 	genTokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token generate", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion)))
 	r, err := k.c.RunCmd(genTokenCmd)

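GenerateToken now short-circuits when the node already carries a token from a previous start, so a restart does not mint a new one; an empty join command tells the caller to fall back to the stored token. A sketch of the caching behavior with hypothetical stand-in types (kubeadmTokenGenerate is a stub for running "kubeadm token generate" on the control plane):

```go
package main

import "fmt"

// Node is a hypothetical stand-in carrying the persisted join token.
type Node struct{ Token string }

// Stub for invoking "kubeadm token generate" on the control plane.
func kubeadmTokenGenerate() (string, error) {
	return "abcdef.0123456789abcdef", nil
}

// generateToken returns an empty join command when the node already
// holds a token from a previous start, mirroring the early return above.
func generateToken(n *Node) (string, error) {
	if n.Token != "" {
		return "", nil // caller falls back to the stored token
	}
	token, err := kubeadmTokenGenerate()
	if err != nil {
		return "", err
	}
	n.Token = token // persisted so the next restart can reuse it
	return fmt.Sprintf("kubeadm join --token %s", token), nil
}

func main() {
	n := &Node{}
	first, _ := generateToken(n)
	second, _ := generateToken(n)
	fmt.Printf("first start: %q\nrestart: %q\n", first, second)
}
```
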
@@ -739,8 +740,8 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
 }
 
 // SetupCerts sets up certificates within the cluster.
-func (k *Bootstrapper) SetupCerts(k8s config.KubernetesConfig, n config.Node, keepContext bool) error {
-	_, err := bootstrapper.SetupCerts(k.c, k8s, n, keepContext)
+func (k *Bootstrapper) SetupCerts(k8s config.KubernetesConfig, n config.Node) error {
+	_, err := bootstrapper.SetupCerts(k.c, k8s, n)
 	return err
 }
 
@@ -147,6 +147,7 @@ func recreateIfNeeded(api libmachine.API, cc *config.ClusterConfig, n *config.No
 	if err := api.Save(h); err != nil {
 		return h, errors.Wrap(err, "save")
 	}
+
 	return h, nil
 }

@@ -124,7 +124,7 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
 		return nil, errors.Wrap(err, "Failed to get bootstrapper")
 	}
 
-	if err = bs.SetupCerts(starter.Cfg.KubernetesConfig, *starter.Node, true); err != nil {
+	if err = bs.SetupCerts(starter.Cfg.KubernetesConfig, *starter.Node); err != nil {
 		return nil, errors.Wrap(err, "setting up certs")
 	}
 
@@ -163,14 +163,17 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
 	}
 
-	// Make sure to use the command runner for the control plane to generate the join token
-	cpBs, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper))
-	if err != nil {
-		return nil, errors.Wrap(err, "getting control plane bootstrapper")
-	}
-
-	joinCmd, err := cpBs.GenerateToken(starter.Cfg, starter.Node)
-	if err != nil {
-		return nil, errors.Wrap(err, "generating join token")
+	var joinCmd string
+	if !starter.PreExists || starter.Node.Token == "" {
+		// Make sure to use the command runner for the control plane to generate the join token
+		cpBs, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper))
+		if err != nil {
+			return nil, errors.Wrap(err, "getting control plane bootstrapper")
+		}
+
+		joinCmd, err = cpBs.GenerateToken(starter.Cfg, starter.Node)
+		if err != nil {
+			return nil, errors.Wrap(err, "generating join token")
+		}
 	}
 
 	if err = bs.JoinCluster(*starter.Cfg, *starter.Node, joinCmd, starter.PreExists); err != nil {

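A Go detail worth noting in this hunk: joinCmd is consumed after the new if block, so it is declared up front with var and assigned with =; keeping := inside the block would declare a shadowed copy, and JoinCluster would always receive the empty string. A compact demonstration of the trap:

```go
package main

import "fmt"

func token() (string, error) { return "abcdef.0123456789abcdef", nil }

func main() {
	needToken := true

	// Buggy variant: := inside the block declares a NEW joinCmd that
	// shadows the outer one, so the outer variable never sees the token.
	joinCmd := ""
	if needToken {
		joinCmd, err := token() // shadows the outer joinCmd
		if err != nil {
			return
		}
		_ = joinCmd
	}
	fmt.Printf("shadowed: %q\n", joinCmd) // prints ""

	// The hunk's approach: declare once with var, assign with =.
	var joinCmd2 string
	if needToken {
		var err error
		joinCmd2, err = token()
		if err != nil {
			return
		}
	}
	fmt.Printf("assigned: %q\n", joinCmd2) // prints the token
}
```
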
@@ -278,7 +281,7 @@ func setupKubeAdm(mAPI libmachine.API, cfg config.ClusterConfig, n config.Node,
 		exit.WithError("Failed to update cluster", err)
 	}
 
-	if err := bs.SetupCerts(cfg.KubernetesConfig, n, false); err != nil {
+	if err := bs.SetupCerts(cfg.KubernetesConfig, n); err != nil {
 		exit.WithError("Failed to setup certs", err)
 	}
 
@@ -143,7 +143,7 @@ func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile strin
 	name := "m03"
 
 	// Start the node back up
-	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", name, "--alsologtostderr"))
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", name))
 	if err != nil {
 		t.Errorf("node start returned an error. args %q: %v", rr.Command(), err)
 	}