mirror of https://github.com/k3s-io/k3s.git
Add tests for control-plane component arg generation
Use mocked executor to ensure the correct args are being passed to components
Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
(cherry picked from commit 1ba19856de)
Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
pull/12050/head
parent fb30c1f97b
commit 89b5ea2da6
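For context, the approach the new test relies on, sketched below with a simplified stand-in (the interface, helper, and the "--profiling" flag here are illustrative and not the real k3s executor API): the control-plane code asks an executor to start each component, and a recording test double captures the args so the test can assert on exactly what would have been passed. The real test below uses a gomock-generated executor plus gomega matchers instead of a hand-rolled recorder.

package main

import "fmt"

// executor is a simplified stand-in for the component launcher interface.
type executor interface {
    Scheduler(args []string) error
}

// recordingExecutor captures the args it is asked to start the scheduler with.
type recordingExecutor struct {
    schedulerArgs []string
}

func (r *recordingExecutor) Scheduler(args []string) error {
    r.schedulerArgs = append([]string{}, args...)
    return nil
}

// startScheduler mimics arg generation: leader election is disabled only when requested.
func startScheduler(e executor, leaderElect bool) error {
    args := []string{"--profiling=false"}
    if !leaderElect {
        args = append(args, "--leader-elect=false")
    }
    return e.Scheduler(args)
}

func main() {
    rec := &recordingExecutor{}
    _ = startScheduler(rec, false)
    fmt.Println(rec.schedulerArgs) // [--profiling=false --leader-elect=false]
}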
@@ -40,6 +40,10 @@ func Default() Driver {
    return drivers[0]
}

func Clear() {
    drivers = []Driver{}
}

// SnapshotResult is returned by the Snapshot function,
// and lists the names of created and deleted snapshots.
type SnapshotResult struct {
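The new Clear() makes the registered driver list resettable between test runs. A minimal sketch of the intended usage, matching the per-case reset done in Test_UnitServer below (only the helper name and package name are hypothetical):

package control_test

import (
    "testing"

    "github.com/k3s-io/k3s/pkg/cluster/managed"
    "github.com/k3s-io/k3s/pkg/etcd"
)

// resetManagedDrivers mirrors the reset performed before each test case:
// drop any previously registered drivers, then re-register the embedded etcd driver.
func resetManagedDrivers(t *testing.T) {
    t.Helper()
    managed.Clear()
    managed.RegisterDriver(etcd.NewETCD())
}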
@@ -23,6 +23,7 @@ import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    k8sruntime "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/apimachinery/pkg/watch"
    typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/client-go/tools/cache"
@@ -183,7 +184,8 @@ func scheduler(ctx context.Context, cfg *config.Control) error {
    // finds a node that is ready to run pods during its initial scheduling loop.
    if !cfg.DisableCCM {
        logrus.Infof("Waiting for untainted node")
        if err := waitForUntaintedNode(ctx, runtime.KubeConfigScheduler); err != nil {
        // this waits forever for an untainted node; if it returns ErrWaitTimeout the context has been cancelled, and it is not a fatal error
        if err := waitForUntaintedNode(ctx, runtime.KubeConfigScheduler); err != nil && !errors.Is(err, wait.ErrWaitTimeout) {
            logrus.Fatalf("failed to wait for untained node: %v", err)
        }
    }
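For background on why ErrWaitTimeout is tolerated here: the legacy wait helpers in k8s.io/apimachinery deliberately report a cancelled context as wait.ErrWaitTimeout rather than ctx.Err(), for backward compatibility. A small illustrative sketch using one of those deprecated polling helpers (not necessarily the helper waitForUntaintedNode uses internally):

package main

import (
    "context"
    "errors"
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    go func() {
        time.Sleep(100 * time.Millisecond)
        cancel()
    }()

    // The condition never succeeds, like waiting for a node that never appears.
    err := wait.PollImmediateUntilWithContext(ctx, 10*time.Millisecond, func(ctx context.Context) (bool, error) {
        return false, nil
    })

    // Once the context is cancelled, the legacy helper reports ErrWaitTimeout.
    fmt.Println(errors.Is(err, wait.ErrWaitTimeout)) // true
}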
@@ -0,0 +1,208 @@
package control

import (
    "context"
    "net/http"
    "os"
    "testing"
    "time"

    . "github.com/onsi/gomega"

    "github.com/k3s-io/k3s/pkg/cli/cmds"
    "github.com/k3s-io/k3s/pkg/cluster"
    "github.com/k3s-io/k3s/pkg/cluster/managed"
    "github.com/k3s-io/k3s/pkg/daemons/config"
    "github.com/k3s-io/k3s/pkg/daemons/control/deps"
    "github.com/k3s-io/k3s/pkg/daemons/executor"
    "github.com/k3s-io/k3s/pkg/etcd"
    testutil "github.com/k3s-io/k3s/tests"
    "github.com/k3s-io/k3s/tests/mock"
    pkgerrors "github.com/pkg/errors"
    "go.uber.org/mock/gomock"
    utilnet "k8s.io/apimachinery/pkg/util/net"
    "k8s.io/apiserver/pkg/authentication/authenticator"
    "k8s.io/apiserver/pkg/authentication/request/anonymous"
)

func Test_UnitServer(t *testing.T) {
    tests := []struct {
        name    string
        setup   func(context.Context, *testing.T) (*config.Control, error)
        wantErr bool
    }{
        {
            name: "ControlPlane+ETCD",
            setup: func(ctx context.Context, t *testing.T) (*config.Control, error) {
                control, err := mockControl(ctx, t, true)
                if err != nil {
                    return nil, err
                }

                control.DisableCCM = true

                executor := mock.NewExecutorWithEmbeddedETCD(t)

                // leader-elect should NOT be disabled when using etcd
                matchLeaderElectArgs := mock.GM(Not(ContainElement(ContainSubstring("--leader-elect=false"))))

                executor.EXPECT().APIServerHandlers(gomock.Any()).MinTimes(1).DoAndReturn(mockHandlers)
                executor.EXPECT().APIServer(gomock.Any(), gomock.Any()).MinTimes(1).Return(nil)
                executor.EXPECT().Scheduler(gomock.Any(), gomock.Any(), matchLeaderElectArgs).MinTimes(1).Return(nil)
                executor.EXPECT().ControllerManager(gomock.Any(), matchLeaderElectArgs).MinTimes(1).Return(nil)
                executor.EXPECT().CloudControllerManager(gomock.Any(), gomock.Any(), matchLeaderElectArgs).MinTimes(1).Return(nil)

                return control, nil
            },
        },
        {
            name: "ETCD Only",
            setup: func(ctx context.Context, t *testing.T) (*config.Control, error) {
                control, err := mockControl(ctx, t, true)
                if err != nil {
                    return nil, err
                }

                control.DisableAPIServer = true
                control.DisableCCM = true
                control.DisableControllerManager = true
                control.DisableScheduler = true
                control.DisableServiceLB = true

                mock.NewExecutorWithEmbeddedETCD(t)

                // don't need to test anything else, the mock will fail if we get any unexpected calls to executor methods

                return control, nil
            },
        },
        {
            name: "ControlPlane+Kine",
            setup: func(ctx context.Context, t *testing.T) (*config.Control, error) {
                control, err := mockControl(ctx, t, false)
                if err != nil {
                    return nil, err
                }

                control.DisableCCM = true

                executor := mock.NewExecutorWithEmbeddedETCD(t)

                // leader-elect should be disabled when using kine+sqlite
                matchLeaderElectArgs := mock.GM(ContainElement(ContainSubstring("--leader-elect=false")))

                executor.EXPECT().APIServerHandlers(gomock.Any()).MinTimes(1).DoAndReturn(mockHandlers)
                executor.EXPECT().APIServer(gomock.Any(), gomock.Any()).MinTimes(1).Return(nil)
                executor.EXPECT().Scheduler(gomock.Any(), gomock.Any(), matchLeaderElectArgs).MinTimes(1).Return(nil)
                executor.EXPECT().ControllerManager(gomock.Any(), matchLeaderElectArgs).MinTimes(1).Return(nil)
                executor.EXPECT().CloudControllerManager(gomock.Any(), gomock.Any(), matchLeaderElectArgs).MinTimes(1).Return(nil)

                return control, nil
            },
        },
        {
            name: "ControlPlane+Kine with authorization-config",
            setup: func(ctx context.Context, t *testing.T) (*config.Control, error) {
                control, err := mockControl(ctx, t, false)
                if err != nil {
                    return nil, err
                }

                control.DisableCCM = true

                executor := mock.NewExecutorWithEmbeddedETCD(t)

                // authorization-mode should not be set when user sets --authorization-config
                control.ExtraAPIArgs = []string{"authorization-config=/dev/null"}
                matchAuthArgs := mock.GM(And(
                    ContainElement(ContainSubstring("--authorization-config")),
                    Not(ContainElement(ContainSubstring("--authorization-mode"))),
                ))

                // leader-elect should be disabled when using kine+sqlite
                matchLeaderElectArgs := mock.GM(ContainElement(ContainSubstring("--leader-elect=false")))

                executor.EXPECT().APIServerHandlers(gomock.Any()).MinTimes(1).DoAndReturn(mockHandlers)
                executor.EXPECT().APIServer(gomock.Any(), matchAuthArgs).MinTimes(1).Return(nil)
                executor.EXPECT().Scheduler(gomock.Any(), gomock.Any(), matchLeaderElectArgs).MinTimes(1).Return(nil)
                executor.EXPECT().ControllerManager(gomock.Any(), matchLeaderElectArgs).MinTimes(1).Return(nil)
                executor.EXPECT().CloudControllerManager(gomock.Any(), gomock.Any(), matchLeaderElectArgs).MinTimes(1).Return(nil)

                return control, nil
            },
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            // reset managed etcd driver state for each test
            managed.Clear()
            managed.RegisterDriver(etcd.NewETCD())

            ctx, cancel := context.WithCancel(context.Background())
            defer func() {
                // give time for the cluster datastore to finish saving after the cluster is started;
                // it'll panic if the context is cancelled while this is in progress
                time.Sleep(time.Second)
                cancel()
                // give time for etcd to shut down between tests, following context cancellation
                time.Sleep(time.Second * 10)
            }()

            // generate control config
            cfg, err := tt.setup(ctx, t)
            if err != nil {
                t.Errorf("Setup for Server() failed = %v", err)
                return
            }

            // bootstrap the executor with dummy node config
            nodeConfig := &config.Node{
                AgentConfig: config.Agent{
                    KubeConfigK3sController: cfg.Runtime.KubeConfigController,
                },
            }
            if err := executor.Bootstrap(ctx, nodeConfig, cmds.AgentConfig); err != nil {
                t.Errorf("Executor Bootstrap() failed = %v", err)
                return
            }

            // test Server now that everything's set up
            if err := Server(ctx, cfg); (err != nil) != tt.wantErr {
                t.Errorf("Server() error = %v, wantErr %v", err, tt.wantErr)
            }
        })
    }
}

func mockControl(ctx context.Context, t *testing.T, clusterInit bool) (*config.Control, error) {
    control := &config.Control{
        AgentToken:           "agent-token",
        ClusterInit:          clusterInit,
        DataDir:              t.TempDir(),
        ServerNodeName:       "k3s-server-1",
        ServiceNodePortRange: &utilnet.PortRange{Base: 30000, Size: 2048},
        Token:                "token",
    }

    if err := os.Chdir(control.DataDir); err != nil {
        return nil, err
    }

    os.Setenv("NODE_NAME", control.ServerNodeName)
    testutil.GenerateRuntime(control)

    control.Cluster = cluster.New(control)
    if err := control.Cluster.Bootstrap(ctx, control.ClusterReset); err != nil {
        return nil, pkgerrors.WithMessage(err, "failed to bootstrap cluster data")
    }

    if err := deps.GenServerDeps(control); err != nil {
        return nil, pkgerrors.WithMessage(err, "failed to generate server dependencies")
    }

    return control, nil
}

func mockHandlers(ctx context.Context) (authenticator.Request, http.Handler, error) {
    return &anonymous.Authenticator{}, http.NotFoundHandler(), nil
}
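The composite matcher from the "authorization-config" case above can also be evaluated on its own to see what it accepts; only the matcher expression comes from the test, the sample arg slices below are invented for illustration.

package main

import (
    "fmt"

    . "github.com/onsi/gomega"
)

func main() {
    // Same matcher the test wraps with mock.GM(...) for the apiserver args.
    matchAuthArgs := And(
        ContainElement(ContainSubstring("--authorization-config")),
        Not(ContainElement(ContainSubstring("--authorization-mode"))),
    )

    good := []string{"--authorization-config=/dev/null", "--anonymous-auth=false"}
    bad := []string{"--authorization-mode=Node,RBAC"}

    ok, _ := matchAuthArgs.Match(good)
    fmt.Println(ok) // true: config flag present, mode flag absent

    ok, _ = matchAuthArgs.Match(bad)
    fmt.Println(ok) // false: config flag missing and mode flag present
}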
@@ -11,6 +11,7 @@ import (
    "os"
    "runtime/debug"
    "strconv"
    "sync"
    "time"

    "github.com/k3s-io/k3s/pkg/agent/containerd"
@@ -39,6 +40,8 @@ import (
    _ "github.com/k3s-io/k3s/pkg/cloudprovider"
)

var once sync.Once

func init() {
    executor = &Embedded{}
}
@@ -49,7 +52,7 @@ func (e *Embedded) Bootstrap(ctx context.Context, nodeConfig *daemonconfig.Node,
    e.criReady = make(chan struct{})
    e.nodeConfig = nodeConfig

    go func() {
    go once.Do(func() {
        // Ensure that the log verbosity remains set to the configured level by resetting it at 1-second intervals
        // for the first 2 minutes that K3s is starting up. This is necessary because each of the Kubernetes
        // components will initialize klog and reset the verbosity flag when they are starting.
@@ -66,7 +69,7 @@ func (e *Embedded) Bootstrap(ctx context.Context, nodeConfig *daemonconfig.Node,
                return
            }
        }
    }()
    })

    return nil
}
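The change from go func() to go once.Do(...) means the verbosity-reset goroutine runs at most once per process, which matters now that Bootstrap is invoked repeatedly across the mocked test cases. A minimal sketch of that sync.Once behavior, with hypothetical names:

package main

import (
    "fmt"
    "sync"
)

var once sync.Once

// bootstrap stands in for Embedded.Bootstrap: the background work inside
// once.Do runs only on the first call, no matter how often bootstrap is called.
func bootstrap() {
    go once.Do(func() {
        fmt.Println("resetting log verbosity (runs once)")
    })
}

func main() {
    bootstrap()
    bootstrap() // second call: once.Do is a no-op
}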
@@ -97,7 +97,7 @@ func WaitForAPIServerReady(ctx context.Context, kubeconfigPath string, timeout t
        return true, nil
    })

    if err != nil {
    if err != nil && !errors.Is(err, context.Canceled) {
        return merr.NewErrors(err, lastErr)
    }
@@ -10,24 +10,25 @@ import (
// NewExecutorWithEmbeddedETCD creates a new mock executor, and sets it as the current executor.
// The executor exepects calls to ETCD(), and wraps the embedded executor method of the same name.
// The various ready channels are also mocked with immediate channel closure.
func NewExecutorWithEmbeddedETCD(t *testing.T) {
func NewExecutorWithEmbeddedETCD(t *testing.T) *Executor {
    mockController := gomock.NewController(t)
    mockExecutor := NewExecutor(mockController)

    embed := &executor.Embedded{}
    initialOptions := func() (executor.InitialOptions, error) {
        return executor.InitialOptions{}, nil
    }
    mockExecutor.EXPECT().Bootstrap(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(embed.Bootstrap)
    mockExecutor.EXPECT().CurrentETCDOptions().AnyTimes().DoAndReturn(embed.CurrentETCDOptions)
    mockExecutor.EXPECT().ETCD(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(embed.ETCD)
    mockExecutor.EXPECT().ETCDReadyChan().AnyTimes().DoAndReturn(embed.ETCDReadyChan)

    closedChannel := func() <-chan struct{} {
        c := make(chan struct{})
        close(c)
        return c
    }

    mockExecutor.EXPECT().ETCD(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(embed.ETCD)
    mockExecutor.EXPECT().CurrentETCDOptions().AnyTimes().DoAndReturn(initialOptions)
    mockExecutor.EXPECT().APIServerReadyChan().AnyTimes().DoAndReturn(closedChannel)
    mockExecutor.EXPECT().CRIReadyChan().AnyTimes().DoAndReturn(closedChannel)
    mockExecutor.EXPECT().ETCDReadyChan().AnyTimes().DoAndReturn(closedChannel)

    executor.Set(mockExecutor)

    return mockExecutor
}
@@ -0,0 +1,36 @@
package mock

import (
    "fmt"

    "github.com/onsi/gomega/types"
)

type gomockGomegaMatcher struct {
    gm types.GomegaMatcher
    x  any
}

// GM wraps a gomega matcher for use as a gomock matcher
func GM(gm types.GomegaMatcher) *gomockGomegaMatcher {
    return &gomockGomegaMatcher{gm: gm}
}

func (g *gomockGomegaMatcher) Matches(x any) bool {
    g.x = x
    ok, _ := g.gm.Match(x)
    return ok
}

func (g *gomockGomegaMatcher) String() string {
    if g.x != nil {
        ok, err := g.gm.Match(g.x)
        if err != nil {
            return err.Error()
        }
        if !ok {
            return g.gm.FailureMessage(g.x)
        }
    }
    return fmt.Sprintf("%T", g.gm)
}
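A standalone usage sketch for the GM adapter above, evaluated directly rather than inside a gomock expectation. It assumes the k3s module is available on the import path; the sample arg slices are invented for illustration.

package main

import (
    "fmt"

    . "github.com/onsi/gomega"

    "github.com/k3s-io/k3s/tests/mock"
)

func main() {
    // Wrap a gomega matcher so it satisfies gomock's Matcher interface
    // (Matches and String), as the control server test does.
    m := mock.GM(ContainElement(ContainSubstring("--leader-elect=false")))

    fmt.Println(m.Matches([]string{"--leader-elect=false"})) // true
    fmt.Println(m.Matches([]string{"--leader-elect=true"}))  // false
    fmt.Println(m.String())                                  // gomega failure message for the last mismatched value
}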