e2e.go: Add -deployment, add a kops deployment method

This splits off all the bash-based cluster bring-up/tear-down behind a
deployer interface, and plumbs through a separate implementation that
brings up a cluster using "kops" instead. Right now it assumes kops == AWS.
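
As a rough usage sketch (the kops binary path, cluster name, and state-store
bucket below are placeholders, not values taken from this change), the new
mode could be driven with something like:

    go run hack/e2e.go -v -up -test -down \
      -deployment=kops \
      -kops=/path/to/kops \
      -kops-cluster=mycluster.example.com \
      -kops-state=s3://my-kops-state-store

-kops-zones and -kops-nodes keep their defaults (us-west-2a, 2 nodes) unless
overridden, and -kops-state falls back to $KOPS_STATE_STORE if the flag is
not given.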
Zach Loafman 2016-09-26 15:01:43 -07:00
parent cf7301f16c
commit d905478e0a
2 changed files with 192 additions and 11 deletions


@@ -26,6 +26,7 @@ import (
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
)
@@ -38,6 +39,7 @@ var (
"You can explicitly set to false if you're, e.g., testing client changes "+
"for which the server version doesn't make a difference.")
checkLeakedResources = flag.Bool("check_leaked_resources", false, "Ensure project ends with the same resources")
deployment = flag.String("deployment", "bash", "up/down mechanism (defaults to cluster/kube-{up,down}.sh) (choices: bash/kops)")
down = flag.Bool("down", false, "If true, tear down the cluster before exiting.")
dump = flag.String("dump", "", "If set, dump cluster logs to this location on test or cluster-up failure")
kubemark = flag.Bool("kubemark", false, "If true, run kubemark tests.")
@@ -48,6 +50,14 @@ var (
upgradeArgs = flag.String("upgrade_args", "", "If set, run upgrade tests before other tests")
verbose = flag.Bool("v", false, "If true, print all command output.")
// kops specific flags.
kopsPath = flag.String("kops", "", "(kops only) Path to the kops binary. Must be set for kops.")
kopsCluster = flag.String("kops-cluster", "", "(kops only) Cluster name. Must be set for kops.")
kopsState = flag.String("kops-state", os.Getenv("KOPS_STATE_STORE"), "(kops only) s3:// path to kops state store. Must be set. (This flag defaults to $KOPS_STATE_STORE, and overrides it if set.)")
kopsZones = flag.String("kops-zones", "us-west-2a", "(kops AWS only) AWS zones for kops deployment, comma delimited.")
kopsNodes = flag.Int("kops-nodes", 2, "(kops only) Number of nodes to create.")
// Deprecated flags.
deprecatedPush = flag.Bool("push", false, "Deprecated. Does nothing.")
deprecatedPushup = flag.Bool("pushup", false, "Deprecated. Does nothing.")
deprecatedCtlCmd = flag.String("ctl", "", "Deprecated. Does nothing.")
@@ -141,12 +151,17 @@ func main() {
log.Fatalf("Called from invalid working directory: %v", err)
}
if err := run(); err != nil {
deploy, err := getDeployer()
if err != nil {
log.Fatalf("Error creating deployer: %v", err)
}
if err := run(deploy); err != nil {
log.Fatalf("Something went wrong: %s", err)
}
}
func run() error {
func run(deploy deployer) error {
if *dump != "" {
defer writeXML(time.Now())
}
@@ -167,7 +182,7 @@ func run() error {
os.Setenv("KUBE_RUNTIME_CONFIG", "batch/v2alpha1=true")
if *up {
if err := xmlWrap("TearDown", TearDown); err != nil {
if err := xmlWrap("TearDown", deploy.Down); err != nil {
return fmt.Errorf("error tearing down previous cluster: %s", err)
}
}
@@ -190,7 +205,7 @@ func run() error {
if *up {
// Start the cluster using this version.
if err := xmlWrap("Up", Up); err != nil {
if err := xmlWrap("Up", deploy.Up); err != nil {
return fmt.Errorf("starting e2e cluster: %s", err)
}
}
@@ -209,6 +224,7 @@ func run() error {
}
if *test {
errs = appendError(errs, xmlWrap("get kubeconfig", deploy.SetupKubecfg))
errs = appendError(errs, xmlWrap("kubectl version", func() error {
return finishRunning("kubectl version", exec.Command("./cluster/kubectl.sh", "version", "--match-server-version=false"))
}))
@@ -216,7 +232,7 @@ func run() error {
if *skewTests {
errs = appendError(errs, SkewTest())
} else {
if err := xmlWrap("IsUp", IsUp); err != nil {
if err := xmlWrap("IsUp", deploy.IsUp); err != nil {
errs = appendError(errs, err)
} else {
errs = appendError(errs, Test())
@@ -235,7 +251,7 @@ func run() error {
}
if *down {
errs = appendError(errs, xmlWrap("TearDown", TearDown))
errs = appendError(errs, xmlWrap("TearDown", deploy.Down))
}
if *checkLeakedResources {
@@ -337,19 +353,180 @@ func Build() error {
return nil
}
func TearDown() error {
return finishRunning("teardown", exec.Command("./hack/e2e-internal/e2e-down.sh"))
type deployer interface {
Up() error
IsUp() error
SetupKubecfg() error
Down() error
}
func Up() error {
func getDeployer() (deployer, error) {
switch *deployment {
case "bash":
return bash{}, nil
case "kops":
return NewKops()
default:
return nil, fmt.Errorf("Unknown deployment strategy %q", *deployment)
}
}
type bash struct{}
func (b bash) Up() error {
return finishRunning("up", exec.Command("./hack/e2e-internal/e2e-up.sh"))
}
// Is the e2e cluster up?
func IsUp() error {
func (b bash) IsUp() error {
return finishRunning("get status", exec.Command("./hack/e2e-internal/e2e-status.sh"))
}
func (b bash) SetupKubecfg() error {
return nil
}
func (b bash) Down() error {
return finishRunning("teardown", exec.Command("./hack/e2e-internal/e2e-down.sh"))
}
type kops struct {
path string
zones []string
nodes int
cluster string
kubecfg string
}
func NewKops() (*kops, error) {
if *kopsPath == "" {
return nil, fmt.Errorf("--kops must be set to a valid binary path for kops deployment.")
}
if *kopsCluster == "" {
return nil, fmt.Errorf("--kops-cluster must be set to a valid cluster name for kops deployment.")
}
if *kopsState == "" {
return nil, fmt.Errorf("--kops-state must be set to a valid S3 path for kops deployment.")
}
if err := os.Setenv("KOPS_STATE_STORE", *kopsState); err != nil {
return nil, err
}
f, err := ioutil.TempFile("", "kops-kubecfg")
if err != nil {
return nil, err
}
defer f.Close()
kubecfg := f.Name()
if err := f.Chmod(0600); err != nil {
return nil, err
}
if err := os.Setenv("KUBECONFIG", kubecfg); err != nil {
return nil, err
}
// Set KUBERNETES_CONFORMANCE_TEST so the auth info is picked up
// from kubectl instead of bash inference.
if err := os.Setenv("KUBERNETES_CONFORMANCE_TEST", "yes"); err != nil {
return nil, err
}
// Set KUBERNETES_CONFORMANCE_PROVIDER to override the
// cloudprovider for KUBERNETES_CONFORMANCE_TEST.
if err := os.Setenv("KUBERNETES_CONFORMANCE_PROVIDER", "aws"); err != nil {
return nil, err
}
// ZONE is required by the AWS e2e tests
zones := strings.Split(*kopsZones, ",")
if err := os.Setenv("ZONE", zones[0]); err != nil {
return nil, err
}
return &kops{
path: *kopsPath,
zones: zones,
nodes: *kopsNodes,
cluster: *kopsCluster,
kubecfg: kubecfg,
}, nil
}
func (k kops) Up() error {
if err := finishRunning("kops config", exec.Command(
k.path, "create", "cluster",
"--name", k.cluster,
"--node-count", strconv.Itoa(k.nodes),
"--zones", strings.Join(k.zones, ","))); err != nil {
return fmt.Errorf("kops configuration failed: %v", err)
}
if err := finishRunning("kops update", exec.Command(k.path, "update", "cluster", k.cluster, "--yes")); err != nil {
return fmt.Errorf("kops bringup failed: %v", err)
}
// TODO(zmerlynn): More cluster validation. This should perhaps be
// added to kops and not here, but this is a fine place to loop
// for now.
for stop := time.Now().Add(10 * time.Minute); time.Now().Before(stop); time.Sleep(30 * time.Second) {
n, err := clusterSize(k)
if err != nil {
log.Printf("Can't get cluster size, sleeping: %v", err)
continue
}
if n < k.nodes+1 {
log.Printf("%d (current nodes) < %d (requested instances), sleeping", n, k.nodes+1)
continue
}
return nil
}
return fmt.Errorf("kops bringup timed out")
}
func (k kops) IsUp() error {
n, err := clusterSize(k)
if err != nil {
return err
}
if n <= 0 {
return fmt.Errorf("kops cluster found, but %d nodes reported", n)
}
return nil
}
func (k kops) SetupKubecfg() error {
info, err := os.Stat(k.kubecfg)
if err != nil {
return err
}
if info.Size() > 0 {
// Assume that if we already have it, it's good.
return nil
}
if err := finishRunning("kops export", exec.Command(k.path, "export", "kubecfg", k.cluster)); err != nil {
return fmt.Errorf("Failure exporting kops kubecfg: %v", err)
}
return nil
}
func (k kops) Down() error {
// We do a "kops get" first so the exit status of "kops delete" is
// more sensical in the case of a non-existant cluster. ("kops
// delete" will exit with status 1 on a non-existant cluster)
err := finishRunning("kops get", exec.Command(k.path, "get", "clusters", k.cluster))
if err != nil {
// This is expected if the cluster doesn't exist.
return nil
}
return finishRunning("kops delete", exec.Command(k.path, "delete", "cluster", k.cluster, "--yes"))
}
func clusterSize(deploy deployer) (int, error) {
if err := deploy.SetupKubecfg(); err != nil {
return -1, err
}
o, err := exec.Command("kubectl", "get", "nodes", "--no-headers").Output()
if err != nil {
log.Printf("kubectl get nodes failed: %v", err)
return -1, err
}
stdout := strings.TrimSpace(string(o))
log.Printf("Cluster nodes:\n%s", stdout)
return len(strings.Split(stdout, "\n")), nil
}
func DumpClusterLogs(location string) error {
log.Printf("Dumping cluster logs to: %v", location)
return finishRunning("dump cluster logs", exec.Command("./cluster/log-dump.sh", location))


@@ -266,6 +266,10 @@ k8s-bin-dir
k8s-build-output
keep-gogoproto
km-path
kops-cluster
kops-nodes
kops-state
kops-zones
kube-api-burst
kube-api-content-type
kube-api-qps