Add more E2E tests and improvements (#3111)

* remove checked in binary and update test/e2e Makefile

Signed-off-by: Ashish Amarnath <ashisham@vmware.com>

* remove platform specific tests for now

Signed-off-by: Ashish Amarnath <ashisham@vmware.com>

* install velero before running tests and make Makefiles more robust

Signed-off-by: Ashish Amarnath <ashisham@vmware.com>

* changelog

Signed-off-by: Ashish Amarnath <ashisham@vmware.com>

* running e2e tests expects a credentials file to be supplied
run e2e tests against the velero/velero:main image by default

Signed-off-by: Ashish Amarnath <ashisham@vmware.com>

* refactor to parameterize tests

Signed-off-by: Ashish Amarnath <ashisham@vmware.com>

* rename files to use provider tests convention

Signed-off-by: Ashish Amarnath <ashisham@vmware.com>

* rename tests file

Signed-off-by: Ashish Amarnath <ashisham@vmware.com>

* remove providerName config

Signed-off-by: Ashish Amarnath <ashisham@vmware.com>

* run kibishii test on azure

Signed-off-by: Ashish Amarnath <ashisham@vmware.com>

* refactor to make bsl vsl configurable

Signed-off-by: Ashish Amarnath <ashisham@vmware.com>

* skip e2e tests when not explicitly running e2e tests

Signed-off-by: Ashish Amarnath <ashisham@vmware.com>

* update e2e docs

Signed-off-by: Ashish Amarnath <ashisham@vmware.com>

* refactor and update docs

Signed-off-by: Ashish Amarnath <ashisham@vmware.com>

* refactor

Signed-off-by: Ashish Amarnath <ashisham@vmware.com>

* cleanup

Signed-off-by: Ashish Amarnath <ashisham@vmware.com>

* use velero's exec package

Signed-off-by: Ashish Amarnath <ashisham@vmware.com>

Co-authored-by: Dave Smith-Uchida <dsmithuchida@vmware.com>
Ashish Amarnath · 2020-12-09 16:26:05 -08:00 · committed by GitHub
commit 249215f1ff · parent 844cc16803
13 changed files with 399 additions and 315 deletions


@@ -333,5 +333,5 @@ gen-docs:
 	@hack/release-tools/gen-docs.sh

 .PHONY: test-e2e
-test-e2e:
+test-e2e: local
 	$(MAKE) -C test/e2e run
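Note that `test-e2e` now depends on the `local` target, so the velero CLI is built for the host OS/arch before the suite in `test/e2e` runs; this lines up with the new `VELERO_CLI` default of `_output/bin/$(GOOS)/$(GOARCH)/velero` in the test Makefile below.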


@@ -25,17 +25,18 @@ TARGETS=(
   ./cmd/...
   ./pkg/...
   ./internal/...
+  ./test/...
 )

 if [[ ${#@} -ne 0 ]]; then
   TARGETS=("$@")
 fi

-echo "Running tests:" "${TARGETS[@]}"
+echo "Running all short tests in:" "${TARGETS[@]}"

 if [[ -n "${GOFLAGS:-}" ]]; then
   echo "GOFLAGS: ${GOFLAGS}"
 fi

-go test -installsuffix "static" -timeout 60s "${TARGETS[@]}"
+go test -installsuffix "static" -short -timeout 60s "${TARGETS[@]}"
 echo "Success!"


@@ -46,7 +46,14 @@
 GINKGO := $(GOPATH)/bin/ginkgo
 KUSTOMIZE := $(TOOLS_BIN_DIR)/kustomize
 OUTPUT_DIR := _output/$(GOOS)/$(GOARCH)/bin
 GINKGO_FOCUS ?=
-CLOUD_PLATFORM ?= kind
+VELERO_CLI ?= $$(pwd)/../../_output/bin/$(GOOS)/$(GOARCH)/velero
+VELERO_IMAGE ?= velero/velero:main
+CREDS_FILE ?=
+BSL_BUCKET ?=
+BSL_PREFIX ?=
+BSL_CONFIG ?=
+VSL_CONFIG ?=
+PLUGIN_PROVIDER ?=

 .PHONY:ginkgo
 ginkgo: # Make sure ginkgo is in $GOPATH/bin
@@ -54,7 +61,20 @@ ginkgo: # Make sure ginkgo is in $GOPATH/bin
 .PHONY: run
 run: ginkgo
-	$(GINKGO) -v -focus="$(GINKGO_FOCUS)" . -- -velerocli=../../_output/bin/$(GOOS)/$(GOARCH)/velero -cloudplatform=$(CLOUD_PLATFORM)
+	@[ "${CREDS_FILE}" ] && echo "Using credentials from ${CREDS_FILE}" || \
+		(echo "A credentials file is required to run E2E tests, please re-run the make target with CREDS_FILE=<PathToCredentialsFile>"; exit 1)
+	@[ "${BSL_BUCKET}" ] && echo "Using bucket ${BSL_BUCKET} to store backups from E2E tests" || \
+		(echo "Bucket to store the backups from E2E tests is required, please re-run with BSL_BUCKET=<BucketName>"; exit 1)
+	@[ "${PLUGIN_PROVIDER}" ] && echo "Using plugin provider ${PLUGIN_PROVIDER} for object storage and volume snapshotting" || \
+		(echo "Plugin provider to store the backups from E2E tests is required, please re-run with PLUGIN_PROVIDER=<pluginProviderName>"; exit 1)
+	@$(GINKGO) -v -focus="$(GINKGO_FOCUS)" . -- -velerocli=$(VELERO_CLI) \
+		-velero-image=$(VELERO_IMAGE) \
+		-credentials-file=$(CREDS_FILE) \
+		-bucket=$(BSL_BUCKET) \
+		-prefix=$(BSL_PREFIX) \
+		-bsl-config=$(BSL_CONFIG) \
+		-vsl-config=$(VSL_CONFIG) \
+		-plugin-provider=$(PLUGIN_PROVIDER)

 build: ginkgo
 	mkdir -p $(OUTPUT_DIR)
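The `run` target validates its required variables up front: the `@[ "${VAR}" ] && echo ... || (echo ...; exit 1)` idiom fails the make invocation with a usage hint whenever `CREDS_FILE`, `BSL_BUCKET`, or `PLUGIN_PROVIDER` is empty, instead of letting ginkgo fail later with a less obvious error.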


@@ -2,45 +2,74 @@
 Document for running Velero end-to-end test suite.

-## Command line flags for E2E tests
-Command line flags can be set after
-```
-velerocli - the velero CLI to use
-kibishiins - the namespace to install kibishii in
-cloudplatform - the cloud platform the tests will be run against (aws, vsphere, azure)
-```
+The E2E tests validate end-to-end behavior of Velero, including install, backup, and restore operations. These tests take longer to complete and are not expected to be part of the day-to-day developer workflow. It is for this reason that they are disabled when running unit tests. This is accomplished by running unit tests in [`short`](https://golang.org/pkg/testing/#Short) mode using the `-short` flag to `go test`.
+
+If you previously ran unit tests using the `go test ./...` command or any of its variations, you will now run the same command with the `-short` flag to accomplish the same behavior. Alternatively, you can use the `make test` command to run unit tests.
+
+## Prerequisites
+
+Running the E2E tests expects:
+1. A running kubernetes cluster:
+    1. With DNS and CNI installed.
+    1. Compatible with Velero: running Kubernetes v1.10 or later.
+    1. With necessary storage drivers/provisioners installed.
+1. `kubectl` installed locally.
+
+## Limitations
+
+These are the current limitations of the E2E tests.
+1. E2E tests accept credentials for only a single provider and, for that reason, only tests for a single provider can be run at a time.
+1. Each E2E test suite installs an instance of Velero to run tests and uninstalls it after test completion. It is possible that one test suite may be installing Velero while another is uninstalling it. This race condition can lead to flaky tests and cause false negatives. The options for resolving this are:
+    1. Make each test suite's setup wait for Velero to be uninstalled before attempting to install as part of its setup.
+    1. Make each test suite install Velero in a different namespace.
+
+## Configuration for E2E tests
+
+Below is a list of the configuration used by E2E tests.
+These configuration parameters are expected as values to the following command line flags:
+
+1. `-credentials-file`: File containing credentials for backup and volume provider. Required.
+1. `-bucket`: Name of the object storage bucket where backups from e2e tests should be stored. Required.
+1. `-plugin-provider`: Provider of object store and volume snapshotter plugins. Required.
+1. `-velerocli`: Path to the velero application to use. Optional, by default uses `velero` in the `$PATH`.
+1. `-velero-image`: Image for the velero server to be tested. Optional, by default uses `velero/velero:main`.
+1. `-bsl-config`: Configuration to use for the backup storage location. Format is key1=value1,key2=value2. Optional.
+1. `-prefix`: Prefix in the `bucket` under which all Velero data should be stored. Optional.
+1. `-vsl-config`: Configuration to use for the volume snapshot location. Format is key1=value1,key2=value2. Optional.
+
+These configuration parameters are used to generate install options for Velero for each test suite.

 ## Running tests locally
-1. From Velero repository root
-```
-make test-e2e
-```
-1. From `test/e2e/` directory
-```
-make run
-```
-
-## Running tests based on cloud platforms
-1. Running Velero E2E tests on KinD
-```
-CLOUD_PLATFORM=kind make test-e2e
-```
-1. Running Velero E2E tests on AWS
-```
-CLOUD_PLATFORM=aws make test-e2e
-```
-1. Running Velero E2E tests on Azure
-```
-CLOUD_PLATFORM=azure make test-e2e
-```
+
+### Running using `make`
+
+E2E tests can be run from the Velero repository root by running `make test-e2e`. When running E2E tests using `make`, the E2E test configuration values are passed using `make` variables.
+
+Below is a mapping from `make` variables to E2E configuration flags:
+1. `CREDS_FILE`: `-credentials-file`. Required.
+1. `BSL_BUCKET`: `-bucket`. Required.
+1. `PLUGIN_PROVIDER`: `-plugin-provider`. Required.
+1. `VELERO_CLI`: `-velerocli`. Optional.
+1. `VELERO_IMAGE`: `-velero-image`. Optional.
+1. `BSL_PREFIX`: `-prefix`. Optional.
+1. `BSL_CONFIG`: `-bsl-config`. Optional.
+1. `VSL_CONFIG`: `-vsl-config`. Optional.
+
+For example, E2E tests can be run from the Velero repository root using the commands below:
+1. Run Velero tests using AWS as the storage provider:
+    ```bash
+    BSL_PREFIX=<PREFIX_UNDER_BUCKET> BSL_BUCKET=<BUCKET_FOR_E2E_TEST_BACKUP> CREDS_FILE=/path/to/aws-creds PLUGIN_PROVIDER=aws make test-e2e
+    ```
+1. Run Velero tests using Microsoft Azure as the storage provider:
+    ```bash
+    BSL_CONFIG="resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,storageAccount=$AZURE_STORAGE_ACCOUNT_ID,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID" BSL_BUCKET=velero CREDS_FILE=~/bin/velero-dev/aks-creds PLUGIN_PROVIDER=azure make test-e2e
+    ```
+Please refer to the `velero-plugin-for-microsoft-azure` documentation for instructions to [set up permissions for Velero](https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure#set-permissions-for-velero) and to [set up the azure storage account and blob container](https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure#setup-azure-storage-account-and-blob-container).
+
+## Filtering tests
+
+Velero E2E tests use the [Ginkgo](https://onsi.github.io/ginkgo/) testing framework, which allows a subset of the tests to be run using the [`-focus` and `-skip`](https://onsi.github.io/ginkgo/#focused-specs) flags to ginkgo.
+
+The `-focus` flag is passed to ginkgo using the `GINKGO_FOCUS` make variable. This can be used to focus on specific tests.
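For example, to run only the Restic-based backup/restore specs, a hypothetical invocation (assuming `GINKGO_FOCUS` is matched against the `Describe` spec strings, such as the `[Restic] [KinD]` description introduced in this commit) would be `GINKGO_FOCUS=Restic CREDS_FILE=/path/to/aws-creds BSL_BUCKET=<BUCKET> PLUGIN_PROVIDER=aws make test-e2e`.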


@@ -1,24 +0,0 @@
-package e2e
-
-import (
-	"context"
-	"flag"
-
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-)
-
-var _ = Describe("Testing Velero on an aws cluster", func() {
-	BeforeEach(func() {
-		flag.Parse()
-		ctx := context.TODO()
-		err := EnsureClusterExists(ctx)
-		Expect(err).NotTo(HaveOccurred())
-	})
-	Describe("", func() {
-		Context("Dummy test", func() {
-			It("is a dummy test", func() {
-			})
-		})
-	})
-})


@@ -1,24 +0,0 @@
-package e2e
-
-import (
-	"context"
-	"flag"
-
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-)
-
-var _ = Describe("Testing Velero on an azure cluster", func() {
-	BeforeEach(func() {
-		flag.Parse()
-		ctx := context.TODO()
-		err := EnsureClusterExists(ctx)
-		Expect(err).NotTo(HaveOccurred())
-	})
-	Describe("Dummy test", func() {
-		Context("Dummy test", func() {
-			It("is a dummy test", func() {
-			})
-		})
-	})
-})


@@ -2,6 +2,7 @@ package e2e

 import (
 	"flag"
+	"fmt"
 	"time"

 	"github.com/google/uuid"
@@ -9,75 +10,54 @@ import (
 	. "github.com/onsi/gomega"
 	"golang.org/x/net/context"

-	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+
+	"github.com/vmware-tanzu/velero/pkg/cmd/cli/install"
 )

 var (
-	backupName  string
-	restoreName string
+	uuidgen              uuid.UUID
+	veleroInstallOptions *install.InstallOptions
 )

-var _ = Describe("Backup Restore test using Kibishii to generate/verify data", func() {
-	BeforeEach(func() {
-		flag.Parse()
-	})
-	Describe("backing up and restoring namespace with data", func() {
-		Context("when the backup is successful", func() {
-			It("generates data, backups up the namespace, deletes the namespace, restores the namespace and verifies data", func() {
-				backupUUID, err := uuid.NewRandom()
-				Expect(err).NotTo(HaveOccurred())
-				backupName = "backup-" + backupUUID.String()
-				restoreName = "restore-" + backupUUID.String()
-				println("backupName = " + backupName)
-				println("creating namespace " + kibishiNamespace)
-				timeoutCTX, _ := context.WithTimeout(context.Background(), time.Minute)
-				err = CreateNamespace(timeoutCTX, kibishiNamespace)
-				Expect(err).NotTo(HaveOccurred())
-				println("installing kibishii in namespace " + kibishiNamespace)
-				timeoutCTX, _ = context.WithTimeout(context.Background(), 30*time.Minute)
-				err = InstallKibishii(timeoutCTX, kibishiNamespace, cloudPlatform)
-				Expect(err).NotTo(HaveOccurred())
-				println("running kibishii generate")
-				timeoutCTX, _ = context.WithTimeout(context.Background(), time.Minute*60)
-				err = GenerateData(timeoutCTX, kibishiNamespace, 2, 10, 10, 1024, 1024, 0, 2)
-				Expect(err).NotTo(HaveOccurred())
-				println("executing backup")
-				timeoutCTX, _ = context.WithTimeout(context.Background(), time.Minute*30)
-				err = BackupNamespace(timeoutCTX, veleroCLI, backupName, kibishiNamespace)
-				Expect(err).NotTo(HaveOccurred())
-				timeoutCTX, _ = context.WithTimeout(context.Background(), time.Minute)
-				err = CheckBackupPhase(timeoutCTX, veleroCLI, backupName, velerov1.BackupPhaseCompleted)
-				Expect(err).NotTo(HaveOccurred())
-				println("removing namespace " + kibishiNamespace)
-				timeoutCTX, _ = context.WithTimeout(context.Background(), time.Minute)
-				err = RemoveNamespace(timeoutCTX, kibishiNamespace)
-				Expect(err).NotTo(HaveOccurred())
-				println("restoring namespace")
-				timeoutCTX, _ = context.WithTimeout(context.Background(), time.Minute*30)
-				err = RestoreNamespace(timeoutCTX, veleroCLI, restoreName, backupName)
-				Expect(err).NotTo(HaveOccurred())
-				println("Checking that namespace is present")
-				// TODO - check that namespace exists
-				println("running kibishii verify")
-				timeoutCTX, _ = context.WithTimeout(context.Background(), time.Minute*60)
-				err = VerifyData(timeoutCTX, kibishiNamespace, 2, 10, 10, 1024, 1024, 0, 2)
-				Expect(err).NotTo(HaveOccurred())
-				println("removing namespace " + kibishiNamespace)
-				timeoutCTX, _ = context.WithTimeout(context.Background(), time.Minute)
-				err = RemoveNamespace(timeoutCTX, kibishiNamespace)
-				Expect(err).NotTo(HaveOccurred())
-			})
-		})
-	})
-})
+func veleroInstall(pluginProvider string, useRestic bool) {
+	var err error
+	flag.Parse()
+	Expect(EnsureClusterExists(context.TODO())).To(Succeed(), "Failed to ensure kubernetes cluster exists")
+	uuidgen, err = uuid.NewRandom()
+	Expect(err).To(Succeed())
+	veleroInstallOptions, err = GetProviderVeleroInstallOptions(pluginProvider, cloudCredentialsFile, bslBucket, bslPrefix, bslConfig, vslConfig, getProviderPlugins(pluginProvider))
+	Expect(err).To(Succeed(), fmt.Sprintf("Failed to get Velero InstallOptions for plugin provider %s", pluginProvider))
+	veleroInstallOptions.UseRestic = useRestic
+	Expect(InstallVeleroServer(veleroInstallOptions)).To(Succeed(), "Failed to install Velero on KinD cluster")
+}
+
+// Test backup and restore of Kibishii using restic
+var _ = Describe("[Restic] [KinD] Velero tests on KinD cluster using the plugin provider for object storage and Restic for volume backups", func() {
+	var (
+		client      *kubernetes.Clientset
+		backupName  string
+		restoreName string
+	)
+
+	BeforeEach(func() {
+		var err error
+		veleroInstall(pluginProvider, true)
+		client, err = GetClusterClient()
+		Expect(err).To(Succeed(), "Failed to instantiate cluster client")
+	})
+
+	AfterEach(func() {
+		timeoutCTX, _ := context.WithTimeout(context.Background(), time.Minute)
+		err := client.CoreV1().Namespaces().Delete(timeoutCTX, veleroInstallOptions.Namespace, metav1.DeleteOptions{})
+		Expect(err).To(Succeed())
+	})
+
+	Context("When kibishii is the sample workload", func() {
+		It("should be successfully backed up and restored", func() {
+			backupName = "backup-" + uuidgen.String()
+			restoreName = "restore-" + uuidgen.String()
+			// Even though we are using Velero's CloudProvider plugin for object storage, the kubernetes cluster is running on
+			// KinD. So use the kind installation for Kibishii.
+			Expect(RunKibishiiTests(client, "kind", veleroCLI, backupName, restoreName)).To(Succeed(), "Failed to successfully backup and restore Kibishii namespace")
+		})
+	})
+})


@@ -3,25 +3,44 @@ package e2e

 import (
 	"os/exec"

+	"github.com/pkg/errors"
 	"golang.org/x/net/context"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
+
+	"github.com/vmware-tanzu/velero/pkg/builder"
 )

+// EnsureClusterExists returns whether or not a kubernetes cluster exists for tests to be run on.
 func EnsureClusterExists(ctx context.Context) error {
 	return exec.CommandContext(ctx, "kubectl", "cluster-info").Run()
 }

-func CreateNamespace(ctx context.Context, namespace string) error {
-	// TODO - should we talk directly to the API server?
-	err := exec.CommandContext(ctx, "kubectl", "create", "namespace", namespace).Run()
-	return err
+// GetClusterClient instantiates and returns a client for the cluster.
+func GetClusterClient() (*kubernetes.Clientset, error) {
+	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+	configOverrides := &clientcmd.ConfigOverrides{}
+	kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
+	clientConfig, err := kubeConfig.ClientConfig()
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+	client, err := kubernetes.NewForConfig(clientConfig)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+	return client, nil
 }

-func RemoveNamespace(ctx context.Context, namespace string) error {
-	// TODO - should we talk directly to the API server?
-	err := exec.CommandContext(ctx, "kubectl", "delete", "namespace", namespace).Run()
-	return err
-}
-
-func NamespaceExists(ctx context.Context, namespace string) (bool, error) {
-	return false, nil
+// CreateNamespace creates a kubernetes namespace
+func CreateNamespace(ctx context.Context, client *kubernetes.Clientset, namespace string) error {
+	ns := builder.ForNamespace(namespace).Result()
+	_, err := client.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
+	if apierrors.IsAlreadyExists(err) {
+		return nil
+	}
+	return err
 }


@@ -9,18 +9,28 @@ import (
 )

 var (
-	veleroCLI        string
-	kibishiNamespace string
-	cloudPlatform    string // aws, vsphere, azure
+	veleroCLI, veleroImage, cloudCredentialsFile, bslConfig, bslBucket, bslPrefix, vslConfig, pluginProvider string
 )

 func init() {
-	flag.StringVar(&veleroCLI, "velerocli", "velero", "path to the velero application to use")
-	flag.StringVar(&kibishiNamespace, "kibishiins", "kibishii", "namespace to use for Kibishii distributed data generator")
-	flag.StringVar(&cloudPlatform, "cloudplatform", "aws", "cloud platform we are deploying on (aws, vsphere, azure)")
+	flag.StringVar(&pluginProvider, "plugin-provider", "", "Provider of object store and volume snapshotter plugins. Required.")
+	flag.StringVar(&bslBucket, "bucket", "", "name of the object storage bucket where backups from e2e tests should be stored. Required.")
+	flag.StringVar(&cloudCredentialsFile, "credentials-file", "", "file containing credentials for backup and volume provider. Required.")
+	flag.StringVar(&veleroCLI, "velerocli", "velero", "path to the velero application to use.")
+	flag.StringVar(&veleroImage, "velero-image", "velero/velero:main", "image for the velero server to be tested.")
+	flag.StringVar(&bslConfig, "bsl-config", "", "configuration to use for the backup storage location. Format is key1=value1,key2=value2")
+	flag.StringVar(&bslPrefix, "prefix", "", "prefix under which all Velero data should be stored within the bucket. Optional.")
+	flag.StringVar(&vslConfig, "vsl-config", "", "configuration to use for the volume snapshot location. Format is key1=value1,key2=value2")
 }

 func TestE2e(t *testing.T) {
+	// Skip running E2E tests when running only "short" tests because:
+	// 1. E2E tests are long running tests involving installation of Velero and performing backup and restore operations.
+	// 2. E2E tests require a kubernetes cluster to install and run velero, which further requires more configuration. See the command line flags above.
+	if testing.Short() {
+		t.Skip("Skipping E2E tests")
+	}
 	RegisterFailHandler(Fail)
 	RunSpecs(t, "E2e Suite")
 }

test/e2e/kibishii_tests.go (new file, 117 lines)

@@ -0,0 +1,117 @@
+package e2e
+
+import (
+	"fmt"
+	"os/exec"
+	"strconv"
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+	"k8s.io/client-go/kubernetes"
+
+	veleroexec "github.com/vmware-tanzu/velero/pkg/util/exec"
+)
+
+const (
+	kibishiiNamespace = "kibishii-workload"
+)
+
+func installKibishii(ctx context.Context, namespace string, cloudPlatform string) error {
+	// We use kustomize to generate YAML for Kibishii from the checked-in yaml directories
+	kibishiiInstallCmd := exec.CommandContext(ctx, "kubectl", "apply", "-n", namespace, "-k",
+		"github.com/vmware-tanzu-labs/distributed-data-generator/kubernetes/yaml/"+cloudPlatform)
+
+	_, _, err := veleroexec.RunCommand(kibishiiInstallCmd)
+	if err != nil {
+		return errors.Wrap(err, "failed to install kibishii")
+	}
+
+	kibishiiSetWaitCmd := exec.CommandContext(ctx, "kubectl", "rollout", "status", "statefulset.apps/kibishii-deployment",
+		"-n", namespace, "-w", "--timeout=30m")
+	_, _, err = veleroexec.RunCommand(kibishiiSetWaitCmd)
+	if err != nil {
+		return err
+	}
+
+	jumpPadWaitCmd := exec.CommandContext(ctx, "kubectl", "wait", "--for=condition=ready", "-n", namespace, "pod/jump-pad")
+	_, _, err = veleroexec.RunCommand(jumpPadWaitCmd)
+	return err
+}
+
+func generateData(ctx context.Context, namespace string, levels int, filesPerLevel int, dirsPerLevel int, fileSize int,
+	blockSize int, passNum int, expectedNodes int) error {
+	kibishiiGenerateCmd := exec.CommandContext(ctx, "kubectl", "exec", "-n", namespace, "jump-pad", "--",
+		"/usr/local/bin/generate.sh", strconv.Itoa(levels), strconv.Itoa(filesPerLevel), strconv.Itoa(dirsPerLevel), strconv.Itoa(fileSize),
+		strconv.Itoa(blockSize), strconv.Itoa(passNum), strconv.Itoa(expectedNodes))
+	fmt.Printf("kibishiiGenerateCmd cmd =%v\n", kibishiiGenerateCmd)
+
+	_, _, err := veleroexec.RunCommand(kibishiiGenerateCmd)
+	if err != nil {
+		return errors.Wrap(err, "failed to generate")
+	}
+	return nil
+}
+
+func verifyData(ctx context.Context, namespace string, levels int, filesPerLevel int, dirsPerLevel int, fileSize int,
+	blockSize int, passNum int, expectedNodes int) error {
+	kibishiiVerifyCmd := exec.CommandContext(ctx, "kubectl", "exec", "-n", namespace, "jump-pad", "--",
+		"/usr/local/bin/verify.sh", strconv.Itoa(levels), strconv.Itoa(filesPerLevel), strconv.Itoa(dirsPerLevel), strconv.Itoa(fileSize),
+		strconv.Itoa(blockSize), strconv.Itoa(passNum), strconv.Itoa(expectedNodes))
+	fmt.Printf("kibishiiVerifyCmd cmd =%v\n", kibishiiVerifyCmd)
+
+	_, _, err := veleroexec.RunCommand(kibishiiVerifyCmd)
+	if err != nil {
+		return errors.Wrap(err, "failed to verify")
+	}
+	return nil
+}
+
+// RunKibishiiTests runs kibishii tests on the provider.
+func RunKibishiiTests(client *kubernetes.Clientset, providerName, veleroCLI, backupName, restoreName string) error {
+	fiveMinTimeout, _ := context.WithTimeout(context.Background(), 5*time.Minute)
+	oneHourTimeout, _ := context.WithTimeout(context.Background(), time.Minute*60)
+
+	if err := CreateNamespace(fiveMinTimeout, client, kibishiiNamespace); err != nil {
+		return errors.Wrapf(err, "Failed to create namespace %s to install Kibishii workload", kibishiiNamespace)
+	}
+
+	if err := installKibishii(fiveMinTimeout, kibishiiNamespace, providerName); err != nil {
+		return errors.Wrap(err, "Failed to install Kibishii workload")
+	}
+
+	if err := generateData(oneHourTimeout, kibishiiNamespace, 2, 10, 10, 1024, 1024, 0, 2); err != nil {
+		return errors.Wrap(err, "Failed to generate data")
+	}
+
+	if err := VeleroBackupNamespace(oneHourTimeout, veleroCLI, backupName, kibishiiNamespace); err != nil {
+		return errors.Wrapf(err, "Failed to backup kibishii namespace %s", kibishiiNamespace)
+	}
+
+	fmt.Printf("Simulating a disaster by removing namespace %s\n", kibishiiNamespace)
+	if err := client.CoreV1().Namespaces().Delete(oneHourTimeout, kibishiiNamespace, metav1.DeleteOptions{}); err != nil {
+		return errors.Wrap(err, "Failed to simulate a disaster")
+	}
+
+	if err := VeleroRestore(oneHourTimeout, veleroCLI, restoreName, backupName); err != nil {
+		return errors.Wrapf(err, "Restore %s failed from backup %s", restoreName, backupName)
+	}
+
+	// TODO - check that namespace exists
+	fmt.Printf("running kibishii verify\n")
+	if err := verifyData(oneHourTimeout, kibishiiNamespace, 2, 10, 10, 1024, 1024, 0, 2); err != nil {
+		return errors.Wrap(err, "Failed to verify data generated by kibishii")
+	}
+
+	if err := client.CoreV1().Namespaces().Delete(oneHourTimeout, kibishiiNamespace, metav1.DeleteOptions{}); err != nil {
+		return errors.Wrapf(err, "Failed to cleanup %s workload namespace", kibishiiNamespace)
+	}
+
+	// kibishii test completed successfully
+	return nil
+}
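The switch to `veleroexec.RunCommand` (Velero's `pkg/util/exec`) replaces the hand-rolled stdout piping in the deleted helper file below. A minimal sketch of how it is used, assuming the `(stdout, stderr, error)` return shape implied by the calls above:

```go
package main

import (
	"fmt"
	"os/exec"

	veleroexec "github.com/vmware-tanzu/velero/pkg/util/exec"
)

func main() {
	// Run a command and capture both output streams, so failures can be
	// reported with the captured stderr instead of streaming pipes by hand.
	cmd := exec.Command("kubectl", "get", "ns")
	stdout, stderr, err := veleroexec.RunCommand(cmd)
	if err != nil {
		fmt.Printf("command failed: %v\nstderr: %s\n", err, stderr)
		return
	}
	fmt.Print(stdout)
}
```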


@@ -1,145 +0,0 @@
-package e2e
-
-import (
-	"bufio"
-	"fmt"
-	"io"
-	"os"
-	"os/exec"
-	"strconv"
-	"strings"
-
-	"github.com/pkg/errors"
-	"golang.org/x/net/context"
-)
-
-func InstallKibishii(ctx context.Context, namespace string, cloudPlatform string) error {
-	// We use kustomize to generate YAML for Kibishii from the checked-in yaml directories
-	kibishiiInstallCmd := exec.CommandContext(ctx, "kubectl", "apply", "-n", namespace, "-k",
-		"github.com/vmware-tanzu-labs/distributed-data-generator/kubernetes/yaml/"+cloudPlatform)
-
-	stdoutPipe, err := kibishiiInstallCmd.StdoutPipe()
-	if err != nil {
-		return err
-	}
-
-	err = kibishiiInstallCmd.Start()
-	if err != nil {
-		return err
-	}
-	defer stdoutPipe.Close()
-
-	// copy the data written to the PipeReader via the cmd to stdout
-	_, err = io.Copy(os.Stdout, stdoutPipe)
-	if err != nil {
-		return err
-	}
-
-	err = kibishiiInstallCmd.Wait()
-	if err != nil {
-		return err
-	}
-
-	kibishiiSetWaitCmd := exec.CommandContext(ctx, "kubectl", "rollout", "status", "statefulset.apps/kibishii-deployment",
-		"-n", namespace, "-w", "--timeout=30m")
-	err = kibishiiSetWaitCmd.Run()
-	if err != nil {
-		return err
-	}
-
-	jumpPadWaitCmd := exec.CommandContext(ctx, "kubectl", "wait", "--for=condition=ready", "-n", namespace, "pod/jump-pad")
-	err = jumpPadWaitCmd.Run()
-	return err
-}
-
-func GenerateData(ctx context.Context, namespace string, levels int, filesPerLevel int, dirsPerLevel int, fileSize int,
-	blockSize int, passNum int, expectedNodes int) error {
-	kibishiiGenerateCmd := exec.CommandContext(ctx, "kubectl", "exec", "-n", namespace, "jump-pad", "--",
-		"/usr/local/bin/generate.sh", strconv.Itoa(levels), strconv.Itoa(filesPerLevel), strconv.Itoa(dirsPerLevel), strconv.Itoa(fileSize),
-		strconv.Itoa(blockSize), strconv.Itoa(passNum), strconv.Itoa(expectedNodes))
-	fmt.Printf("kibishiiGenerateCmd cmd =%v\n", kibishiiGenerateCmd)
-
-	stdoutPipe, err := kibishiiGenerateCmd.StdoutPipe()
-	if err != nil {
-		return err
-	}
-
-	err = kibishiiGenerateCmd.Start()
-	if err != nil {
-		return err
-	}
-	defer stdoutPipe.Close()
-
-	stdoutReader := bufio.NewReader(stdoutPipe)
-	var readErr error
-	for true {
-		buf, isPrefix, err := stdoutReader.ReadLine()
-		if err != nil {
-			readErr = err
-			break
-		} else {
-			if isPrefix {
-				readErr = errors.New("line returned exceeded max length")
-				break
-			}
-			line := strings.TrimSpace(string(buf))
-			if line == "success" {
-				break
-			}
-			if line == "failed" {
-				readErr = errors.New("generate failed")
-				break
-			}
-			fmt.Println(line)
-		}
-	}
-
-	err = kibishiiGenerateCmd.Wait()
-	if readErr != nil {
-		err = readErr // Squash the Wait err, the read error is probably more interesting
-	}
-	return err
-}
-
-func VerifyData(ctx context.Context, namespace string, levels int, filesPerLevel int, dirsPerLevel int, fileSize int,
-	blockSize int, passNum int, expectedNodes int) error {
-	kibishiiVerifyCmd := exec.CommandContext(ctx, "kubectl", "exec", "-n", namespace, "jump-pad", "--",
-		"/usr/local/bin/verify.sh", strconv.Itoa(levels), strconv.Itoa(filesPerLevel), strconv.Itoa(dirsPerLevel), strconv.Itoa(fileSize),
-		strconv.Itoa(blockSize), strconv.Itoa(passNum), strconv.Itoa(expectedNodes))
-	fmt.Printf("kibishiiVerifyCmd cmd =%v\n", kibishiiVerifyCmd)
-
-	stdoutPipe, err := kibishiiVerifyCmd.StdoutPipe()
-	if err != nil {
-		return err
-	}
-
-	err = kibishiiVerifyCmd.Start()
-	if err != nil {
-		return err
-	}
-	defer stdoutPipe.Close()
-
-	stdoutReader := bufio.NewReader(stdoutPipe)
-	var readErr error
-	for true {
-		buf, isPrefix, err := stdoutReader.ReadLine()
-		if err != nil {
-			readErr = err
-			break
-		} else {
-			if isPrefix {
-				readErr = errors.New("line returned exceeded max length")
-				break
-			}
-			line := strings.TrimSpace(string(buf))
-			if line == "success" {
-				break
-			}
-			if line == "failed" {
-				readErr = errors.New("generate failed")
-				break
-			}
-			fmt.Println(line)
-		}
-	}
-
-	err = kibishiiVerifyCmd.Wait()
-	if readErr != nil {
-		err = readErr // Squash the Wait err, the read error is probably more interesting
-	}
-	return err
-}


@@ -13,7 +13,7 @@ var _ = Describe("Testing Velero on a kind cluster", func() {
 		flag.Parse()
 		ctx := context.TODO()
 		err := EnsureClusterExists(ctx)
-		Expect(err).NotTo(HaveOccurred())
+		Expect(err).To(Succeed())
 	})
 	Describe("Dummy test", func() {
 		Context("Dummy test", func() {

@@ -5,14 +5,112 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"os"
 	"os/exec"
+	"path/filepath"

 	"github.com/pkg/errors"

-	v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
+	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
+	"github.com/vmware-tanzu/velero/pkg/client"
+	cliinstall "github.com/vmware-tanzu/velero/pkg/cmd/cli/install"
+	"github.com/vmware-tanzu/velero/pkg/cmd/util/flag"
+	"github.com/vmware-tanzu/velero/pkg/install"
 )

+func getProviderPlugins(providerName string) []string {
+	// TODO: make plugin images configurable
+	switch providerName {
+	case "aws":
+		return []string{"velero/velero-plugin-for-aws:v1.1.0"}
+	case "azure":
+		return []string{"velero/velero-plugin-for-microsoft-azure:v1.1.1"}
+	case "vsphere":
+		return []string{"velero/velero-plugin-for-aws:v1.1.0", "velero/velero-plugin-for-vsphere:v1.0.2"}
+	default:
+		return []string{""}
+	}
+}
+
+// GetProviderVeleroInstallOptions returns Velero InstallOptions for the provider.
+func GetProviderVeleroInstallOptions(providerName, credentialsFile, objectStoreBucket, objectStorePrefix string,
+	bslConfig, vslConfig string,
+	plugins []string,
+) (*cliinstall.InstallOptions, error) {
+	if credentialsFile == "" {
+		return nil, errors.Errorf("No credentials were supplied to use for E2E tests")
+	}
+
+	realPath, err := filepath.Abs(credentialsFile)
+	if err != nil {
+		return nil, err
+	}
+
+	io := cliinstall.NewInstallOptions()
+	// always wait for velero and restic pods to be running.
+	io.Wait = true
+	io.ProviderName = providerName
+	io.SecretFile = credentialsFile
+	io.BucketName = objectStoreBucket
+	io.Prefix = objectStorePrefix
+	io.BackupStorageConfig = flag.NewMap()
+	io.BackupStorageConfig.Set(bslConfig)
+	io.VolumeSnapshotConfig = flag.NewMap()
+	io.VolumeSnapshotConfig.Set(vslConfig)
+	io.SecretFile = realPath
+	io.Plugins = flag.NewStringArray(plugins...)
+	return io, nil
+}
+
+// InstallVeleroServer installs velero in the cluster.
+func InstallVeleroServer(io *cliinstall.InstallOptions) error {
+	config, err := client.LoadConfig()
+	if err != nil {
+		return err
+	}
+
+	vo, err := io.AsVeleroOptions()
+	if err != nil {
+		return errors.Wrap(err, "Failed to translate InstallOptions to VeleroOptions for Velero")
+	}
+
+	f := client.NewFactory("e2e", config)
+	resources, err := install.AllResources(vo)
+	if err != nil {
+		return errors.Wrap(err, "Failed to install Velero in the cluster")
+	}
+
+	dynamicClient, err := f.DynamicClient()
+	if err != nil {
+		return err
+	}
+	factory := client.NewDynamicFactory(dynamicClient)
+
+	errorMsg := "\n\nError installing Velero. Use `kubectl logs deploy/velero -n velero` to check the deploy logs"
+	err = install.Install(factory, resources, os.Stdout)
+	if err != nil {
+		return errors.Wrap(err, errorMsg)
+	}
+
+	fmt.Println("Waiting for Velero deployment to be ready.")
+	if _, err = install.DeploymentIsReady(factory, "velero"); err != nil {
+		return errors.Wrap(err, errorMsg)
+	}
+
+	// restic enabled by default
+	fmt.Println("Waiting for Velero restic daemonset to be ready.")
+	if _, err = install.DaemonSetIsReady(factory, "velero"); err != nil {
+		return errors.Wrap(err, errorMsg)
+	}
+	return nil
+}
+
-func CheckBackupPhase(ctx context.Context, veleroCLI string, backupName string, expectedPhase v1.BackupPhase) error {
+// CheckBackupPhase uses veleroCLI to inspect the phase of a Velero backup.
+func CheckBackupPhase(ctx context.Context, veleroCLI string, backupName string, expectedPhase velerov1api.BackupPhase) error {
 	checkCMD := exec.CommandContext(ctx, veleroCLI, "backup", "get", "-o", "json", backupName)
 	fmt.Printf("get backup cmd =%v\n", checkCMD)
 	stdoutPipe, err := checkCMD.StdoutPipe()
@@ -41,7 +139,7 @@ func CheckBackupPhase(ctx context.Context, veleroCLI string, backupName string,
 	if err != nil {
 		return err
 	}
-	backup := v1.Backup{}
+	backup := velerov1api.Backup{}
 	err = json.Unmarshal(jsonBuf, &backup)
 	if err != nil {
 		return err
@@ -52,7 +150,8 @@ func CheckBackupPhase(ctx context.Context, veleroCLI string, backupName string,
 	return nil
 }

-func CheckRestorePhase(ctx context.Context, veleroCLI string, restoreName string, expectedPhase v1.RestorePhase) error {
+// CheckRestorePhase uses veleroCLI to inspect the phase of a Velero restore.
+func CheckRestorePhase(ctx context.Context, veleroCLI string, restoreName string, expectedPhase velerov1api.RestorePhase) error {
 	checkCMD := exec.CommandContext(ctx, veleroCLI, "restore", "get", "-o", "json", restoreName)
 	fmt.Printf("get restore cmd =%v\n", checkCMD)
 	stdoutPipe, err := checkCMD.StdoutPipe()
@@ -81,7 +180,7 @@ func CheckRestorePhase(ctx context.Context, veleroCLI string, restoreName string,
 	if err != nil {
 		return err
 	}
-	restore := v1.Restore{}
+	restore := velerov1api.Restore{}
 	err = json.Unmarshal(jsonBuf, &restore)
 	if err != nil {
 		return err
@@ -92,7 +191,8 @@ func CheckRestorePhase(ctx context.Context, veleroCLI string, restoreName string,
 	return nil
 }

-func BackupNamespace(ctx context.Context, veleroCLI string, backupName string, namespace string) error {
+// VeleroBackupNamespace uses the veleroCLI to backup a namespace.
+func VeleroBackupNamespace(ctx context.Context, veleroCLI string, backupName string, namespace string) error {
 	backupCmd := exec.CommandContext(ctx, veleroCLI, "create", "backup", backupName, "--include-namespaces", namespace,
 		"--default-volumes-to-restic", "--wait")
 	fmt.Printf("backup cmd =%v\n", backupCmd)
@@ -100,15 +200,16 @@ func VeleroBackupNamespace(ctx context.Context, veleroCLI string, backupName string, namespace string) error {
 	if err != nil {
 		return err
 	}
-	return CheckBackupPhase(ctx, veleroCLI, backupName, v1.BackupPhaseCompleted)
+	return CheckBackupPhase(ctx, veleroCLI, backupName, velerov1api.BackupPhaseCompleted)
 }

-func RestoreNamespace(ctx context.Context, veleroCLI string, restoreName string, backupName string) error {
+// VeleroRestore uses the veleroCLI to restore from a Velero backup.
+func VeleroRestore(ctx context.Context, veleroCLI string, restoreName string, backupName string) error {
 	restoreCmd := exec.CommandContext(ctx, veleroCLI, "create", "restore", restoreName, "--from-backup", backupName, "--wait")
 	fmt.Printf("restore cmd =%v\n", restoreCmd)
 	err := restoreCmd.Run()
 	if err != nil {
 		return err
 	}
-	return CheckRestorePhase(ctx, veleroCLI, restoreName, v1.RestorePhaseCompleted)
+	return CheckRestorePhase(ctx, veleroCLI, restoreName, velerov1api.RestorePhaseCompleted)
 }
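Taken together, these helpers compose into the flow the suite's `BeforeEach` and the kibishii spec follow. A hypothetical end-to-end sketch using only the functions defined in this commit (the provider name, credentials path, bucket, and namespace below are placeholders, not values from the PR):

```go
package e2e

import (
	"time"

	"golang.org/x/net/context"
)

// runProviderE2E is a hypothetical illustration of how the helpers in this
// commit fit together; the real suite drives the same calls from ginkgo.
func runProviderE2E() error {
	opts, err := GetProviderVeleroInstallOptions(
		"aws",            // plugin provider (placeholder)
		"/path/to/creds", // credentials file (placeholder)
		"my-bucket", "",  // bucket and prefix
		"", "",           // bsl-config and vsl-config
		getProviderPlugins("aws"),
	)
	if err != nil {
		return err
	}
	opts.UseRestic = true
	if err := InstallVeleroServer(opts); err != nil {
		return err
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Hour)
	defer cancel()
	if err := VeleroBackupNamespace(ctx, "velero", "backup-1", "some-namespace"); err != nil {
		return err
	}
	return VeleroRestore(ctx, "velero", "restore-1", "backup-1")
}
```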