addon: add addon for volcano
parent
5a1038d922
commit
d67dfb3d0a
|
@ -135,6 +135,10 @@ var (
|
|||
//go:embed gcp-auth/*.tmpl gcp-auth/*.yaml
|
||||
GcpAuthAssets embed.FS
|
||||
|
||||
// VolcanoAssets assets for volcano addon
|
||||
//go:embed volcano/*.tmpl
|
||||
VolcanoAssets embed.FS
|
||||
|
||||
// VolumeSnapshotsAssets assets for volumesnapshots addon
|
||||
//go:embed volumesnapshots/*.tmpl volumesnapshots/*.yaml
|
||||
VolumeSnapshotsAssets embed.FS
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -191,6 +191,11 @@ var Addons = []*Addon{
|
|||
set: SetBool,
|
||||
callbacks: []setFn{enableOrDisableGCPAuth, EnableOrDisableAddon, verifyGCPAuthAddon},
|
||||
},
|
||||
{
|
||||
name: "volcano",
|
||||
set: SetBool,
|
||||
callbacks: []setFn{EnableOrDisableAddon},
|
||||
},
|
||||
{
|
||||
name: "volumesnapshots",
|
||||
set: SetBool,
|
||||
|
|
|
@ -621,6 +621,21 @@ var Addons = map[string]*Addon{
|
|||
"GCPAuthWebhook": "gcr.io",
|
||||
"KubeWebhookCertgen": "registry.k8s.io",
|
||||
}),
|
||||
"volcano": NewAddon([]*BinAsset{
|
||||
MustBinAsset(addons.VolcanoAssets,
|
||||
"volcano/volcano-development.yaml.tmpl",
|
||||
vmpath.GuestAddonsDir,
|
||||
"volcano-deployment.yaml",
|
||||
"0640"),
|
||||
}, false, "volcano", "third-party (volcano)", "hwdef", "", map[string]string{
|
||||
"vc_webhook_manager": "volcanosh/vc-webhook-manager:v1.7.0@sha256:082b6a3b7b8b69d98541a8ea56958ef427fdba54ea555870799f8c9ec2754c1b",
|
||||
"vc_controller_manager": "volcanosh/vc-controller-manager:v1.7.0@sha256:1dd0973f67becc3336f009cce4eac8677d857aaf4ba766cfff371ad34dfc34cf",
|
||||
"vc_scheduler": "volcanosh/vc-scheduler:v1.7.0@sha256:64d6efcf1a48366201aafcaf1bd4cb6d66246ec1c395ddb0deefe11350bcebba",
|
||||
}, map[string]string{
|
||||
"vc_webhook_manager": "docker.io",
|
||||
"vc_controller_manager": "docker.io",
|
||||
"vc_scheduler": "docker.io",
|
||||
}),
|
||||
"volumesnapshots": NewAddon([]*BinAsset{
|
||||
// make sure the order of apply. `csi-hostpath-snapshotclass` must be the first position, because it depends on `snapshot.storage.k8s.io_volumesnapshotclasses`
|
||||
// if user disable volumesnapshots addon and delete `csi-hostpath-snapshotclass` after `snapshot.storage.k8s.io_volumesnapshotclasses`, kubernetes will return the error
|
||||
|
|
|
@ -53,6 +53,9 @@ tests the inspektor-gadget addon by ensuring the pod has come up and addon disab
|
|||
#### validateCloudSpannerAddon
|
||||
tests the cloud-spanner addon by ensuring the deployment and pod come up and addon disables
|
||||
|
||||
#### validateVolcanoAddon
|
||||
tests the Volcano addon by making sure Volcano is installed into the cluster
|
||||
|
||||
#### validateLocalPathAddon
|
||||
tests the functionality of the storage-provisioner-rancher addon
|
||||
|
||||
|
|
|
@ -32,6 +32,7 @@ import (
|
|||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
@ -99,7 +100,7 @@ func TestAddons(t *testing.T) {
|
|||
// so we override that here to let minikube auto-detect appropriate cgroup driver
|
||||
os.Setenv(constants.MinikubeForceSystemdEnv, "")
|
||||
|
||||
args := append([]string{"start", "-p", profile, "--wait=true", "--memory=4000", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=volumesnapshots", "--addons=csi-hostpath-driver", "--addons=gcp-auth", "--addons=cloud-spanner", "--addons=inspektor-gadget", "--addons=storage-provisioner-rancher", "--addons=nvidia-device-plugin", "--addons=yakd"}, StartArgs()...)
|
||||
args := append([]string{"start", "-p", profile, "--wait=true", "--memory=4000", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=volumesnapshots", "--addons=csi-hostpath-driver", "--addons=gcp-auth", "--addons=cloud-spanner", "--addons=inspektor-gadget", "--addons=storage-provisioner-rancher", "--addons=nvidia-device-plugin", "--addons=yakd", "--addons=volcano"}, StartArgs()...)
|
||||
if !NoneDriver() { // none driver does not support ingress
|
||||
args = append(args, "--addons=ingress", "--addons=ingress-dns")
|
||||
}
|
||||
|
@ -135,6 +136,7 @@ func TestAddons(t *testing.T) {
|
|||
{"LocalPath", validateLocalPathAddon},
|
||||
{"NvidiaDevicePlugin", validateNvidiaDevicePlugin},
|
||||
{"Yakd", validateYakdAddon},
|
||||
{"Volcano", validateVolcanoAddon},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
tc := tc
|
||||
|
@ -862,6 +864,102 @@ func validateCloudSpannerAddon(ctx context.Context, t *testing.T, profile string
|
|||
}
|
||||
}
|
||||
|
||||
// validateVolcanoAddon tests the Volcano addon, makes sure the Volcano is installed into cluster.
|
||||
func validateVolcanoAddon(ctx context.Context, t *testing.T, profile string) {
|
||||
defer PostMortemLogs(t, profile)
|
||||
|
||||
volcanoNamespace := "volcano-system"
|
||||
|
||||
client, err := kapi.Client(profile)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get Kubernetes client for %s: %v", profile, err)
|
||||
}
|
||||
|
||||
// Wait for the volcano component installation to complete
|
||||
start := time.Now()
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(3)
|
||||
go func() {
|
||||
if err := kapi.WaitForDeploymentToStabilize(client, volcanoNamespace, "volcano-scheduler", Minutes(6)); err != nil {
|
||||
t.Errorf("failed waiting for volcano-scheduler deployment to stabilize: %v", err)
|
||||
} else {
|
||||
t.Logf("volcano-scheduler stabilized in %s", time.Since(start))
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
if err := kapi.WaitForDeploymentToStabilize(client, volcanoNamespace, "volcano-admission", Minutes(6)); err != nil {
|
||||
t.Errorf("failed waiting for volcano-admission deployment to stabilize: %v", err)
|
||||
} else {
|
||||
t.Logf("volcano-admission stabilized in %s", time.Since(start))
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
if err := kapi.WaitForDeploymentToStabilize(client, volcanoNamespace, "volcano-controller", Minutes(6)); err != nil {
|
||||
t.Errorf("failed waiting for volcano-controller deployment to stabilize: %v", err)
|
||||
} else {
|
||||
t.Logf("volcano-controller stabilized in %s", time.Since(start))
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
|
||||
if _, err := PodWait(ctx, t, profile, volcanoNamespace, "app=volcano-scheduler", Minutes(6)); err != nil {
|
||||
t.Fatalf("failed waiting for app=volcano-scheduler pod: %v", err)
|
||||
}
|
||||
|
||||
if _, err := PodWait(ctx, t, profile, volcanoNamespace, "app=volcano-admission", Minutes(6)); err != nil {
|
||||
t.Fatalf("failed waiting for app=volcano-admission pod: %v", err)
|
||||
}
|
||||
|
||||
if _, err := PodWait(ctx, t, profile, volcanoNamespace, "app=volcano-controller", Minutes(6)); err != nil {
|
||||
t.Fatalf("failed waiting for app=volcano-controller pod: %v", err)
|
||||
}
|
||||
|
||||
// When the volcano deployment is complete, delete the volcano-admission-init job, it will affect the tests
|
||||
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "-n", volcanoNamespace, "job", "volcano-admission-init"))
|
||||
if err != nil {
|
||||
t.Logf("vcjob creation with %s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
// Create a vcjob
|
||||
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "vcjob.yaml")))
|
||||
if err != nil {
|
||||
t.Logf("vcjob creation with %s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
want := "test-job"
|
||||
checkVolcano := func() error {
|
||||
// check the vcjob
|
||||
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "vcjob", "-n", "my-volcano"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rr.Stderr.String() != "" {
|
||||
t.Logf("%v: unexpected stderr: %s", rr.Command(), rr.Stderr)
|
||||
}
|
||||
if !strings.Contains(rr.Stdout.String(), want) {
|
||||
return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), rr.Stdout, want)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := retry.Expo(checkVolcano, time.Second*3, Minutes(2)); err != nil {
|
||||
t.Errorf("failed checking volcano: %v", err.Error())
|
||||
}
|
||||
|
||||
// also ensure the job is actually running
|
||||
if _, err := PodWait(ctx, t, profile, "my-volcano", "volcano.sh/job-name=test-job", Minutes(3)); err != nil {
|
||||
t.Fatalf("failed waiting for test-local-path pod: %v", err)
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "volcano", "--alsologtostderr", "-v=1"))
|
||||
if err != nil {
|
||||
t.Errorf("failed to disable volcano addon: args %q: %v", rr.Command(), err)
|
||||
}
|
||||
}
|
||||
|
||||
// validateLocalPathAddon tests the functionality of the storage-provisioner-rancher addon
|
||||
func validateLocalPathAddon(ctx context.Context, t *testing.T, profile string) {
|
||||
|
||||
|
|
|
# Test fixture for the Volcano addon integration test (validateVolcanoAddon):
# a dedicated namespace plus a minimal vcjob scheduled by volcano.
apiVersion: v1
kind: Namespace
metadata:
  name: my-volcano

---

# A Volcano Job (vcjob) with a single nginx task that sleeps; the
# TaskCompleted policy completes the job once the task finishes.
apiVersion: batch.volcano.sh/v1alpha1
kind: Job
metadata:
  name: test-job
  namespace: my-volcano
spec:
  minAvailable: 1
  # must be scheduled by the volcano scheduler, not the default one
  schedulerName: volcano
  queue: default
  tasks:
    - replicas: 1
      name: nginx
      policies:
        - event: TaskCompleted
          action: CompleteJob
      template:
        spec:
          containers:
            - command:
                - sleep
                - 10m
              image: nginx:latest
              name: nginx
              resources:
                requests:
                  cpu: 1
                limits:
                  cpu: 1
          restartPolicy: Never
|
Loading…
Reference in New Issue