refactor download request controller test and add test cases

Signed-off-by: Steve Kriss <steve@heptio.com>

parent 8f5346150c
commit 7007f198e1

@@ -17,7 +17,6 @@ limitations under the License.
package controller

import (
	"testing"
	"time"

@@ -26,223 +25,295 @@ import (
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/clock"

	"github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
	informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
	"github.com/heptio/ark/pkg/plugin"
	pluginmocks "github.com/heptio/ark/pkg/plugin/mocks"
	kubeutil "github.com/heptio/ark/pkg/util/kube"
	arktest "github.com/heptio/ark/pkg/util/test"
)
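
// downloadRequestTestHarness holds the fake client, shared informer factory,
// mock plugin manager, and mock object store used by the controller under test.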
type downloadRequestTestHarness struct {
	client          *fake.Clientset
	informerFactory informers.SharedInformerFactory
	pluginManager   *pluginmocks.Manager
	objectStore     *arktest.ObjectStore

	controller *downloadRequestController
}
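
// newDownloadRequestTestHarness wires the fakes into a downloadRequestController,
// swapping in a fixed fake clock and a newPluginManager func that returns the mock
// plugin manager.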
func newDownloadRequestTestHarness(t *testing.T) *downloadRequestTestHarness {
	var (
		client          = fake.NewSimpleClientset()
		informerFactory = informers.NewSharedInformerFactory(client, 0)
		pluginManager   = new(pluginmocks.Manager)
		objectStore     = new(arktest.ObjectStore)
		controller      = NewDownloadRequestController(
			client.ArkV1(),
			informerFactory.Ark().V1().DownloadRequests(),
			informerFactory.Ark().V1().Restores(),
			informerFactory.Ark().V1().BackupStorageLocations(),
			informerFactory.Ark().V1().Backups(),
			nil,
			arktest.NewLogger(),
		).(*downloadRequestController)
	)

	clockTime, err := time.Parse(time.RFC1123, time.RFC1123)
	require.NoError(t, err)

	controller.clock = clock.NewFakeClock(clockTime)

	controller.newPluginManager = func(_ logrus.FieldLogger, _ logrus.Level, _ plugin.Registry) plugin.Manager {
		return pluginManager
	}

	pluginManager.On("CleanupClients").Return()
	objectStore.On("Init", mock.Anything).Return(nil)

	return &downloadRequestTestHarness{
		client:          client,
		informerFactory: informerFactory,
		pluginManager:   pluginManager,
		objectStore:     objectStore,
		controller:      controller,
	}
}
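
// newDownloadRequest returns a DownloadRequest named "a-download-request" in the
// default namespace, with the given phase and download target.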
func newDownloadRequest(phase v1.DownloadRequestPhase, targetKind v1.DownloadTargetKind, targetName string) *v1.DownloadRequest {
	return &v1.DownloadRequest{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "a-download-request",
			Namespace: v1.DefaultNamespace,
		},
		Spec: v1.DownloadRequestSpec{
			Target: v1.DownloadTarget{
				Kind: targetKind,
				Name: targetName,
			},
		},
		Status: v1.DownloadRequestStatus{
			Phase: phase,
		},
	}
}
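
// newBackupLocation returns a BackupStorageLocation in the default namespace
// pointing at the given provider and object-storage bucket.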
func newBackupLocation(name, provider, bucket string) *v1.BackupStorageLocation {
	return &v1.BackupStorageLocation{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: v1.DefaultNamespace,
		},
		Spec: v1.BackupStorageLocationSpec{
			Provider: provider,
			StorageType: v1.StorageType{
				ObjectStorage: &v1.ObjectStorageLocation{
					Bucket: bucket,
				},
			},
		},
	}
}
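
// TestProcessDownloadRequest covers key parsing, missing backups/restores/locations,
// signed-URL generation for each download target kind, and expiration of processed requests.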
func TestProcessDownloadRequest(t *testing.T) {
	tests := []struct {
		name                    string
		key                     string
		downloadRequest         *v1.DownloadRequest
		backup                  *v1.Backup
		restore                 *v1.Restore
		backupLocation          *v1.BackupStorageLocation
		expired                 bool
		expectedErr             string
		expectedRequestedObject string
	}{
		{
			name: "empty key returns without error",
			key:  "",
		},
		{
			name: "bad key format returns without error",
			key:  "a/b/c",
		},
		{
			name: "no download request for key returns without error",
			key:  "nonexistent/key",
		},
		{
			name:            "backup contents request for nonexistent backup returns an error",
			downloadRequest: newDownloadRequest("", v1.DownloadTargetKindBackupContents, "a-backup"),
			backup:          arktest.NewTestBackup().WithName("non-matching-backup").WithStorageLocation("a-location").Backup,
			backupLocation:  newBackupLocation("a-location", "a-provider", "a-bucket"),
			expectedErr:     "backup.ark.heptio.com \"a-backup\" not found",
		},
		{
			name:            "restore log request for nonexistent restore returns an error",
			downloadRequest: newDownloadRequest("", v1.DownloadTargetKindRestoreLog, "a-backup-20170912150214"),
			restore:         arktest.NewTestRestore(v1.DefaultNamespace, "non-matching-restore", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore,
			backup:          arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
			backupLocation:  newBackupLocation("a-location", "a-provider", "a-bucket"),
			expectedErr:     "error getting Restore: restore.ark.heptio.com \"a-backup-20170912150214\" not found",
		},
		{
			name:            "backup contents request for backup with nonexistent location returns an error",
			downloadRequest: newDownloadRequest("", v1.DownloadTargetKindBackupContents, "a-backup"),
			backup:          arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
			backupLocation:  newBackupLocation("non-matching-location", "a-provider", "a-bucket"),
			expectedErr:     "backupstoragelocation.ark.heptio.com \"a-location\" not found",
		},
		{
			name:                    "backup contents request with phase '' gets a url",
			downloadRequest:         newDownloadRequest("", v1.DownloadTargetKindBackupContents, "a-backup"),
			backup:                  arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
			backupLocation:          newBackupLocation("a-location", "a-provider", "a-bucket"),
			expectedRequestedObject: "a-backup/a-backup.tar.gz",
		},
		{
			name:                    "backup contents request with phase 'New' gets a url",
			downloadRequest:         newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindBackupContents, "a-backup"),
			backup:                  arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
			backupLocation:          newBackupLocation("a-location", "a-provider", "a-bucket"),
			expectedRequestedObject: "a-backup/a-backup.tar.gz",
		},
		{
			name:                    "backup log request with phase '' gets a url",
			downloadRequest:         newDownloadRequest("", v1.DownloadTargetKindBackupLog, "a-backup"),
			backup:                  arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
			backupLocation:          newBackupLocation("a-location", "a-provider", "a-bucket"),
			expectedRequestedObject: "a-backup/a-backup-logs.gz",
		},
		{
			name:                    "backup log request with phase 'New' gets a url",
			downloadRequest:         newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindBackupLog, "a-backup"),
			backup:                  arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
			backupLocation:          newBackupLocation("a-location", "a-provider", "a-bucket"),
			expectedRequestedObject: "a-backup/a-backup-logs.gz",
		},
		{
			name:                    "restore log request with phase '' gets a url",
			downloadRequest:         newDownloadRequest("", v1.DownloadTargetKindRestoreLog, "a-backup-20170912150214"),
			restore:                 arktest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore,
			backup:                  arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
			backupLocation:          newBackupLocation("a-location", "a-provider", "a-bucket"),
			expectedRequestedObject: "a-backup/restore-a-backup-20170912150214-logs.gz",
		},
		{
			name:                    "restore log request with phase 'New' gets a url",
			downloadRequest:         newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindRestoreLog, "a-backup-20170912150214"),
			restore:                 arktest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore,
			backup:                  arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
			backupLocation:          newBackupLocation("a-location", "a-provider", "a-bucket"),
			expectedRequestedObject: "a-backup/restore-a-backup-20170912150214-logs.gz",
		},
		{
			name:                    "restore results request with phase '' gets a url",
			downloadRequest:         newDownloadRequest("", v1.DownloadTargetKindRestoreResults, "a-backup-20170912150214"),
			restore:                 arktest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore,
			backup:                  arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
			backupLocation:          newBackupLocation("a-location", "a-provider", "a-bucket"),
			expectedRequestedObject: "a-backup/restore-a-backup-20170912150214-results.gz",
		},
		{
			name:                    "restore results request with phase 'New' gets a url",
			downloadRequest:         newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindRestoreResults, "a-backup-20170912150214"),
			restore:                 arktest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore,
			backup:                  arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
			backupLocation:          newBackupLocation("a-location", "a-provider", "a-bucket"),
			expectedRequestedObject: "a-backup/restore-a-backup-20170912150214-results.gz",
		},
		{
			name:            "request with phase 'Processed' is not deleted if not expired",
			downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseProcessed, v1.DownloadTargetKindBackupLog, "a-backup-20170912150214"),
			backup:          arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
		},
		{
			name:            "request with phase 'Processed' is deleted if expired",
			downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseProcessed, v1.DownloadTargetKindBackupLog, "a-backup-20170912150214"),
			backup:          arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
			expired:         true,
		},
	}
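
	// Run each case against a fresh harness so informer caches, the fake clientset,
	// and mock expectations do not leak between cases.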
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			harness := newDownloadRequestTestHarness(t)

			// set up test case data

			// Set .status.expiration properly for processed requests. Since "expired" is relative to the controller's
			// clock time, it's easier to do this here than as part of the test case definitions.
			if tc.downloadRequest != nil && tc.downloadRequest.Status.Phase == v1.DownloadRequestPhaseProcessed {
				if tc.expired {
					tc.downloadRequest.Status.Expiration.Time = harness.controller.clock.Now().Add(-1 * time.Minute)
				} else {
					tc.downloadRequest.Status.Expiration.Time = harness.controller.clock.Now().Add(time.Minute)
				}
			}
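
			// seed the informer caches (and, for the download request, the fake clientset)
			// with this case's objects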
			if tc.downloadRequest != nil {
				require.NoError(t, harness.informerFactory.Ark().V1().DownloadRequests().Informer().GetStore().Add(tc.downloadRequest))

				_, err := harness.client.ArkV1().DownloadRequests(tc.downloadRequest.Namespace).Create(tc.downloadRequest)
				require.NoError(t, err)
			}

			if tc.restore != nil {
				require.NoError(t, harness.informerFactory.Ark().V1().Restores().Informer().GetStore().Add(tc.restore))
			}

			if tc.backup != nil {
				require.NoError(t, harness.informerFactory.Ark().V1().Backups().Informer().GetStore().Add(tc.backup))
			}

			if tc.backupLocation != nil {
				require.NoError(t, harness.informerFactory.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(tc.backupLocation))

				harness.pluginManager.On("GetObjectStore", tc.backupLocation.Spec.Provider).Return(harness.objectStore, nil)
			}
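
			// when a signed URL is expected, the mock object store must see a CreateSignedURL
			// call for exactly this bucket and object key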
			if tc.expectedRequestedObject != "" {
				harness.objectStore.On("CreateSignedURL", tc.backupLocation.Spec.ObjectStorage.Bucket, tc.expectedRequestedObject, mock.Anything).Return("a-url", nil)
			}

			// exercise method under test
			key := tc.key
			if key == "" && tc.downloadRequest != nil {
				key = kubeutil.NamespaceAndName(tc.downloadRequest)
			}

			err := harness.controller.processDownloadRequest(key)

			// verify results
			if tc.expectedErr != "" {
				require.Equal(t, tc.expectedErr, err.Error())
			} else {
				assert.Nil(t, err)
			}

			if tc.expectedRequestedObject != "" {
				output, err := harness.client.ArkV1().DownloadRequests(tc.downloadRequest.Namespace).Get(tc.downloadRequest.Name, metav1.GetOptions{})
				require.NoError(t, err)

				assert.Equal(t, string(v1.DownloadRequestPhaseProcessed), string(output.Status.Phase))
				assert.Equal(t, "a-url", output.Status.DownloadURL)
				assert.True(t, arktest.TimesAreEqual(harness.controller.clock.Now().Add(signedURLTTL), output.Status.Expiration.Time), "expiration does not match")
			}
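
			// requests already in phase 'Processed' should be deleted once expired and left in place otherwise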
			if tc.downloadRequest != nil && tc.downloadRequest.Status.Phase == v1.DownloadRequestPhaseProcessed {
				res, err := harness.client.ArkV1().DownloadRequests(tc.downloadRequest.Namespace).Get(tc.downloadRequest.Name, metav1.GetOptions{})

				if tc.expired {
					assert.True(t, apierrors.IsNotFound(err))
				} else {
					assert.NoError(t, err)
					assert.Equal(t, tc.downloadRequest, res)
				}
			}
		})
	}
}