Merge pull request #757 from carlisia/c-538-config-m

Switch Config CRD elements to server flags
pull/783/head
Carlisia 2018-08-16 09:02:48 -07:00 committed by GitHub
commit d29c96387e
17 changed files with 200 additions and 205 deletions

View File

@ -14,10 +14,14 @@ ark server [flags]
### Options
```
-h, --help help for server
--log-level the level at which to log. Valid values are debug, info, warning, error, fatal, panic. (default info)
--metrics-address string the address to expose prometheus metrics (default ":8085")
--plugin-dir string directory containing Ark plugins (default "/plugins")
--backup-sync-period duration how often to ensure all Ark backups in object storage exist as Backup API objects in the cluster (default 1h0m0s)
-h, --help help for server
--log-level the level at which to log. Valid values are debug, info, warning, error, fatal, panic. (default info)
--metrics-address string the address to expose prometheus metrics (default ":8085")
--plugin-dir string directory containing Ark plugins (default "/plugins")
--restic-timeout duration how long backups/restores of pod volumes should be allowed to run before timing out (default 1h0m0s)
--restore-only run in a mode where only restores are allowed; backups, schedules, and garbage-collection are all disabled
--restore-resource-priorities stringSlice desired order of resource restores; any resource not in the list will be restored alphabetically after the prioritized resources (default [namespaces,persistentvolumes,persistentvolumeclaims,secrets,configmaps,serviceaccounts,limitranges,pods])
```
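In a typical installation these flags are passed as container args in the Ark server deployment rather than typed at a shell. A minimal, hypothetical excerpt (the `30m` value is illustrative, not a default):
```yaml
# Hypothetical excerpt of the Ark server container spec;
# former Config fields are now plain `ark server` flags.
command:
- /ark
args:
- server
- --backup-sync-period
- 30m
- --restore-only
```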
### Options inherited from parent commands

View File

@ -1,23 +1,27 @@
# Ark Config definition
# Ark Config definition and Ark server deployment
* [Overview][8]
* [Example][9]
* [Parameter Reference][6]
* [Main config][7]
* [AWS][0]
* [GCP][1]
* [Azure][2]
* [Config][8]
* [Example][9]
* [Parameter Reference][6]
* [Main config][7]
* [AWS][0]
* [GCP][1]
* [Azure][2]
* [Deployment][11]
* [Sample Deployment][13]
* [Parameter Options][14]
## Overview
## Config
Heptio Ark defines its own Config object (a custom resource) for specifying Ark backup and cloud provider settings. When the Ark server is first deployed, it waits until you create a Config--specifically one named `default`--in the `heptio-ark` namespace.
Heptio Ark defines its own Config object (a custom resource) for specifying Ark backup and cloud provider settings. When the Ark server is first deployed, it waits until you create a Config, specifically one named `default`, in the `heptio-ark` namespace.
> *NOTE*: There is an underlying assumption that you're running the Ark server as a Kubernetes deployment. If the `default` Config is modified, the server shuts down gracefully. Once the kubelet restarts the Ark server pod, the server then uses the updated Config values.
## Example
### Example
A sample YAML `Config` looks like the following:
```
```yaml
apiVersion: ark.heptio.com/v1
kind: Config
metadata:
@ -32,17 +36,13 @@ backupStorageProvider:
bucket: ark
config:
region: us-west-2
backupSyncPeriod: 60m
gcSyncPeriod: 60m
scheduleSyncPeriod: 1m
restoreOnlyMode: false
```
## Parameter Reference
### Parameter Reference
The configurable parameters are as follows:
### Main config parameters
#### Main config parameters
| Key | Type | Default | Meaning |
| --- | --- | --- | --- |
@ -53,17 +53,12 @@ The configurable parameters are as follows:
| `backupStorageProvider/name` | String<br><br>(Ark natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.) | Required Field | The name of the cloud provider that will be used to actually store the backups. |
| `backupStorageProvider/bucket` | String | Required Field | The storage bucket where backups are to be uploaded. |
| `backupStorageProvider/config` | map[string]string<br><br>(See the corresponding [AWS][0], [GCP][1], and [Azure][2]-specific configs or your provider's documentation.) | None (Optional) | Configuration keys/values to be passed to the cloud provider for backup storage. |
| `backupSyncPeriod` | metav1.Duration | 60m0s | How frequently Ark queries the object storage to make sure that the appropriate Backup resources have been created for existing backup files. |
| `gcSyncPeriod` | metav1.Duration | 60m0s | How frequently Ark queries the object storage to delete backup files that have passed their TTL. |
| `scheduleSyncPeriod` | metav1.Duration | 1m0s | How frequently Ark checks its Schedule resource objects to see if a backup needs to be initiated. |
| `resourcePriorities` | []string | `[namespaces, persistentvolumes, persistentvolumeclaims, secrets, configmaps, serviceaccounts, limitranges]` | An ordered list that describes the order in which Kubernetes resource objects should be restored (also specified with the `<RESOURCE>.<GROUP>` format).<br><br>If a resource is not in this list, it is restored after all other prioritized resources. |
| `restoreOnlyMode` | bool | `false` | When RestoreOnly mode is on, functionality for backups, schedules, and expired backup deletion is *turned off*. Restores are made from existing backup files in object storage. |
### AWS
#### AWS
**(Or other S3-compatible storage)**
#### backupStorageProvider/config
##### backupStorageProvider/config
| Key | Type | Default | Meaning |
| --- | --- | --- | --- |
@ -72,40 +67,108 @@ The configurable parameters are as follows:
| `s3Url` | string | Required field for non-AWS-hosted storage | *Example*: http://minio:9000<br><br>You can specify the AWS S3 URL here for explicitness, but Ark can already generate it from `region` and `bucket`. This field is primarily for local storage services like Minio. |
| `kmsKeyId` | string | Empty | *Example*: "502b409c-4da1-419f-a16e-eif453b3i49f" or "alias/`<KMS-Key-Alias-Name>`"<br><br>Specify an [AWS KMS key][10] id or alias to enable encryption of the backups stored in S3. Only works with AWS S3 and may require explicitly granting key usage rights.|
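Putting the keys above together, a hedged sketch of `backupStorageProvider` for an S3-compatible store such as Minio (the bucket name and URL are placeholders):
```yaml
backupStorageProvider:
  name: aws
  bucket: ark                  # placeholder bucket name
  config:
    region: minio              # placeholder region for non-AWS storage
    s3ForcePathStyle: "true"
    s3Url: http://minio:9000   # local storage service endpoint
```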
#### persistentVolumeProvider/config (AWS Only)
##### persistentVolumeProvider/config (AWS Only)
| Key | Type | Default | Meaning |
| --- | --- | --- | --- |
| `region` | string | Required Field | *Example*: "us-east-1"<br><br>See [AWS documentation][3] for the full list. |
### GCP
#### GCP
#### backupStorageProvider/config
No parameters required.
#### persistentVolumeProvider/config
##### persistentVolumeProvider/config
No parameters required.
### Azure
#### Azure
#### backupStorageProvider/config
##### backupStorageProvider/config
No parameters required.
#### persistentVolumeProvider/config
##### persistentVolumeProvider/config
| Key | Type | Default | Meaning |
| --- | --- | --- | --- |
| `apiTimeout` | metav1.Duration | 2m0s | How long to wait for an Azure API request to complete before timeout. |
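For completeness, a minimal sketch of the corresponding Azure block; the `name` value and `15m` timeout are illustrative assumptions (the documented default is `2m0s`):
```yaml
persistentVolumeProvider:
  name: azure        # assumed provider name, following the providers listed above
  config:
    apiTimeout: 15m  # illustrative; defaults to 2m0s
```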
## Deployment
Heptio Ark also defines its own Deployment object for starting the Ark server on Kubernetes. The deployment spec contains settings you may want to change, most notably the flags passed to the `ark server` command.
### Sample Deployment
A sample YAML `Deployment` looks like the following:
```yaml
apiVersion: apps/v1beta1
kind: Deployment
metadata:
namespace: heptio-ark
name: ark
spec:
replicas: 1
template:
metadata:
labels:
component: ark
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8085"
prometheus.io/path: "/metrics"
spec:
restartPolicy: Always
serviceAccountName: ark
containers:
- name: ark
image: gcr.io/heptio-images/ark:latest
command:
- /ark
args:
- server
- --backup-sync-period
- 30m
volumeMounts:
- name: cloud-credentials
mountPath: /credentials
- name: plugins
mountPath: /plugins
- name: scratch
mountPath: /scratch
env:
- name: AWS_SHARED_CREDENTIALS_FILE
value: /credentials/cloud
- name: ARK_SCRATCH_DIR
value: /scratch
volumes:
- name: cloud-credentials
secret:
secretName: cloud-credentials
- name: plugins
emptyDir: {}
- name: scratch
emptyDir: {}
```
### Parameter Options
The configurable options for the `ark server` deployment are listed in the [CLI reference][12] document; they are set as container args, as in the sample above.
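For example, restoring resources in a custom order would mean adding the corresponding flag to the server args; a hedged sketch using the documented default list:
```yaml
args:
- server
- --restore-resource-priorities
- namespaces,persistentvolumes,persistentvolumeclaims,secrets,configmaps,serviceaccounts,limitranges,pods
```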
[0]: #aws
[1]: #gcp
[2]: #azure
[3]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions
[6]: #parameter-reference
[7]: #main-config-parameters
[8]: #overview
[8]: #config
[9]: #example
[10]: http://docs.aws.amazon.com/kms/latest/developerguide/overview.html
[11]: #deployment
[12]: cli-reference/ark_server.md
[13]: #sample-deployment
[14]: #parameter-options

View File

@ -19,7 +19,7 @@ If you periodically back up your cluster's resources, you are able to return to
2. A disaster happens and you need to recreate your resources.
3. Update the [Ark server Config][3], setting `restoreOnlyMode` to `true`. This prevents Backup objects from being created or deleted during your Restore process.
3. Update the [Ark server deployment][3], adding the `--restore-only` flag to the `ark server` args (see the sketch below). This prevents Backup objects from being created or deleted during your Restore process.
4. Create a restore with your most recent Ark Backup:
```
@ -50,4 +50,4 @@ ark restore create --from-backup <BACKUP-NAME>
[0]: #disaster-recovery
[1]: #cluster-migration
[3]: config-definition.md#main-config-parameters
[3]: cli-reference/ark_server.md#options
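A hedged sketch of step 3 above, with unrelated deployment fields elided:
```yaml
# In the Ark server deployment (spec.template.spec.containers[0]):
command:
- /ark
args:
- server
- --restore-only   # disables backups, schedules, and garbage collection
```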

View File

@ -32,8 +32,4 @@ backupStorageProvider:
# specified just above.
# resticLocation: <YOUR_RESTIC_LOCATION>
config:
region: <YOUR_REGION>
backupSyncPeriod: 30m
gcSyncPeriod: 30m
scheduleSyncPeriod: 1m
restoreOnlyMode: false
region: <YOUR_REGION>

View File

@ -30,8 +30,4 @@ backupStorageProvider:
# e.g. "my-restic-bucket" or "my-restic-bucket/repos".
# This MUST be a different bucket than the main Ark bucket
# specified just above.
# resticLocation: <YOUR_RESTIC_LOCATION>
backupSyncPeriod: 30m
gcSyncPeriod: 30m
scheduleSyncPeriod: 1m
restoreOnlyMode: false
# resticLocation: <YOUR_RESTIC_LOCATION>

View File

@ -29,7 +29,4 @@ backupStorageProvider:
# This MUST be a different bucket than the main Ark bucket
# specified just above.
# resticLocation: <YOUR_RESTIC_LOCATION>
backupSyncPeriod: 30m
gcSyncPeriod: 30m
scheduleSyncPeriod: 1m
restoreOnlyMode: false

View File

@ -31,7 +31,4 @@ backupStorageProvider:
region: <YOUR_REGION>
s3ForcePathStyle: "true"
s3Url: <YOUR_URL_ACCESS_POINT>
backupSyncPeriod: 30m
gcSyncPeriod: 30m
scheduleSyncPeriod: 1m
---

View File

@ -31,7 +31,4 @@ backupStorageProvider:
region: minio
s3ForcePathStyle: "true"
s3Url: http://minio.heptio-ark.svc:9000
backupSyncPeriod: 1m
gcSyncPeriod: 1m
scheduleSyncPeriod: 1m
restoreOnlyMode: false

View File

@ -45,31 +45,6 @@ type Config struct {
// Ark backups are stored in object storage. This may be a different cloud than
// where the cluster is running.
BackupStorageProvider ObjectStorageProviderConfig `json:"backupStorageProvider"`
// BackupSyncPeriod is how often the BackupSyncController runs to ensure all
// Ark backups in object storage exist as Backup API objects in the cluster.
BackupSyncPeriod metav1.Duration `json:"backupSyncPeriod"`
// GCSyncPeriod is how often the GCController runs to delete expired backup
// API objects and corresponding backup files in object storage.
GCSyncPeriod metav1.Duration `json:"gcSyncPeriod"`
// ScheduleSyncPeriod is how often the ScheduleController runs to check for
// new backups that should be triggered based on schedules.
ScheduleSyncPeriod metav1.Duration `json:"scheduleSyncPeriod"`
// PodVolumeOperationTimeout is how long backups/restores of pod volumes (i.e.
// using restic) should be allowed to run before timing out.
PodVolumeOperationTimeout metav1.Duration `json:"podVolumeOperationTimeout"`
// ResourcePriorities is an ordered slice of resources specifying the desired
// order of resource restores. Any resources not in the list will be restored
// alphabetically after the prioritized resources.
ResourcePriorities []string `json:"resourcePriorities"`
// RestoreOnlyMode is whether Ark should run in a mode where only restores
// are allowed; backups, schedules, and garbage-collection are all disabled.
RestoreOnlyMode bool `json:"restoreOnlyMode"`
}
// CloudProviderConfig is configuration information about how to connect

View File

@ -339,15 +339,6 @@ func (in *Config) DeepCopyInto(out *Config) {
}
}
in.BackupStorageProvider.DeepCopyInto(&out.BackupStorageProvider)
out.BackupSyncPeriod = in.BackupSyncPeriod
out.GCSyncPeriod = in.GCSyncPeriod
out.ScheduleSyncPeriod = in.ScheduleSyncPeriod
out.PodVolumeOperationTimeout = in.PodVolumeOperationTimeout
if in.ResourcePriorities != nil {
in, out := &in.ResourcePriorities, &out.ResourcePriorities
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

View File

@ -75,11 +75,23 @@ const (
defaultMetricsAddress = ":8085"
)
type serverConfig struct {
pluginDir, metricsAddress string
backupSyncPeriod, podVolumeOperationTimeout time.Duration
restoreResourcePriorities []string
restoreOnly bool
}
func NewCommand() *cobra.Command {
var (
logLevelFlag = logging.LogLevelFlag(logrus.InfoLevel)
pluginDir = "/plugins"
metricsAddress = defaultMetricsAddress
logLevelFlag = logging.LogLevelFlag(logrus.InfoLevel)
config = serverConfig{
pluginDir: "/plugins",
metricsAddress: defaultMetricsAddress,
backupSyncPeriod: defaultBackupSyncPeriod,
podVolumeOperationTimeout: defaultPodVolumeOperationTimeout,
restoreResourcePriorities: defaultRestorePriorities,
}
)
var command = &cobra.Command{
@ -114,7 +126,7 @@ func NewCommand() *cobra.Command {
}
namespace := getServerNamespace(namespaceFlag)
s, err := newServer(namespace, fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()), pluginDir, metricsAddress, logger)
s, err := newServer(namespace, fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()), config, logger)
cmd.CheckError(err)
cmd.CheckError(s.run())
@ -122,8 +134,12 @@ func NewCommand() *cobra.Command {
}
command.Flags().Var(logLevelFlag, "log-level", fmt.Sprintf("the level at which to log. Valid values are %s.", strings.Join(logLevelFlag.AllowedValues(), ", ")))
command.Flags().StringVar(&pluginDir, "plugin-dir", pluginDir, "directory containing Ark plugins")
command.Flags().StringVar(&metricsAddress, "metrics-address", metricsAddress, "the address to expose prometheus metrics")
command.Flags().StringVar(&config.pluginDir, "plugin-dir", config.pluginDir, "directory containing Ark plugins")
command.Flags().StringVar(&config.metricsAddress, "metrics-address", config.metricsAddress, "the address to expose prometheus metrics")
command.Flags().DurationVar(&config.backupSyncPeriod, "backup-sync-period", config.backupSyncPeriod, "how often to ensure all Ark backups in object storage exist as Backup API objects in the cluster")
command.Flags().DurationVar(&config.podVolumeOperationTimeout, "restic-timeout", config.podVolumeOperationTimeout, "how long backups/restores of pod volumes should be allowed to run before timing out")
command.Flags().BoolVar(&config.restoreOnly, "restore-only", config.restoreOnly, "run in a mode where only restores are allowed; backups, schedules, and garbage-collection are all disabled")
command.Flags().StringSliceVar(&config.restoreResourcePriorities, "restore-resource-priorities", config.restoreResourcePriorities, "desired order of resource restores; any resource not in the list will be restored alphabetically after the prioritized resources")
return command
}
@ -162,9 +178,10 @@ type server struct {
pluginManager plugin.Manager
resticManager restic.RepositoryManager
metrics *metrics.ServerMetrics
config serverConfig
}
func newServer(namespace, baseName, pluginDir, metricsAddr string, logger *logrus.Logger) (*server, error) {
func newServer(namespace, baseName string, config serverConfig, logger *logrus.Logger) (*server, error) {
clientConfig, err := client.Config("", "", baseName)
if err != nil {
return nil, err
@ -180,7 +197,7 @@ func newServer(namespace, baseName, pluginDir, metricsAddr string, logger *logru
return nil, errors.WithStack(err)
}
pluginRegistry := plugin.NewRegistry(pluginDir, logger, logger.Level)
pluginRegistry := plugin.NewRegistry(config.pluginDir, logger, logger.Level)
if err := pluginRegistry.DiscoverPlugins(); err != nil {
return nil, err
}
@ -198,7 +215,7 @@ func newServer(namespace, baseName, pluginDir, metricsAddr string, logger *logru
s := &server{
namespace: namespace,
metricsAddress: metricsAddr,
metricsAddress: config.metricsAddress,
kubeClientConfig: clientConfig,
kubeClient: kubeClient,
arkClient: arkClient,
@ -211,6 +228,7 @@ func newServer(namespace, baseName, pluginDir, metricsAddr string, logger *logru
logLevel: logger.Level,
pluginRegistry: pluginRegistry,
pluginManager: pluginManager,
config: config,
}
return s, nil
@ -245,7 +263,7 @@ func (s *server) run() error {
// watchConfig needs to examine the unmodified original config, so we keep that around as a
// separate object, and instead apply defaults to a clone.
config := originalConfig.DeepCopy()
applyConfigDefaults(config, s.logger)
s.applyConfigDefaults(config)
s.watchConfig(originalConfig)
@ -279,6 +297,32 @@ func (s *server) run() error {
return nil
}
func (s *server) applyConfigDefaults(c *api.Config) {
if s.config.backupSyncPeriod == 0 {
s.config.backupSyncPeriod = defaultBackupSyncPeriod
}
if s.config.podVolumeOperationTimeout == 0 {
s.config.podVolumeOperationTimeout = defaultPodVolumeOperationTimeout
}
if len(s.config.restoreResourcePriorities) == 0 {
s.config.restoreResourcePriorities = defaultRestorePriorities
s.logger.WithField("priorities", s.config.restoreResourcePriorities).Info("Using default resource priorities")
} else {
s.logger.WithField("priorities", s.config.restoreResourcePriorities).Info("Using given resource priorities")
}
if c.BackupStorageProvider.Config == nil {
c.BackupStorageProvider.Config = make(map[string]string)
}
// add the bucket name to the config map so that object stores can use
// it when initializing. The AWS object store uses this to determine the
// bucket's region when setting up its client.
c.BackupStorageProvider.Config["bucket"] = c.BackupStorageProvider.Bucket
}
// namespaceExists returns nil if namespace can be successfully
// gotten from the kubernetes API, or an error otherwise.
func (s *server) namespaceExists(namespace string) error {
@ -379,9 +423,7 @@ func (s *server) loadConfig() (*api.Config, error) {
}
const (
defaultGCSyncPeriod = 60 * time.Minute
defaultBackupSyncPeriod = 60 * time.Minute
defaultScheduleSyncPeriod = time.Minute
defaultPodVolumeOperationTimeout = 60 * time.Minute
)
@ -394,7 +436,7 @@ const (
// - Limit ranges go before pods or controllers so pods can use them.
// - Pods go before controllers so they can be explicitly restored and potentially
// have restic restores run before controllers adopt the pods.
var defaultResourcePriorities = []string{
var defaultRestorePriorities = []string{
"namespaces",
"persistentvolumes",
"persistentvolumeclaims",
@ -405,40 +447,6 @@ var defaultResourcePriorities = []string{
"pods",
}
func applyConfigDefaults(c *api.Config, logger logrus.FieldLogger) {
if c.GCSyncPeriod.Duration == 0 {
c.GCSyncPeriod.Duration = defaultGCSyncPeriod
}
if c.BackupSyncPeriod.Duration == 0 {
c.BackupSyncPeriod.Duration = defaultBackupSyncPeriod
}
if c.ScheduleSyncPeriod.Duration == 0 {
c.ScheduleSyncPeriod.Duration = defaultScheduleSyncPeriod
}
if c.PodVolumeOperationTimeout.Duration == 0 {
c.PodVolumeOperationTimeout.Duration = defaultPodVolumeOperationTimeout
}
if len(c.ResourcePriorities) == 0 {
c.ResourcePriorities = defaultResourcePriorities
logger.WithField("priorities", c.ResourcePriorities).Info("Using default resource priorities")
} else {
logger.WithField("priorities", c.ResourcePriorities).Info("Using resource priorities from config")
}
if c.BackupStorageProvider.Config == nil {
c.BackupStorageProvider.Config = make(map[string]string)
}
// add the bucket name to the config map so that object stores can use
// it when initializing. The AWS object store uses this to determine the
// bucket's region when setting up its client.
c.BackupStorageProvider.Config["bucket"] = c.BackupStorageProvider.Bucket
}
// watchConfig adds an update event handler to the Config shared informer, invoking s.cancelFunc
// when it sees a change.
func (s *server) watchConfig(config *api.Config) {
@ -572,7 +580,7 @@ func (s *server) runControllers(config *api.Config) error {
ctx := s.ctx
var wg sync.WaitGroup
cloudBackupCacheResyncPeriod := durationMin(config.GCSyncPeriod.Duration, config.BackupSyncPeriod.Duration)
cloudBackupCacheResyncPeriod := durationMin(controller.GCSyncPeriod, s.config.backupSyncPeriod)
s.logger.Infof("Caching cloud backups every %s", cloudBackupCacheResyncPeriod)
liveBackupLister := cloudprovider.NewLiveBackupLister(s.logger, s.objectStore)
@ -593,7 +601,7 @@ func (s *server) runControllers(config *api.Config) error {
s.arkClient.ArkV1(),
cachedBackupLister,
config.BackupStorageProvider.Bucket,
config.BackupSyncPeriod.Duration,
s.config.backupSyncPeriod,
s.namespace,
s.sharedInformerFactory.Ark().V1().Backups(),
s.logger,
@ -604,7 +612,7 @@ func (s *server) runControllers(config *api.Config) error {
wg.Done()
}()
if config.RestoreOnlyMode {
if s.config.restoreOnly {
s.logger.Info("Restore only mode - not starting the backup, schedule, delete-backup, or GC controllers")
} else {
backupTracker := controller.NewBackupTracker()
@ -615,7 +623,7 @@ func (s *server) runControllers(config *api.Config) error {
podexec.NewPodCommandExecutor(s.kubeClientConfig, s.kubeClient.CoreV1().RESTClient()),
s.blockStore,
s.resticManager,
config.PodVolumeOperationTimeout.Duration,
s.config.podVolumeOperationTimeout,
)
cmd.CheckError(err)
@ -643,7 +651,6 @@ func (s *server) runControllers(config *api.Config) error {
s.arkClient.ArkV1(),
s.arkClient.ArkV1(),
s.sharedInformerFactory.Ark().V1().Schedules(),
config.ScheduleSyncPeriod.Duration,
s.logger,
s.metrics,
)
@ -658,7 +665,6 @@ func (s *server) runControllers(config *api.Config) error {
s.sharedInformerFactory.Ark().V1().Backups(),
s.sharedInformerFactory.Ark().V1().DeleteBackupRequests(),
s.arkClient.ArkV1(),
config.GCSyncPeriod.Duration,
)
wg.Add(1)
go func() {
@ -692,11 +698,11 @@ func (s *server) runControllers(config *api.Config) error {
s.discoveryHelper,
client.NewDynamicFactory(s.dynamicClient),
s.blockStore,
config.ResourcePriorities,
s.config.restoreResourcePriorities,
s.arkClient.ArkV1(),
s.kubeClient.CoreV1().Namespaces(),
s.resticManager,
config.PodVolumeOperationTimeout.Duration,
s.config.podVolumeOperationTimeout,
s.logger,
)
cmd.CheckError(err)

View File

@ -30,28 +30,28 @@ import (
func TestApplyConfigDefaults(t *testing.T) {
var (
logger = arktest.NewLogger()
c = &v1.Config{}
server = &server{
logger: arktest.NewLogger(),
config: serverConfig{},
}
)
// test defaulting
applyConfigDefaults(c, logger)
assert.Equal(t, defaultGCSyncPeriod, c.GCSyncPeriod.Duration)
assert.Equal(t, defaultBackupSyncPeriod, c.BackupSyncPeriod.Duration)
assert.Equal(t, defaultScheduleSyncPeriod, c.ScheduleSyncPeriod.Duration)
assert.Equal(t, defaultResourcePriorities, c.ResourcePriorities)
server.applyConfigDefaults(c)
assert.Equal(t, defaultBackupSyncPeriod, server.config.backupSyncPeriod)
assert.Equal(t, defaultPodVolumeOperationTimeout, server.config.podVolumeOperationTimeout)
assert.Equal(t, defaultRestorePriorities, server.config.restoreResourcePriorities)
// make sure defaulting doesn't overwrite real values
c.GCSyncPeriod.Duration = 5 * time.Minute
c.BackupSyncPeriod.Duration = 4 * time.Minute
c.ScheduleSyncPeriod.Duration = 3 * time.Minute
c.ResourcePriorities = []string{"a", "b"}
// make sure defaulting doesn't overwrite real values
server.config.backupSyncPeriod = 4 * time.Minute
server.config.podVolumeOperationTimeout = 5 * time.Second
server.config.restoreResourcePriorities = []string{"a", "b"}
applyConfigDefaults(c, logger)
assert.Equal(t, 5*time.Minute, c.GCSyncPeriod.Duration)
assert.Equal(t, 4*time.Minute, c.BackupSyncPeriod.Duration)
assert.Equal(t, 3*time.Minute, c.ScheduleSyncPeriod.Duration)
assert.Equal(t, []string{"a", "b"}, c.ResourcePriorities)
server.applyConfigDefaults(c)
assert.Equal(t, 4*time.Minute, server.config.backupSyncPeriod)
assert.Equal(t, 5*time.Second, server.config.podVolumeOperationTimeout)
assert.Equal(t, []string{"a", "b"}, server.config.restoreResourcePriorities)
}
func TestArkResourcesExist(t *testing.T) {

View File

@ -34,6 +34,10 @@ import (
listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
)
const (
GCSyncPeriod = 60 * time.Minute
)
// gcController creates DeleteBackupRequests for expired backups.
type gcController struct {
*genericController
@ -42,7 +46,6 @@ type gcController struct {
backupLister listers.BackupLister
deleteBackupRequestLister listers.DeleteBackupRequestLister
deleteBackupRequestClient arkv1client.DeleteBackupRequestsGetter
syncPeriod time.Duration
clock clock.Clock
}
@ -53,16 +56,9 @@ func NewGCController(
backupInformer informers.BackupInformer,
deleteBackupRequestInformer informers.DeleteBackupRequestInformer,
deleteBackupRequestClient arkv1client.DeleteBackupRequestsGetter,
syncPeriod time.Duration,
) Interface {
if syncPeriod < time.Minute {
logger.WithField("syncPeriod", syncPeriod).Info("Provided GC sync period is too short. Setting to 1 minute")
syncPeriod = time.Minute
}
c := &gcController{
genericController: newGenericController("gc-controller", logger),
syncPeriod: syncPeriod,
clock: clock.RealClock{},
backupLister: backupInformer.Lister(),
deleteBackupRequestLister: deleteBackupRequestInformer.Lister(),
@ -76,7 +72,7 @@ func NewGCController(
deleteBackupRequestInformer.Informer().HasSynced,
)
c.resyncPeriod = syncPeriod
c.resyncPeriod = GCSyncPeriod
c.resyncFunc = c.enqueueAllBackups
backupInformer.Informer().AddEventHandler(

View File

@ -50,7 +50,6 @@ func TestGCControllerEnqueueAllBackups(t *testing.T) {
sharedInformers.Ark().V1().Backups(),
sharedInformers.Ark().V1().DeleteBackupRequests(),
client.ArkV1(),
1*time.Millisecond,
).(*gcController)
)
@ -114,7 +113,6 @@ func TestGCControllerHasUpdateFunc(t *testing.T) {
sharedInformers.Ark().V1().Backups(),
sharedInformers.Ark().V1().DeleteBackupRequests(),
client.ArkV1(),
1*time.Millisecond,
).(*gcController)
keys := make(chan string)
@ -245,7 +243,6 @@ func TestGCControllerProcessQueueItem(t *testing.T) {
sharedInformers.Ark().V1().Backups(),
sharedInformers.Ark().V1().DeleteBackupRequests(),
client.ArkV1(),
1*time.Millisecond,
).(*gcController)
controller.clock = fakeClock

View File

@ -45,6 +45,10 @@ import (
kubeutil "github.com/heptio/ark/pkg/util/kube"
)
const (
scheduleSyncPeriod = time.Minute
)
type scheduleController struct {
namespace string
schedulesClient arkv1client.SchedulesGetter
@ -53,7 +57,6 @@ type scheduleController struct {
schedulesListerSynced cache.InformerSynced
syncHandler func(scheduleName string) error
queue workqueue.RateLimitingInterface
syncPeriod time.Duration
clock clock.Clock
logger logrus.FieldLogger
metrics *metrics.ServerMetrics
@ -64,26 +67,19 @@ func NewScheduleController(
schedulesClient arkv1client.SchedulesGetter,
backupsClient arkv1client.BackupsGetter,
schedulesInformer informers.ScheduleInformer,
syncPeriod time.Duration,
logger logrus.FieldLogger,
metrics *metrics.ServerMetrics,
) *scheduleController {
if syncPeriod < time.Minute {
logger.WithField("syncPeriod", syncPeriod).Info("Provided schedule sync period is too short. Setting to 1 minute")
syncPeriod = time.Minute
}
c := &scheduleController{
namespace: namespace,
schedulesClient: schedulesClient,
backupsClient: backupsClient,
schedulesLister: schedulesInformer.Lister(),
schedulesListerSynced: schedulesInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "schedule"),
syncPeriod: syncPeriod,
clock: clock.RealClock{},
logger: logger,
metrics: metrics,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "schedule"),
clock: clock.RealClock{},
logger: logger,
metrics: metrics,
}
c.syncHandler = c.processSchedule
@ -157,7 +153,7 @@ func (controller *scheduleController) Run(ctx context.Context, numWorkers int) e
}()
}
go wait.Until(controller.enqueueAllEnabledSchedules, controller.syncPeriod, ctx.Done())
go wait.Until(controller.enqueueAllEnabledSchedules, scheduleSyncPeriod, ctx.Done())
<-ctx.Done()
return nil

View File

@ -128,7 +128,6 @@ func TestProcessSchedule(t *testing.T) {
client.ArkV1(),
client.ArkV1(),
sharedInformers.Ark().V1().Schedules(),
time.Duration(0),
logger,
metrics.NewServerMetrics(),
)

View File

@ -20,8 +20,6 @@ import (
"time"
arkv1 "github.com/heptio/ark/pkg/apis/ark/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type arkConfigOption func(*arkConfig)
@ -97,18 +95,5 @@ func Config(
Bucket: bucket,
ResticLocation: c.resticLocation,
},
BackupSyncPeriod: metav1.Duration{
Duration: c.backupSyncPeriod,
},
GCSyncPeriod: metav1.Duration{
Duration: c.gcSyncPeriod,
},
ScheduleSyncPeriod: metav1.Duration{
Duration: time.Minute,
},
PodVolumeOperationTimeout: metav1.Duration{
Duration: c.podVolumeOperationTimeout,
},
RestoreOnlyMode: c.restoreOnly,
}
}