Merge pull request #799 from heptio/backup-locations

Replace config.backupStorageProvider with backup storage locations

commit 7aadc39cd6
@@ -18,11 +18,11 @@
   name = "github.com/Azure/azure-sdk-for-go"
   packages = [
     "arm/disk",
     "arm/examples/helpers",
     "services/storage/mgmt/2017-10-01/storage",
     "storage"
   ]
-  revision = "2d49bb8f2cee530cc16f1f1a9f0aae763dee257d"
-  version = "v10.2.1-beta"
+  revision = "2d1d76c9013c4feb6695a2346f0e66ea0ef77aa6"
+  version = "v11.3.0-beta"

 [[projects]]
   name = "github.com/Azure/go-autorest"
@@ -806,6 +806,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "70b3cfc235408d89934ada479417194e2a82df523f459f7d9d3264538805ea98"
+  inputs-digest = "4706135745ec21274791f454998d264dd167c78b472674ba813dca08cc962d7d"
   solver-name = "gps-cdcl"
   solver-version = 1
@@ -66,7 +66,7 @@
 [[constraint]]
   name = "github.com/Azure/azure-sdk-for-go"
-  version = "~10.2.1-beta"
+  version = "~11.3.0-beta"

 [[constraint]]
   name = "cloud.google.com/go"
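
The lock-file churn above is mechanical: it is what the `dep` tool (the `analyzer-name` in the lock file) regenerates once the Azure SDK constraint in Gopkg.toml is bumped. As a sketch of that step (editor's example, assuming a working `dep` installation; not part of this commit):

```bash
# Re-solve dependencies after editing the azure-sdk-for-go constraint in Gopkg.toml.
dep ensure -update github.com/Azure/azure-sdk-for-go
```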
@@ -141,7 +141,11 @@ Specify the following values in the example files:

 * In `examples/aws/00-ark-config.yaml`:

-  * Replace `<YOUR_BUCKET>` and `<YOUR_REGION>` (for S3, region is optional and will be queried from the AWS S3 API if not provided). See the [Config definition][6] for details.
+  * Replace `<YOUR_REGION>`. See the [Config definition][6] for details.
+
+* In `examples/aws/05-ark-backupstoragelocation.yaml`:
+
+  * Replace `<YOUR_BUCKET>` and `<YOUR_REGION>` (for S3 backup storage, region is optional and will be queried from the AWS S3 API if not provided). See the [BackupStorageLocation definition][21] for details.

 * (Optional) If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:
@@ -273,4 +277,5 @@ It can be set up for Ark by creating a role that will have required permissions,
 [5]: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html
 [6]: config-definition.md#aws
 [14]: http://docs.aws.amazon.com/IAM/latest/UserGuide/introduction.html
-[20]: faq.md
+[20]: faq.md
+[21]: backupstoragelocation-definition.md#aws
@@ -51,23 +51,15 @@ az storage account create \
     --access-tier Hot
 ```

-Create the blob container named `ark`. Feel free to use a different name, preferrably unique to a single Kubernetes cluster. See the [FAQ][20] for more details. You'll need to
-adjust the `bucket` field under `backupStorageProvider` in the Ark Config accordingly if you do.
+Create the blob container named `ark`. Feel free to use a different name, preferably unique to a single Kubernetes cluster. See the [FAQ][20] for more details.

 ```bash
 az storage container create -n ark --public-access off --account-name $AZURE_STORAGE_ACCOUNT_ID
-
-# Obtain the storage access key for the storage account just created
-AZURE_STORAGE_KEY=`az storage account keys list \
-    --account-name $AZURE_STORAGE_ACCOUNT_ID \
-    --resource-group $AZURE_BACKUP_RESOURCE_GROUP \
-    --query '[0].value' \
-    -o tsv`
 ```

 ## Create service principal

-To integrate Ark with Azure, you must create an Ark-specific [service principal][17]. Note that seven environment variables must be set for Ark to work properly.
+To integrate Ark with Azure, you must create an Ark-specific [service principal][17].

 1. Obtain your Azure Account Subscription ID and Tenant ID:
@@ -79,11 +71,11 @@ To integrate Ark with Azure, you must create an Ark-specific [service principal]
 1. Set the name of the Resource Group that contains your Kubernetes cluster.

     ```bash
-    # Make sure this is the name of the second resource group. See warning.
+    # Make sure this is the name of the auto-generated resource group. See warning.
     AZURE_RESOURCE_GROUP=<NAME_OF_RESOURCE_GROUP_2>
     ```

-    WARNING: `AZURE_RESOURCE_GROUP` must be set to the name of the second resource group that is created when you provision your cluster in Azure. Your cluster is provisioned in the resource group that you specified when you created the cluster. Your disks, however, are provisioned in the second resource group.
+    WARNING: `AZURE_RESOURCE_GROUP` must be set to the name of the auto-generated resource group that is created when you provision your cluster in Azure. Your cluster is provisioned in the resource group that you specified when you created the cluster. Your disks, however, are provisioned in the second resource group.

     If you are unsure of the Resource Group name, run the following command to get a list that you can select from. Then set the `AZURE_RESOURCE_GROUP` environment variable to the appropriate value.
@@ -117,27 +109,29 @@ In the Ark root directory, run the following to first set up namespaces, RBAC, a
 kubectl apply -f examples/common/00-prereqs.yaml
 ```

-Now you need to create a Secret that contains all the seven environment variables you just set. The command looks like the following:
+Now you need to create a Secret that contains all the environment variables you just set. The command looks like the following:

 ```bash
 kubectl create secret generic cloud-credentials \
     --namespace <ARK_NAMESPACE> \
     --from-literal AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID} \
     --from-literal AZURE_TENANT_ID=${AZURE_TENANT_ID} \
-    --from-literal AZURE_RESOURCE_GROUP=${AZURE_RESOURCE_GROUP} \
     --from-literal AZURE_CLIENT_ID=${AZURE_CLIENT_ID} \
     --from-literal AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET} \
-    --from-literal AZURE_STORAGE_ACCOUNT_ID=${AZURE_STORAGE_ACCOUNT_ID} \
-    --from-literal AZURE_STORAGE_KEY=${AZURE_STORAGE_KEY}
+    --from-literal AZURE_RESOURCE_GROUP=${AZURE_RESOURCE_GROUP}
 ```

 Now that you have your Azure credentials stored in a Secret, you need to replace some placeholder values in the template files. Specifically, you need to change the following:

 * In file `examples/azure/10-ark-config.yaml`:

-  * Replace `<YOUR_BUCKET>` and `<YOUR_TIMEOUT>`. See the [Config definition][8] for details.
+  * Replace `<YOUR_TIMEOUT>`. See the [Config definition][8] for details.

-Here is an example of a completed file.
+* In file `examples/azure/05-ark-backupstoragelocation.yaml`:
+
+  * Replace `<YOUR_BLOB_CONTAINER>`, `<YOUR_STORAGE_RESOURCE_GROUP>`, and `<YOUR_STORAGE_ACCOUNT>`. See the [BackupStorageLocation definition][21] for details.
+
+Here is an example of a completed config file.

 ```yaml
 apiVersion: ark.heptio.com/v1
@@ -149,9 +143,6 @@ persistentVolumeProvider:
   name: azure
   config:
     apiTimeout: 15m
-backupStorageProvider:
-  name: azure
-  bucket: ark
 backupSyncPeriod: 30m
 gcSyncPeriod: 30m
 scheduleSyncPeriod: 1m
@@ -166,9 +157,10 @@ In the root of your Ark directory, run:
 kubectl apply -f examples/azure/
 ```

-[0]: namespace.md
-[8]: config-definition.md#azure
-[17]: https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-application-objects
-[18]: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli
-[19]: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#storage
-[20]: faq.md
+[0]: namespace.md
+[8]: config-definition.md#azure
+[17]: https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-application-objects
+[18]: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli
+[19]: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#storage
+[20]: faq.md
+[21]: backupstoragelocation-definition.md#azure
@@ -0,0 +1,70 @@
+# Ark Backup Storage Locations
+
+## Backup Storage Location
+
+Ark can store backups in a number of locations. These are represented in the cluster via the `BackupStorageLocation` CRD.
+
+Ark must have at least one `BackupStorageLocation`. By default, this is expected to be named `default`; however, the name can be changed by specifying `--default-backup-storage-location` on `ark server`. Backups that do not explicitly specify a storage location are saved to this `BackupStorageLocation`.
+
+> *NOTE*: `BackupStorageLocation` takes the place of the `Config.backupStorageProvider` key as of v0.10.0.
+
+A sample YAML `BackupStorageLocation` looks like the following:
+
+```yaml
+apiVersion: ark.heptio.com/v1
+kind: BackupStorageLocation
+metadata:
+  name: default
+  namespace: heptio-ark
+spec:
+  provider: aws
+  objectStorage:
+    bucket: myBucket
+  config:
+    region: us-west-2
+```
+
+### Parameter Reference
+
+The configurable parameters are as follows:
+
+#### Main config parameters
+
+| Key | Type | Default | Meaning |
+| --- | --- | --- | --- |
+| `provider` | String (Ark natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.) | Required Field | The name of the cloud provider that will be used to actually store the backups. |
+| `objectStorage` | ObjectStorageLocation | Required Field | Specification of the object storage for the given provider. |
+| `objectStorage/bucket` | String | Required Field | The storage bucket where backups are to be uploaded. |
+| `objectStorage/prefix` | String | Optional Field | The directory inside a storage bucket where backups are to be uploaded. |
+| `objectStorage/config` | map[string]string<br><br>(See the corresponding [AWS][0], [GCP][1], and [Azure][2]-specific configs or your provider's documentation.) | None (Optional) | Configuration keys/values to be passed to the cloud provider for backup storage. |
+
+#### AWS
+
+**(Or other S3-compatible storage)**
+
+##### objectStorage/config
+
+| Key | Type | Default | Meaning |
+| --- | --- | --- | --- |
+| `region` | string | Empty | *Example*: "us-east-1"<br><br>See [AWS documentation][3] for the full list.<br><br>Queried from the AWS S3 API if not provided. |
+| `s3ForcePathStyle` | bool | `false` | Set this to `true` if you are using a local storage service like Minio. |
+| `s3Url` | string | Required field for non-AWS-hosted storage | *Example*: http://minio:9000<br><br>You can specify the AWS S3 URL here for explicitness, but Ark can already generate it from `region` and `bucket`. This field is primarily for local storage services like Minio. |
+| `kmsKeyId` | string | Empty | *Example*: "502b409c-4da1-419f-a16e-eif453b3i49f" or "alias/`<KMS-Key-Alias-Name>`"<br><br>Specify an [AWS KMS key][10] id or alias to enable encryption of the backups stored in S3. Only works with AWS S3 and may require explicitly granting key usage rights. |
+
+#### Azure
+
+##### objectStorage/config
+
+| Key | Type | Default | Meaning |
+| --- | --- | --- | --- |
+| `resourceGroup` | string | Required Field | Name of the resource group containing the storage account for this backup storage location. |
+| `storageAccount` | string | Required Field | Name of the storage account for this backup storage location. |
+
+#### GCP
+
+No parameters required.
+
+[0]: #aws
+[1]: #gcp
+[2]: #azure
+[3]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions
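
As a usage sketch for the documentation added above (editor's example, not part of this commit; the `bsl.yaml` filename is an assumption, and the `default` name comes from the sample):

```bash
# Save the sample BackupStorageLocation above as bsl.yaml, then register and inspect it.
kubectl apply -f bsl.yaml
ark backup-location get
kubectl --namespace heptio-ark get backupstoragelocations default -o yaml
```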
@@ -31,6 +31,7 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.

 ### SEE ALSO
 * [ark backup](ark_backup.md) - Work with backups
+* [ark backup-location](ark_backup-location.md) - Work with backup storage locations
 * [ark bug](ark_bug.md) - Report an Ark bug
 * [ark client](ark_client.md) - Ark client related commands
 * [ark completion](ark_completion.md) - Output shell completion code for the specified shell (bash or zsh)
@@ -0,0 +1,35 @@
+## ark backup-location
+
+Work with backup storage locations
+
+### Synopsis
+
+
+Work with backup storage locations
+
+### Options
+
+```
+  -h, --help   help for backup-location
+```
+
+### Options inherited from parent commands
+
+```
+      --alsologtostderr                  log to standard error as well as files
+      --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
+      --kubecontext string               The context to use to talk to the Kubernetes apiserver. If unset defaults to whatever your current-context is (kubectl config current-context)
+      --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
+      --log_dir string                   If non-empty, write log files in this directory
+      --logtostderr                      log to standard error instead of files
+  -n, --namespace string                 The namespace in which Ark should operate (default "heptio-ark")
+      --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
+  -v, --v Level                          log level for V logs
+      --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
+```
+
+### SEE ALSO
+* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
+* [ark backup-location create](ark_backup-location_create.md) - Create a backup storage location
+* [ark backup-location get](ark_backup-location_get.md) - Get backup storage locations
+
@@ -0,0 +1,45 @@
+## ark backup-location create
+
+Create a backup storage location
+
+### Synopsis
+
+
+Create a backup storage location
+
+```
+ark backup-location create NAME [flags]
+```
+
+### Options
+
+```
+      --bucket string               name of the object storage bucket where backups should be stored
+      --config mapStringString      configuration key-value pairs
+  -h, --help                        help for create
+      --label-columns stringArray   a comma-separated list of labels to be displayed as columns
+      --labels mapStringString      labels to apply to the backup storage location
+  -o, --output string               Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'.
+      --prefix string               prefix under which all Ark data should be stored within the bucket. Optional.
+      --provider string             name of the backup storage provider (e.g. aws, azure, gcp)
+      --show-labels                 show labels in the last column
+```
+
+### Options inherited from parent commands
+
+```
+      --alsologtostderr                  log to standard error as well as files
+      --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
+      --kubecontext string               The context to use to talk to the Kubernetes apiserver. If unset defaults to whatever your current-context is (kubectl config current-context)
+      --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
+      --log_dir string                   If non-empty, write log files in this directory
+      --logtostderr                      log to standard error instead of files
+  -n, --namespace string                 The namespace in which Ark should operate (default "heptio-ark")
+      --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
+  -v, --v Level                          log level for V logs
+      --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
+```
+
+### SEE ALSO
+* [ark backup-location](ark_backup-location.md) - Work with backup storage locations
+
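
A hypothetical invocation built only from the flags documented above (the location name, bucket, prefix, and region are made-up values):

```bash
# Create an additional AWS location; --config passes provider-specific keys such as region.
ark backup-location create secondary \
    --provider aws \
    --bucket my-secondary-ark-bucket \
    --prefix backups \
    --config region=us-east-1
```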
@@ -0,0 +1,41 @@
+## ark backup-location get
+
+Get backup storage locations
+
+### Synopsis
+
+
+Get backup storage locations
+
+```
+ark backup-location get [flags]
+```
+
+### Options
+
+```
+  -h, --help                        help for get
+      --label-columns stringArray   a comma-separated list of labels to be displayed as columns
+  -o, --output string               Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'. (default "table")
+  -l, --selector string             only show items matching this label selector
+      --show-labels                 show labels in the last column
+```
+
+### Options inherited from parent commands
+
+```
+      --alsologtostderr                  log to standard error as well as files
+      --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
+      --kubecontext string               The context to use to talk to the Kubernetes apiserver. If unset defaults to whatever your current-context is (kubectl config current-context)
+      --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
+      --log_dir string                   If non-empty, write log files in this directory
+      --logtostderr                      log to standard error instead of files
+  -n, --namespace string                 The namespace in which Ark should operate (default "heptio-ark")
+      --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
+  -v, --v Level                          log level for V logs
+      --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
+```
+
+### SEE ALSO
+* [ark backup-location](ark_backup-location.md) - Work with backup storage locations
+
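
For example, two read-only sketches using only the flags above (the `tier=secondary` label is hypothetical):

```bash
# Table view of all locations, then a YAML dump filtered by a label selector.
ark backup-location get
ark backup-location get -o yaml -l tier=secondary
```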
@@ -26,6 +26,7 @@ ark backup create NAME [flags]
  -l, --selector labelSelector                 only back up resources matching this label selector (default <none>)
      --show-labels                            show labels in the last column
      --snapshot-volumes optionalBool[=true]   take snapshots of PersistentVolumes as part of the backup
+     --storage-location string                location in which to store the backup
      --ttl duration                           how long before the backup can be garbage collected (default 720h0m0s)
  -w, --wait                                   wait for the operation to complete
 ```
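
The new `--storage-location` flag ties a backup to a named `BackupStorageLocation`; a sketch (the backup and location names are hypothetical):

```bash
# Back up everything matching a selector into the "secondary" location.
ark backup create nginx-backup --selector app=nginx --storage-location secondary
```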
@@ -31,6 +31,7 @@ Create ark resources
 ### SEE ALSO
 * [ark](ark.md) - Back up and restore Kubernetes cluster resources.
 * [ark create backup](ark_create_backup.md) - Create a backup
+* [ark create backup-location](ark_create_backup-location.md) - Create a backup storage location
 * [ark create restore](ark_create_restore.md) - Create a restore
 * [ark create schedule](ark_create_schedule.md) - Create a schedule

@@ -0,0 +1,45 @@
+## ark create backup-location
+
+Create a backup storage location
+
+### Synopsis
+
+
+Create a backup storage location
+
+```
+ark create backup-location NAME [flags]
+```
+
+### Options
+
+```
+      --bucket string               name of the object storage bucket where backups should be stored
+      --config mapStringString      configuration key-value pairs
+  -h, --help                        help for backup-location
+      --label-columns stringArray   a comma-separated list of labels to be displayed as columns
+      --labels mapStringString      labels to apply to the backup storage location
+  -o, --output string               Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'.
+      --prefix string               prefix under which all Ark data should be stored within the bucket. Optional.
+      --provider string             name of the backup storage provider (e.g. aws, azure, gcp)
+      --show-labels                 show labels in the last column
+```
+
+### Options inherited from parent commands
+
+```
+      --alsologtostderr                  log to standard error as well as files
+      --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
+      --kubecontext string               The context to use to talk to the Kubernetes apiserver. If unset defaults to whatever your current-context is (kubectl config current-context)
+      --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
+      --log_dir string                   If non-empty, write log files in this directory
+      --logtostderr                      log to standard error instead of files
+  -n, --namespace string                 The namespace in which Ark should operate (default "heptio-ark")
+      --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
+  -v, --v Level                          log level for V logs
+      --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
+```
+
+### SEE ALSO
+* [ark create](ark_create.md) - Create ark resources
+
@@ -26,6 +26,7 @@ ark create backup NAME [flags]
  -l, --selector labelSelector                 only back up resources matching this label selector (default <none>)
      --show-labels                            show labels in the last column
      --snapshot-volumes optionalBool[=true]   take snapshots of PersistentVolumes as part of the backup
+     --storage-location string                location in which to store the backup
      --ttl duration                           how long before the backup can be garbage collected (default 720h0m0s)
  -w, --wait                                   wait for the operation to complete
 ```
@@ -41,6 +41,7 @@ ark create schedule NAME --schedule="0 */6 * * *"
  -l, --selector labelSelector                 only back up resources matching this label selector (default <none>)
      --show-labels                            show labels in the last column
      --snapshot-volumes optionalBool[=true]   take snapshots of PersistentVolumes as part of the backup
+     --storage-location string                location in which to store the backup
      --ttl duration                           how long before the backup can be garbage collected (default 720h0m0s)
 ```

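
Schedules accept the same flag, so periodic backups can target a non-default location; a sketch (the schedule name and location name are hypothetical):

```bash
# Take a backup every six hours and store it in the "secondary" location.
ark create schedule nginx-every-6h --schedule="0 */6 * * *" --storage-location secondary
```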
@@ -30,6 +30,7 @@ Get ark resources

 ### SEE ALSO
 * [ark](ark.md) - Back up and restore Kubernetes cluster resources.
+* [ark get backup-locations](ark_get_backup-locations.md) - Get backup storage locations
 * [ark get backups](ark_get_backups.md) - Get backups
 * [ark get restores](ark_get_restores.md) - Get restores
 * [ark get schedules](ark_get_schedules.md) - Get schedules

@@ -0,0 +1,41 @@
+## ark get backup-locations
+
+Get backup storage locations
+
+### Synopsis
+
+
+Get backup storage locations
+
+```
+ark get backup-locations [flags]
+```
+
+### Options
+
+```
+  -h, --help                        help for backup-locations
+      --label-columns stringArray   a comma-separated list of labels to be displayed as columns
+  -o, --output string               Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'. (default "table")
+  -l, --selector string             only show items matching this label selector
+      --show-labels                 show labels in the last column
+```
+
+### Options inherited from parent commands
+
+```
+      --alsologtostderr                  log to standard error as well as files
+      --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
+      --kubecontext string               The context to use to talk to the Kubernetes apiserver. If unset defaults to whatever your current-context is (kubectl config current-context)
+      --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
+      --log_dir string                   If non-empty, write log files in this directory
+      --logtostderr                      log to standard error instead of files
+  -n, --namespace string                 The namespace in which Ark should operate (default "heptio-ark")
+      --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
+  -v, --v Level                          log level for V logs
+      --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
+```
+
+### SEE ALSO
+* [ark get](ark_get.md) - Get ark resources
+
@@ -41,6 +41,7 @@ ark create schedule NAME --schedule="0 */6 * * *"
  -l, --selector labelSelector                 only back up resources matching this label selector (default <none>)
      --show-labels                            show labels in the last column
      --snapshot-volumes optionalBool[=true]   take snapshots of PersistentVolumes as part of the backup
+     --storage-location string                location in which to store the backup
      --ttl duration                           how long before the backup can be garbage collected (default 720h0m0s)
 ```

@@ -15,6 +15,7 @@ ark server [flags]

 ```
      --backup-sync-period duration              how often to ensure all Ark backups in object storage exist as Backup API objects in the cluster (default 1h0m0s)
+     --default-backup-storage-location string   name of the default backup storage location (default "default")
  -h, --help                                     help for server
      --log-level                                the level at which to log. Valid values are debug, info, warning, error, fatal, panic. (default info)
      --metrics-address string                   the address to expose prometheus metrics (default ":8085")
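
A sketch of pointing the server at a differently named default location (the `secondary` name is hypothetical):

```bash
# Backups that do not pass --storage-location will be saved to "secondary".
ark server --default-backup-storage-location secondary
```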
@@ -31,11 +31,6 @@ persistentVolumeProvider:
   name: aws
   config:
     region: us-west-2
-backupStorageProvider:
-  name: aws
-  bucket: ark
-  config:
-    region: us-west-2
 ```

 ### Parameter Reference
@@ -49,52 +44,30 @@ The configurable parameters are as follows:
 | `persistentVolumeProvider` | CloudProviderConfig | None (Optional) | The specification for whichever cloud provider the cluster is using for persistent volumes (to be snapshotted), if any.<br><br>If not specified, Backups and Restores requesting PV snapshots & restores, respectively, are considered invalid. <br><br> *NOTE*: For Azure, your Kubernetes cluster needs to be version 1.7.2+ in order to support PV snapshotting of its managed disks. |
 | `persistentVolumeProvider/name` | String<br><br>(Ark natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.) | None (Optional) | The name of the cloud provider the cluster is using for persistent volumes, if any. |
 | `persistentVolumeProvider/config` | map[string]string<br><br>(See the corresponding [AWS][0], [GCP][1], and [Azure][2]-specific configs or your provider's documentation.) | None (Optional) | Configuration keys/values to be passed to the cloud provider for persistent volumes. |
-| `backupStorageProvider` | CloudProviderConfig | Required Field | The specification for whichever cloud provider will be used to actually store the backups. |
-| `backupStorageProvider/name` | String<br><br>(Ark natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.) | Required Field | The name of the cloud provider that will be used to actually store the backups. |
-| `backupStorageProvider/bucket` | String | Required Field | The storage bucket where backups are to be uploaded. |
-| `backupStorageProvider/config` | map[string]string<br><br>(See the corresponding [AWS][0], [GCP][1], and [Azure][2]-specific configs or your provider's documentation.) | None (Optional) | Configuration keys/values to be passed to the cloud provider for backup storage. |

 #### AWS

-**(Or other S3-compatible storage)**
-
-##### backupStorageProvider/config
-
-| Key | Type | Default | Meaning |
-| --- | --- | --- | --- |
-| `region` | string | Empty | *Example*: "us-east-1"<br><br>See [AWS documentation][3] for the full list.<br><br>Queried from the AWS S3 API if not provided. |
-| `s3ForcePathStyle` | bool | `false` | Set this to `true` if you are using a local storage service like Minio. |
-| `s3Url` | string | Required field for non-AWS-hosted storage | *Example*: http://minio:9000<br><br>You can specify the AWS S3 URL here for explicitness, but Ark can already generate it from `region` and `bucket`. This field is primarily for local storage services like Minio. |
-| `kmsKeyId` | string | Empty | *Example*: "502b409c-4da1-419f-a16e-eif453b3i49f" or "alias/`<KMS-Key-Alias-Name>`"<br><br>Specify an [AWS KMS key][10] id or alias to enable encryption of the backups stored in S3. Only works with AWS S3 and may require explicitly granting key usage rights. |
-
 ##### persistentVolumeProvider/config (AWS Only)

 | Key | Type | Default | Meaning |
 | --- | --- | --- | --- |
 | `region` | string | Required Field | *Example*: "us-east-1"<br><br>See [AWS documentation][3] for the full list. |

-#### GCP
-
-#### backupStorageProvider/config
-
-No parameters required.
-
-##### persistentVolumeProvider/config
-
-No parameters required.
-
 #### Azure

-##### backupStorageProvider/config
-
-No parameters required.
-
 ##### persistentVolumeProvider/config

 | Key | Type | Default | Meaning |
 | --- | --- | --- | --- |
 | `apiTimeout` | metav1.Duration | 2m0s | How long to wait for an Azure API request to complete before timeout. |

+#### GCP
+
+##### persistentVolumeProvider/config
+
+No parameters required.
+

 ## Deployment

 Heptio Ark also defines its own Deployment object for starting the Ark server on Kubernetes. When the Ark server is deployed, there are specific configurations that might be changed.
@@ -41,7 +41,7 @@ into the Ark server pod. Ensure the following:
 This means that the secrets containing the Azure service principal credentials for Ark has not been created/mounted
 properly into the Ark server pod. Ensure the following:
 * The `cloud-credentials` secret exists in the Ark server's namespace
-* The `cloud-credentials` secret has seven keys and each one has the correct value (see [setup instructions](0))
+* The `cloud-credentials` secret has all of the expected keys and each one has the correct value (see [setup instructions](0))
 * The `cloud-credentials` secret is defined as a volume for the Ark deployment
 * The `cloud-credentials` secret is being mounted into the Ark server pod at `/credentials`
@@ -112,9 +112,9 @@ _Note: If you use a custom namespace, replace `heptio-ark` with the name of the

 Specify the following values in the example files:

-* In file `examples/gcp/00-ark-config.yaml`:
+* In file `examples/gcp/05-ark-backupstoragelocation.yaml`:

-  * Replace `<YOUR_BUCKET>`. See the [Config definition][7] for details.
+  * Replace `<YOUR_BUCKET>`. See the [BackupStorageLocation definition][7] for details.

 * (Optional) If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:
@@ -130,7 +130,7 @@ In the root of your Ark directory, run:
 ```

 [0]: namespace.md
-[7]: config-definition.md#gcp
+[7]: backupstoragelocation-definition.md#gcp
 [15]: https://cloud.google.com/compute/docs/access/service-accounts
 [16]: https://cloud.google.com/sdk/docs/
 [20]: faq.md
@@ -53,9 +53,9 @@ kubectl create secret generic cloud-credentials \

 Specify the following values in the example files:

-* In `examples/ibm/00-ark-config.yaml`:
+* In `examples/ibm/05-ark-backupstoragelocation.yaml`:

-  * Replace `<YOUR_BUCKET>`, `<YOUR_REGION>` and `<YOUR_URL_ACCESS_POINT>`. See the [Config definition][6] for details.
+  * Replace `<YOUR_BUCKET>`, `<YOUR_REGION>` and `<YOUR_URL_ACCESS_POINT>`. See the [BackupStorageLocation definition][6] for details.

@@ -78,5 +78,5 @@ In the root of your Ark directory, run:
 [3]: https://console.bluemix.net/docs/services/cloud-object-storage/iam/service-credentials.html#service-credentials
 [4]: https://www.ibm.com/support/knowledgecenter/SSBS6K_2.1.0/kc_welcome_containers.html
 [5]: https://console.bluemix.net/docs/containers/container_index.html#container_index
-[6]: config-definition.md#aws
+[6]: backupstoragelocation-definition.md#aws
 [14]: http://docs.aws.amazon.com/IAM/latest/UserGuide/introduction.html
@@ -20,16 +20,5 @@ metadata:
   name: default
 persistentVolumeProvider:
   name: aws
   config:
     region: <YOUR_REGION>
-backupStorageProvider:
-  name: aws
-  bucket: <YOUR_BUCKET>
-  # Uncomment the below line to enable restic integration.
-  # The format for resticLocation is <bucket>[/<prefix>],
-  # e.g. "my-restic-bucket" or "my-restic-bucket/repos".
-  # This MUST be a different bucket than the main Ark bucket
-  # specified just above.
-  # resticLocation: <YOUR_RESTIC_LOCATION>
-  config:
-    region: <YOUR_REGION>
@@ -14,21 +14,19 @@

 ---
 apiVersion: ark.heptio.com/v1
-kind: Config
+kind: BackupStorageLocation
 metadata:
-  namespace: heptio-ark
   name: default
-backupStorageProvider:
-  name: aws
-  bucket: <YOUR_BUCKET>
-  # Uncomment the below line to enable restic integration.
-  # The format for resticLocation is <bucket>[/<prefix>],
-  # e.g. "my-restic-bucket" or "my-restic-bucket/repos".
-  # This MUST be a different bucket than the main Ark bucket
-  # specified just above.
-  # resticLocation: <YOUR_RESTIC_LOCATION>
-  config:
-    region: <YOUR_REGION>
-    s3ForcePathStyle: "true"
-    s3Url: <YOUR_URL_ACCESS_POINT>
----
+  namespace: heptio-ark
+spec:
+  provider: aws
+  objectStorage:
+    bucket: <YOUR_BUCKET>
+  config:
+    region: <YOUR_REGION>
+# Uncomment the below line to enable restic integration.
+# The format for resticLocation is <bucket>[/<prefix>],
+# e.g. "my-restic-bucket" or "my-restic-bucket/repos".
+# This MUST be a different bucket than the main Ark bucket
+# specified just above.
+# restic-location: <YOUR_RESTIC_LOCATION>
@@ -0,0 +1,33 @@
+# Copyright 2018 the Heptio Ark contributors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+apiVersion: ark.heptio.com/v1
+kind: BackupStorageLocation
+metadata:
+  name: default
+  namespace: heptio-ark
+spec:
+  provider: azure
+  objectStorage:
+    bucket: <YOUR_BLOB_CONTAINER>
+  config:
+    resourceGroup: <YOUR_STORAGE_RESOURCE_GROUP>
+    storageAccount: <YOUR_STORAGE_ACCOUNT>
+# Uncomment the below line to enable restic integration.
+# The format for resticLocation is <bucket>[/<prefix>],
+# e.g. "my-restic-bucket" or "my-restic-bucket/repos".
+# This MUST be a different bucket than the main Ark bucket
+# specified just above.
+# restic-location: <YOUR_RESTIC_LOCATION>
@@ -21,13 +21,4 @@ metadata:
 persistentVolumeProvider:
   name: azure
   config:
-    apiTimeout: <YOUR_TIMEOUT>
-backupStorageProvider:
-  name: azure
-  bucket: <YOUR_BUCKET>
-  # Uncomment the below line to enable restic integration.
-  # The format for resticLocation is <bucket>[/<prefix>],
-  # e.g. "my-restic-bucket" or "my-restic-bucket/repos".
-  # This MUST be a different bucket than the main Ark bucket
-  # specified just above.
-  # resticLocation: <YOUR_RESTIC_LOCATION>
+    apiTimeout: <YOUR_TIMEOUT>
@@ -147,6 +147,21 @@ spec:
     plural: resticrepositories
     kind: ResticRepository

+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: backupstoragelocations.ark.heptio.com
+  labels:
+    component: ark
+spec:
+  group: ark.heptio.com
+  version: v1
+  scope: Namespaced
+  names:
+    plural: backupstoragelocations
+    kind: BackupStorageLocation
+
 ---
 apiVersion: v1
 kind: Namespace
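
After applying the updated prereqs, the new CRD should be registered; a quick check (editor's sketch):

```bash
kubectl apply -f examples/common/00-prereqs.yaml
# The name below matches the metadata.name of the CRD added above.
kubectl get crd backupstoragelocations.ark.heptio.com
```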
@@ -19,14 +19,4 @@ metadata:
   namespace: heptio-ark
   name: default
 persistentVolumeProvider:
-  name: gcp
-backupStorageProvider:
-  name: gcp
-  bucket: <YOUR_BUCKET>
-  # Uncomment the below line to enable restic integration.
-  # The format for resticLocation is <bucket>[/<prefix>],
-  # e.g. "my-restic-bucket" or "my-restic-bucket/repos".
-  # This MUST be a different bucket than the main Ark bucket
-  # specified just above.
-  # resticLocation: <YOUR_RESTIC_LOCATION>
-
+  name: gcp
@@ -0,0 +1,30 @@
+# Copyright 2018 the Heptio Ark contributors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+apiVersion: ark.heptio.com/v1
+kind: BackupStorageLocation
+metadata:
+  name: default
+  namespace: heptio-ark
+spec:
+  provider: gcp
+  objectStorage:
+    bucket: <YOUR_BUCKET>
+# Uncomment the below line to enable restic integration.
+# The format for resticLocation is <bucket>[/<prefix>],
+# e.g. "my-restic-bucket" or "my-restic-bucket/repos".
+# This MUST be a different bucket than the main Ark bucket
+# specified just above.
+# restic-location: <YOUR_RESTIC_LOCATION>
@@ -0,0 +1,34 @@
+# Copyright 2018 the Heptio Ark contributors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+---
+apiVersion: ark.heptio.com/v1
+kind: BackupStorageLocation
+metadata:
+  name: default
+  namespace: heptio-ark
+spec:
+  provider: aws
+  objectStorage:
+    bucket: <YOUR_BUCKET>
+  config:
+    s3ForcePathStyle: "true"
+    s3Url: <YOUR_URL_ACCESS_POINT>
+    region: <YOUR_REGION>
+# Uncomment the below line to enable restic integration.
+# The format for resticLocation is <bucket>[/<prefix>],
+# e.g. "my-restic-bucket" or "my-restic-bucket/repos".
+# This MUST be a different bucket than the main Ark bucket
+# specified just above.
+# restic-location: <YOUR_RESTIC_LOCATION>
@@ -16,7 +16,9 @@ limitations under the License.

 package v1

-import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+import (
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)

 // BackupSpec defines the specification for an Ark backup.
 type BackupSpec struct {
@@ -56,6 +58,9 @@ type BackupSpec struct {

     // Hooks represent custom behaviors that should be executed at different phases of the backup.
     Hooks BackupHooks `json:"hooks"`
+
+    // StorageLocation is a string containing the name of a BackupStorageLocation where the backup should be stored.
+    StorageLocation string `json:"storageLocation"`
 }

 // BackupHooks contains custom behaviors that should be executed at different phases of the backup.
@@ -0,0 +1,94 @@
+/*
+Copyright 2018 the Heptio Ark contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupStorageLocation is a location where Ark stores backup objects.
+type BackupStorageLocation struct {
+    metav1.TypeMeta   `json:",inline"`
+    metav1.ObjectMeta `json:"metadata"`
+
+    Spec   BackupStorageLocationSpec   `json:"spec"`
+    Status BackupStorageLocationStatus `json:"status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupStorageLocationList is a list of BackupStorageLocations.
+type BackupStorageLocationList struct {
+    metav1.TypeMeta `json:",inline"`
+    metav1.ListMeta `json:"metadata"`
+    Items           []BackupStorageLocation `json:"items"`
+}
+
+// StorageType represents the type of storage that a backup location uses.
+// ObjectStorage must be non-nil, since it is currently the only supported StorageType.
+type StorageType struct {
+    ObjectStorage *ObjectStorageLocation `json:"objectStorage,omitempty"`
+}
+
+// ObjectStorageLocation specifies the settings necessary to connect to a provider's object storage.
+type ObjectStorageLocation struct {
+    // Bucket is the bucket to use for object storage.
+    Bucket string `json:"bucket"`
+
+    // Prefix is the path inside a bucket to use for Ark storage. Optional.
+    Prefix string `json:"prefix"`
+}
+
+// BackupStorageLocationSpec defines the specification for an Ark BackupStorageLocation.
+type BackupStorageLocationSpec struct {
+    // Provider is the provider of the backup storage.
+    Provider string `json:"provider"`
+
+    // Config is for provider-specific configuration fields.
+    Config map[string]string `json:"config"`
+
+    StorageType `json:",inline"`
+}
+
+// BackupStorageLocationPhase is the lifecycle phase of an Ark BackupStorageLocation.
+type BackupStorageLocationPhase string
+
+const (
+    // BackupStorageLocationPhaseAvailable means the location is available to read and write from.
+    BackupStorageLocationPhaseAvailable BackupStorageLocationPhase = "Available"
+
+    // BackupStorageLocationPhaseUnavailable means the location is unavailable to read and write from.
+    BackupStorageLocationPhaseUnavailable BackupStorageLocationPhase = "Unavailable"
+)
+
+// BackupStorageLocationAccessMode represents the permissions for a BackupStorageLocation.
+type BackupStorageLocationAccessMode string
+
+const (
+    // BackupStorageLocationAccessModeReadOnly represents read-only access to a BackupStorageLocation.
+    BackupStorageLocationAccessModeReadOnly BackupStorageLocationAccessMode = "ReadOnly"
+
+    // BackupStorageLocationAccessModeReadWrite represents read and write access to a BackupStorageLocation.
+    BackupStorageLocationAccessModeReadWrite BackupStorageLocationAccessMode = "ReadWrite"
+)
+
+// BackupStorageLocationStatus describes the current status of an Ark BackupStorageLocation.
+type BackupStorageLocationStatus struct {
+    Phase      BackupStorageLocationPhase      `json:"phase,omitempty"`
+    AccessMode BackupStorageLocationAccessMode `json:"accessMode,omitempty"`
+}
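
The `Status.Phase` field added here is what operators can poll to see whether a location is usable; a sketch (assumes the `default` location in the `heptio-ark` namespace, and that the server has reconciled the object):

```bash
# Prints "Available" or "Unavailable" once the status has been populated.
kubectl --namespace heptio-ark get backupstoragelocations default -o jsonpath='{.status.phase}'
```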
@@ -40,11 +40,6 @@ type Config struct {
     // PersistentVolumeProvider is the configuration information for the cloud where
     // the cluster is running and has PersistentVolumes to snapshot or restore. Optional.
     PersistentVolumeProvider *CloudProviderConfig `json:"persistentVolumeProvider"`
-
-    // BackupStorageProvider is the configuration information for the cloud where
-    // Ark backups are stored in object storage. This may be a different cloud than
-    // where the cluster is running.
-    BackupStorageProvider ObjectStorageProviderConfig `json:"backupStorageProvider"`
 }

 // CloudProviderConfig is configuration information about how to connect
@@ -54,21 +49,3 @@ type CloudProviderConfig struct {

     Config map[string]string `json:"config"`
 }
-
-// ObjectStorageProviderConfig is configuration information for connecting to
-// a particular bucket in object storage to access Ark backups.
-type ObjectStorageProviderConfig struct {
-    // CloudProviderConfig is the configuration information for the cloud where
-    // Ark backups are stored in object storage.
-    CloudProviderConfig `json:",inline"`
-
-    // Bucket is the name of the bucket in object storage where Ark backups
-    // are stored.
-    Bucket string `json:"bucket"`
-
-    // ResticLocation is the bucket and optional prefix in object storage where
-    // Ark stores restic backups of pod volumes, specified either as "bucket" or
-    // "bucket/prefix". This bucket must be different than the `Bucket` field.
-    // Optional.
-    ResticLocation string `json:"resticLocation"`
-}
@@ -36,4 +36,8 @@ const (
     // a backup/restore-specific timeout value for pod volume operations (i.e.
     // restic backups/restores).
     PodVolumeOperationTimeoutAnnotation = "ark.heptio.com/pod-volume-timeout"
+
+    // StorageLocationLabel is the label key used to identify the storage
+    // location of a backup.
+    StorageLocationLabel = "ark.heptio.com/storage-location"
 )
@@ -59,15 +59,16 @@ func newTypeInfo(pluralName string, itemType, itemListType runtime.Object) typeI
 // API group, keyed on Kind.
 func CustomResources() map[string]typeInfo {
     return map[string]typeInfo{
-        "Backup":              newTypeInfo("backups", &Backup{}, &BackupList{}),
-        "Restore":             newTypeInfo("restores", &Restore{}, &RestoreList{}),
-        "Schedule":            newTypeInfo("schedules", &Schedule{}, &ScheduleList{}),
-        "Config":              newTypeInfo("configs", &Config{}, &ConfigList{}),
-        "DownloadRequest":     newTypeInfo("downloadrequests", &DownloadRequest{}, &DownloadRequestList{}),
-        "DeleteBackupRequest": newTypeInfo("deletebackuprequests", &DeleteBackupRequest{}, &DeleteBackupRequestList{}),
-        "PodVolumeBackup":     newTypeInfo("podvolumebackups", &PodVolumeBackup{}, &PodVolumeBackupList{}),
-        "PodVolumeRestore":    newTypeInfo("podvolumerestores", &PodVolumeRestore{}, &PodVolumeRestoreList{}),
-        "ResticRepository":    newTypeInfo("resticrepositories", &ResticRepository{}, &ResticRepositoryList{}),
+        "Backup":                newTypeInfo("backups", &Backup{}, &BackupList{}),
+        "Restore":               newTypeInfo("restores", &Restore{}, &RestoreList{}),
+        "Schedule":              newTypeInfo("schedules", &Schedule{}, &ScheduleList{}),
+        "Config":                newTypeInfo("configs", &Config{}, &ConfigList{}),
+        "DownloadRequest":       newTypeInfo("downloadrequests", &DownloadRequest{}, &DownloadRequestList{}),
+        "DeleteBackupRequest":   newTypeInfo("deletebackuprequests", &DeleteBackupRequest{}, &DeleteBackupRequestList{}),
+        "PodVolumeBackup":       newTypeInfo("podvolumebackups", &PodVolumeBackup{}, &PodVolumeBackupList{}),
+        "PodVolumeRestore":      newTypeInfo("podvolumerestores", &PodVolumeRestore{}, &PodVolumeRestoreList{}),
+        "ResticRepository":      newTypeInfo("resticrepositories", &ResticRepository{}, &ResticRepositoryList{}),
+        "BackupStorageLocation": newTypeInfo("backupstoragelocations", &BackupStorageLocation{}, &BackupStorageLocationList{}),
     }
 }
@@ -301,6 +301,107 @@ func (in *BackupStatus) DeepCopy() *BackupStatus {
     return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupStorageLocation) DeepCopyInto(out *BackupStorageLocation) {
+    *out = *in
+    out.TypeMeta = in.TypeMeta
+    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+    in.Spec.DeepCopyInto(&out.Spec)
+    out.Status = in.Status
+    return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocation.
+func (in *BackupStorageLocation) DeepCopy() *BackupStorageLocation {
+    if in == nil {
+        return nil
+    }
+    out := new(BackupStorageLocation)
+    in.DeepCopyInto(out)
+    return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupStorageLocation) DeepCopyObject() runtime.Object {
+    if c := in.DeepCopy(); c != nil {
+        return c
+    }
+    return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupStorageLocationList) DeepCopyInto(out *BackupStorageLocationList) {
+    *out = *in
+    out.TypeMeta = in.TypeMeta
+    out.ListMeta = in.ListMeta
+    if in.Items != nil {
+        in, out := &in.Items, &out.Items
+        *out = make([]BackupStorageLocation, len(*in))
+        for i := range *in {
+            (*in)[i].DeepCopyInto(&(*out)[i])
+        }
+    }
+    return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocationList.
+func (in *BackupStorageLocationList) DeepCopy() *BackupStorageLocationList {
+    if in == nil {
+        return nil
+    }
+    out := new(BackupStorageLocationList)
+    in.DeepCopyInto(out)
+    return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BackupStorageLocationList) DeepCopyObject() runtime.Object {
+    if c := in.DeepCopy(); c != nil {
+        return c
+    }
+    return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupStorageLocationSpec) DeepCopyInto(out *BackupStorageLocationSpec) {
+    *out = *in
+    if in.Config != nil {
+        in, out := &in.Config, &out.Config
+        *out = make(map[string]string, len(*in))
+        for key, val := range *in {
+            (*out)[key] = val
+        }
+    }
+    in.StorageType.DeepCopyInto(&out.StorageType)
+    return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocationSpec.
+func (in *BackupStorageLocationSpec) DeepCopy() *BackupStorageLocationSpec {
+    if in == nil {
+        return nil
+    }
+    out := new(BackupStorageLocationSpec)
+    in.DeepCopyInto(out)
+    return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupStorageLocationStatus) DeepCopyInto(out *BackupStorageLocationStatus) {
+    *out = *in
+    return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocationStatus.
+func (in *BackupStorageLocationStatus) DeepCopy() *BackupStorageLocationStatus {
+    if in == nil {
+        return nil
+    }
+    out := new(BackupStorageLocationStatus)
+    in.DeepCopyInto(out)
+    return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *CloudProviderConfig) DeepCopyInto(out *CloudProviderConfig) {
     *out = *in
@@ -338,7 +439,6 @@ func (in *Config) DeepCopyInto(out *Config) {
             (*in).DeepCopyInto(*out)
         }
     }
-    in.BackupStorageProvider.DeepCopyInto(&out.BackupStorageProvider)
     return
 }
@@ -625,18 +725,17 @@ func (in *ExecHook) DeepCopy() *ExecHook {
 }

 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ObjectStorageProviderConfig) DeepCopyInto(out *ObjectStorageProviderConfig) {
+func (in *ObjectStorageLocation) DeepCopyInto(out *ObjectStorageLocation) {
     *out = *in
-    in.CloudProviderConfig.DeepCopyInto(&out.CloudProviderConfig)
     return
 }

-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageProviderConfig.
-func (in *ObjectStorageProviderConfig) DeepCopy() *ObjectStorageProviderConfig {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageLocation.
+func (in *ObjectStorageLocation) DeepCopy() *ObjectStorageLocation {
     if in == nil {
         return nil
     }
-    out := new(ObjectStorageProviderConfig)
+    out := new(ObjectStorageLocation)
     in.DeepCopyInto(out)
     return out
 }
@@ -1221,6 +1320,31 @@ func (in *ScheduleStatus) DeepCopy() *ScheduleStatus {
     return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageType) DeepCopyInto(out *StorageType) {
+    *out = *in
+    if in.ObjectStorage != nil {
+        in, out := &in.ObjectStorage, &out.ObjectStorage
+        if *in == nil {
+            *out = nil
+        } else {
+            *out = new(ObjectStorageLocation)
+            **out = **in
+        }
+    }
+    return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageType.
+func (in *StorageType) DeepCopy() *StorageType {
+    if in == nil {
+        return nil
+    }
+    out := new(StorageType)
+    in.DeepCopyInto(out)
+    return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *VolumeBackupInfo) DeepCopyInto(out *VolumeBackupInfo) {
     *out = *in
@@ -26,7 +26,6 @@ import (
     "time"

     "github.com/Azure/azure-sdk-for-go/arm/disk"
-    "github.com/Azure/azure-sdk-for-go/arm/examples/helpers"
     "github.com/Azure/go-autorest/autorest"
     "github.com/Azure/go-autorest/autorest/azure"
     "github.com/pkg/errors"
@@ -40,16 +39,10 @@ import (
 )

 const (
-	azureClientIDKey         = "AZURE_CLIENT_ID"
-	azureClientSecretKey     = "AZURE_CLIENT_SECRET"
-	azureSubscriptionIDKey   = "AZURE_SUBSCRIPTION_ID"
-	azureTenantIDKey         = "AZURE_TENANT_ID"
-	azureStorageAccountIDKey = "AZURE_STORAGE_ACCOUNT_ID"
-	azureStorageKeyKey       = "AZURE_STORAGE_KEY"
-	azureResourceGroupKey    = "AZURE_RESOURCE_GROUP"
-	apiTimeoutKey            = "apiTimeout"
-	snapshotsResource        = "snapshots"
-	disksResource            = "disks"
+	resourceGroupEnvVar = "AZURE_RESOURCE_GROUP"
+	apiTimeoutConfigKey = "apiTimeout"
+	snapshotsResource   = "snapshots"
+	disksResource       = "disks"
 )

 type blockStore struct {
@@ -71,52 +64,37 @@ func (si *snapshotIdentifier) String() string {
 	return getComputeResourceName(si.subscription, si.resourceGroup, snapshotsResource, si.name)
 }

-func getConfig() map[string]string {
-	cfg := map[string]string{
-		azureClientIDKey:         "",
-		azureClientSecretKey:     "",
-		azureSubscriptionIDKey:   "",
-		azureTenantIDKey:         "",
-		azureStorageAccountIDKey: "",
-		azureStorageKeyKey:       "",
-		azureResourceGroupKey:    "",
-	}
-
-	for key := range cfg {
-		cfg[key] = os.Getenv(key)
-	}
-
-	return cfg
-}
-
 func NewBlockStore(logger logrus.FieldLogger) cloudprovider.BlockStore {
 	return &blockStore{log: logger}
 }

 func (b *blockStore) Init(config map[string]string) error {
-	var (
-		apiTimeoutVal = config[apiTimeoutKey]
-		apiTimeout    time.Duration
-		err           error
-	)
-
-	if apiTimeout, err = time.ParseDuration(apiTimeoutVal); err != nil {
-		return errors.Wrapf(err, "could not parse %s (expected time.Duration)", apiTimeoutKey)
-	}
-
-	if apiTimeout == 0 {
-		apiTimeout = 2 * time.Minute
-	}
-
-	cfg := getConfig()
-
-	spt, err := helpers.NewServicePrincipalTokenFromCredentials(cfg, azure.PublicCloud.ResourceManagerEndpoint)
+	// 1. we need AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET, AZURE_SUBSCRIPTION_ID, AZURE_RESOURCE_GROUP
+	envVars, err := getRequiredValues(os.Getenv, tenantIDEnvVar, clientIDEnvVar, clientSecretEnvVar, subscriptionIDEnvVar, resourceGroupEnvVar)
 	if err != nil {
-		return errors.Wrap(err, "error creating new service principal token")
+		return errors.Wrap(err, "unable to get all required environment variables")
 	}

-	disksClient := disk.NewDisksClient(cfg[azureSubscriptionIDKey])
-	snapsClient := disk.NewSnapshotsClient(cfg[azureSubscriptionIDKey])
+	// 2. if config["apiTimeout"] is empty, default to 2m; otherwise, parse it
+	var apiTimeout time.Duration
+	if val := config[apiTimeoutConfigKey]; val == "" {
+		apiTimeout = 2 * time.Minute
+	} else {
+		apiTimeout, err = time.ParseDuration(val)
+		if err != nil {
+			return errors.Wrapf(err, "unable to parse value %q for config key %q (expected a duration string)", val, apiTimeoutConfigKey)
+		}
+	}
+
+	// 3. get SPT
+	spt, err := newServicePrincipalToken(envVars[tenantIDEnvVar], envVars[clientIDEnvVar], envVars[clientSecretEnvVar], azure.PublicCloud.ResourceManagerEndpoint)
+	if err != nil {
+		return errors.Wrap(err, "error getting service principal token")
+	}
+
+	// 4. set up clients
+	disksClient := disk.NewDisksClient(envVars[subscriptionIDEnvVar])
+	snapsClient := disk.NewSnapshotsClient(envVars[subscriptionIDEnvVar])

 	disksClient.PollingDelay = 5 * time.Second
 	snapsClient.PollingDelay = 5 * time.Second
@@ -127,8 +105,8 @@ func (b *blockStore) Init(config map[string]string) error {

 	b.disks = &disksClient
 	b.snaps = &snapsClient
-	b.subscription = cfg[azureSubscriptionIDKey]
-	b.resourceGroup = cfg[azureResourceGroupKey]
+	b.subscription = envVars[subscriptionIDEnvVar]
+	b.resourceGroup = envVars[resourceGroupEnvVar]
 	b.apiTimeout = apiTimeout

 	return nil
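The refactored Init above replaces the old parse-then-zero-check flow with an explicit default: an unset `apiTimeout` config key means two minutes, and anything else must parse as a Go duration. A minimal standalone sketch of just that step (`parseAPITimeout` is a hypothetical name used for illustration, not part of the change):

```go
package main

import (
	"fmt"
	"time"
)

// parseAPITimeout mirrors step 2 of the refactored Init: an empty config
// value falls back to two minutes; anything else must be a valid
// time.Duration string such as "90s" or "5m".
func parseAPITimeout(val string) (time.Duration, error) {
	if val == "" {
		return 2 * time.Minute, nil
	}
	return time.ParseDuration(val)
}

func main() {
	d, _ := parseAPITimeout("")
	fmt.Println(d) // 2m0s

	if _, err := parseAPITimeout("not-a-duration"); err != nil {
		fmt.Println(err) // parse error surfaces instead of silently defaulting
	}
}
```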
@@ -0,0 +1,60 @@
/*
Copyright 2018 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"strings"

	"github.com/Azure/go-autorest/autorest/adal"
	"github.com/Azure/go-autorest/autorest/azure"
	"github.com/pkg/errors"
)

const (
	tenantIDEnvVar       = "AZURE_TENANT_ID"
	subscriptionIDEnvVar = "AZURE_SUBSCRIPTION_ID"
	clientIDEnvVar       = "AZURE_CLIENT_ID"
	clientSecretEnvVar   = "AZURE_CLIENT_SECRET"
)

func newServicePrincipalToken(tenantID, clientID, clientSecret, scope string) (*adal.ServicePrincipalToken, error) {
	oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID)
	if err != nil {
		return nil, errors.Wrap(err, "error getting OAuthConfig")
	}

	return adal.NewServicePrincipalToken(*oauthConfig, clientID, clientSecret, scope)
}

func getRequiredValues(getValue func(string) string, keys ...string) (map[string]string, error) {
	missing := []string{}
	results := map[string]string{}

	for _, key := range keys {
		if val := getValue(key); val == "" {
			missing = append(missing, key)
		} else {
			results[key] = val
		}
	}

	if len(missing) > 0 {
		return nil, errors.Errorf("the following keys do not have values: %s", strings.Join(missing, ", "))
	}

	return results, nil
}
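getRequiredValues takes the lookup function as a parameter, so the same validation works against os.Getenv or a plain config map, and it reports every missing key at once rather than one at a time. A standalone sketch of that behavior (the function body is copied from the file above so the example compiles on its own; the config values are illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/pkg/errors"
)

// Copied from the new azure helper file above so this sketch is self-contained.
func getRequiredValues(getValue func(string) string, keys ...string) (map[string]string, error) {
	missing := []string{}
	results := map[string]string{}

	for _, key := range keys {
		if val := getValue(key); val == "" {
			missing = append(missing, key)
		} else {
			results[key] = val
		}
	}

	if len(missing) > 0 {
		return nil, errors.Errorf("the following keys do not have values: %s", strings.Join(missing, ", "))
	}

	return results, nil
}

func main() {
	config := map[string]string{"resourceGroup": "my-rg"} // "storageAccount" is deliberately absent
	lookup := func(key string) string { return config[key] }

	if _, err := getRequiredValues(lookup, "resourceGroup", "storageAccount"); err != nil {
		fmt.Println(err) // the following keys do not have values: storageAccount
	}
}
```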
@@ -18,16 +18,25 @@ package azure

 import (
 	"io"
+	"os"
+	"strings"
 	"time"

+	storagemgmt "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
 	"github.com/Azure/azure-sdk-for-go/storage"
+	"github.com/Azure/go-autorest/autorest"
 	"github.com/Azure/go-autorest/autorest/azure"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"

 	"github.com/heptio/ark/pkg/cloudprovider"
 )

+const (
+	resourceGroupConfigKey  = "resourceGroup"
+	storageAccountConfigKey = "storageAccount"
+)
+
 type objectStore struct {
 	blobClient *storage.BlobStorageClient
 	log        logrus.FieldLogger
@@ -37,16 +46,74 @@ func NewObjectStore(logger logrus.FieldLogger) cloudprovider.ObjectStore {
 	return &objectStore{log: logger}
 }

-func (o *objectStore) Init(config map[string]string) error {
-	cfg := getConfig()
-
-	storageClient, err := storage.NewBasicClient(cfg[azureStorageAccountIDKey], cfg[azureStorageKeyKey])
+func getStorageAccountKey(client storagemgmt.AccountsClient, resourceGroup, storageAccount string) (string, error) {
+	res, err := client.ListKeys(resourceGroup, storageAccount)
 	if err != nil {
-		return errors.WithStack(err)
+		return "", errors.WithStack(err)
 	}
+	if res.Keys == nil || len(*res.Keys) == 0 {
+		return "", errors.New("No storage keys found")
+	}
+
+	var storageKey string
+
+	for _, key := range *res.Keys {
+		// uppercase both strings for comparison because the ListKeys call returns e.g. "FULL" but
+		// the storagemgmt.Full constant in the SDK is defined as "Full".
+		if strings.ToUpper(string(key.Permissions)) == strings.ToUpper(string(storagemgmt.Full)) {
+			storageKey = *key.Value
+			break
+		}
+	}
+
+	if storageKey == "" {
+		return "", errors.New("No storage key with Full permissions found")
+	}
+
+	return storageKey, nil
+}
+
+func mapLookup(data map[string]string) func(string) string {
+	return func(key string) string {
+		return data[key]
+	}
+}
+
+func (o *objectStore) Init(config map[string]string) error {
+	// 1. we need AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET, AZURE_SUBSCRIPTION_ID
+	envVars, err := getRequiredValues(os.Getenv, tenantIDEnvVar, clientIDEnvVar, clientSecretEnvVar, subscriptionIDEnvVar)
+	if err != nil {
+		return errors.Wrap(err, "unable to get all required environment variables")
+	}
+
+	// 2. we need config["resourceGroup"], config["storageAccount"]
+	if _, err := getRequiredValues(mapLookup(config), resourceGroupConfigKey, storageAccountConfigKey); err != nil {
+		return errors.Wrap(err, "unable to get all required config values")
+	}
+
+	// 3. get SPT
+	spt, err := newServicePrincipalToken(envVars[tenantIDEnvVar], envVars[clientIDEnvVar], envVars[clientSecretEnvVar], azure.PublicCloud.ResourceManagerEndpoint)
+	if err != nil {
+		return errors.Wrap(err, "error getting service principal token")
+	}
+
+	// 4. get storageAccountsClient
+	storageAccountsClient := storagemgmt.NewAccountsClient(envVars[subscriptionIDEnvVar])
+	storageAccountsClient.Authorizer = autorest.NewBearerAuthorizer(spt)
+
+	// 5. get storage key
+	storageAccountKey, err := getStorageAccountKey(storageAccountsClient, config[resourceGroupConfigKey], config[storageAccountConfigKey])
+	if err != nil {
+		return errors.Wrap(err, "error getting storage account key")
+	}
+
+	// 6. get storageClient and blobClient
+	storageClient, err := storage.NewBasicClient(config[storageAccountConfigKey], storageAccountKey)
+	if err != nil {
+		return errors.Wrap(err, "error getting storage client")
+	}

 	blobClient := storageClient.GetBlobService()

 	o.blobClient = &blobClient

 	return nil
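The permissions check above uppercases both sides because, as its comment notes, ListKeys returns values like "FULL" while the SDK constant is "Full". For reference, strings.EqualFold does the same ASCII-case-insensitive comparison without the intermediate allocations; a tiny standalone sketch (an observation only, not a change this PR makes):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Equivalent to strings.ToUpper(a) == strings.ToUpper(b) for ASCII input.
	fmt.Println(strings.EqualFold("FULL", "Full")) // true
	fmt.Println(strings.EqualFold("FULL", "READ")) // false
}
```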
@@ -147,8 +214,6 @@ func (o *objectStore) DeleteObject(bucket string, key string) error {
 	return errors.WithStack(blob.Delete(nil))
 }

-const sasURIReadPermission = "r"
-
 func (o *objectStore) CreateSignedURL(bucket, key string, ttl time.Duration) (string, error) {
 	container, err := getContainerReference(o.blobClient, bucket)
 	if err != nil {
@@ -160,7 +225,16 @@ func (o *objectStore) CreateSignedURL(bucket, key string, ttl time.Duration) (st
 		return "", err
 	}

-	return blob.GetSASURI(time.Now().Add(ttl), sasURIReadPermission)
+	opts := storage.BlobSASOptions{
+		SASOptions: storage.SASOptions{
+			Expiry: time.Now().Add(ttl),
+		},
+		BlobServiceSASPermissions: storage.BlobServiceSASPermissions{
+			Read: true,
+		},
+	}
+
+	return blob.GetSASURI(opts)
 }

 func getContainerReference(blobClient *storage.BlobStorageClient, bucket string) (*storage.Container, error) {
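The v11 SDK drops the (expiry, permission-string) form of GetSASURI in favor of a structured options type, which is what forces this change. A standalone sketch of building the same read-only, TTL-bound options against the pinned SDK (the one-hour TTL is illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	ttl := time.Hour

	// Read-only SAS options, mirroring what CreateSignedURL builds above.
	opts := storage.BlobSASOptions{
		SASOptions: storage.SASOptions{
			Expiry: time.Now().Add(ttl),
		},
		BlobServiceSASPermissions: storage.BlobServiceSASPermissions{
			Read: true,
		},
	}

	fmt.Println(opts.Expiry) // when the signed URL stops working
}
```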
@@ -1,97 +0,0 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cloudprovider

import (
	"context"
	"sync"
	"time"

	"github.com/sirupsen/logrus"

	"k8s.io/apimachinery/pkg/util/wait"

	"github.com/heptio/ark/pkg/apis/ark/v1"
)

// backupCacheBucket holds the backups and error from a ListBackups call.
type backupCacheBucket struct {
	backups []*v1.Backup
	error   error
}

// backupCache caches ListBackups calls, refreshing them periodically.
type backupCache struct {
	delegate BackupLister
	lock     sync.RWMutex
	// This doesn't really need to be a map right now, but if we ever move to supporting multiple
	// buckets, this will be ready for it.
	buckets map[string]*backupCacheBucket
	logger  logrus.FieldLogger
}

var _ BackupLister = &backupCache{}

// NewBackupCache returns a new backup cache that refreshes from delegate every resyncPeriod.
func NewBackupCache(ctx context.Context, delegate BackupLister, resyncPeriod time.Duration, logger logrus.FieldLogger) BackupLister {
	c := &backupCache{
		delegate: delegate,
		buckets:  make(map[string]*backupCacheBucket),
		logger:   logger,
	}

	// Start the goroutine to refresh all buckets every resyncPeriod. This stops when ctx.Done() is
	// available.
	go wait.Until(c.refresh, resyncPeriod, ctx.Done())

	return c
}

// refresh refreshes all the buckets currently in the cache by doing a live lookup via c.delegate.
func (c *backupCache) refresh() {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.logger.Debug("refreshing all cached backup lists from object storage")

	for bucketName, bucket := range c.buckets {
		c.logger.WithField("bucket", bucketName).Debug("Refreshing bucket")
		bucket.backups, bucket.error = c.delegate.ListBackups(bucketName)
	}
}

func (c *backupCache) ListBackups(bucketName string) ([]*v1.Backup, error) {
	c.lock.RLock()
	bucket, found := c.buckets[bucketName]
	c.lock.RUnlock()

	logContext := c.logger.WithField("bucket", bucketName)

	if found {
		logContext.Debug("Returning cached backup list")
		return bucket.backups, bucket.error
	}

	logContext.Debug("Bucket is not in cache - doing a live lookup")

	backups, err := c.delegate.ListBackups(bucketName)
	c.lock.Lock()
	c.buckets[bucketName] = &backupCacheBucket{backups: backups, error: err}
	c.lock.Unlock()

	return backups, err
}
@@ -1,170 +0,0 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cloudprovider

import (
	"context"
	"errors"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"github.com/heptio/ark/pkg/apis/ark/v1"
	cloudprovidermocks "github.com/heptio/ark/pkg/cloudprovider/mocks"
	"github.com/heptio/ark/pkg/util/test"
)

func TestNewBackupCache(t *testing.T) {
	var (
		delegate    = &cloudprovidermocks.BackupLister{}
		ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
		logger      = test.NewLogger()
	)
	defer cancel()

	c := NewBackupCache(ctx, delegate, 100*time.Millisecond, logger)

	// nothing in cache, live lookup
	bucket1 := []*v1.Backup{
		test.NewTestBackup().WithName("backup1").Backup,
		test.NewTestBackup().WithName("backup2").Backup,
	}
	delegate.On("ListBackups", "bucket1").Return(bucket1, nil).Once()

	// should be updated via refresh
	updatedBucket1 := []*v1.Backup{
		test.NewTestBackup().WithName("backup2").Backup,
	}
	delegate.On("ListBackups", "bucket1").Return(updatedBucket1, nil)

	// nothing in cache, live lookup
	bucket2 := []*v1.Backup{
		test.NewTestBackup().WithName("backup5").Backup,
		test.NewTestBackup().WithName("backup6").Backup,
	}
	delegate.On("ListBackups", "bucket2").Return(bucket2, nil).Once()

	// should be updated via refresh
	updatedBucket2 := []*v1.Backup{
		test.NewTestBackup().WithName("backup7").Backup,
	}
	delegate.On("ListBackups", "bucket2").Return(updatedBucket2, nil)

	backups, err := c.ListBackups("bucket1")
	assert.Equal(t, bucket1, backups)
	assert.NoError(t, err)

	backups, err = c.ListBackups("bucket2")
	assert.Equal(t, bucket2, backups)
	assert.NoError(t, err)

	var done1, done2 bool
	for {
		select {
		case <-ctx.Done():
			t.Fatal("timed out")
		default:
			if done1 && done2 {
				return
			}
		}

		backups, err = c.ListBackups("bucket1")
		if len(backups) == 1 {
			if assert.Equal(t, updatedBucket1[0], backups[0]) {
				done1 = true
			}
		}

		backups, err = c.ListBackups("bucket2")
		if len(backups) == 1 {
			if assert.Equal(t, updatedBucket2[0], backups[0]) {
				done2 = true
			}
		}
		time.Sleep(100 * time.Millisecond)
	}
}

func TestBackupCacheRefresh(t *testing.T) {
	var (
		delegate = &cloudprovidermocks.BackupLister{}
		logger   = test.NewLogger()
	)

	c := &backupCache{
		delegate: delegate,
		buckets: map[string]*backupCacheBucket{
			"bucket1": {},
			"bucket2": {},
		},
		logger: logger,
	}

	bucket1 := []*v1.Backup{
		test.NewTestBackup().WithName("backup1").Backup,
		test.NewTestBackup().WithName("backup2").Backup,
	}
	delegate.On("ListBackups", "bucket1").Return(bucket1, nil)

	delegate.On("ListBackups", "bucket2").Return(nil, errors.New("bad"))

	c.refresh()

	assert.Equal(t, bucket1, c.buckets["bucket1"].backups)
	assert.NoError(t, c.buckets["bucket1"].error)

	assert.Empty(t, c.buckets["bucket2"].backups)
	assert.EqualError(t, c.buckets["bucket2"].error, "bad")
}

func TestBackupCacheGetAllBackupsUsesCacheIfPresent(t *testing.T) {
	var (
		delegate = &cloudprovidermocks.BackupLister{}
		logger   = test.NewLogger()
		bucket1  = []*v1.Backup{
			test.NewTestBackup().WithName("backup1").Backup,
			test.NewTestBackup().WithName("backup2").Backup,
		}
	)

	c := &backupCache{
		delegate: delegate,
		buckets: map[string]*backupCacheBucket{
			"bucket1": {
				backups: bucket1,
			},
		},
		logger: logger,
	}

	bucket2 := []*v1.Backup{
		test.NewTestBackup().WithName("backup3").Backup,
		test.NewTestBackup().WithName("backup4").Backup,
	}

	delegate.On("ListBackups", "bucket2").Return(bucket2, nil)

	backups, err := c.ListBackups("bucket1")
	assert.Equal(t, bucket1, backups)
	assert.NoError(t, err)

	backups, err = c.ListBackups("bucket2")
	assert.Equal(t, bucket2, backups)
	assert.NoError(t, err)
}
@@ -147,22 +147,6 @@ func DownloadBackup(objectStore ObjectStore, bucket, backupName string) (io.Read
 	return objectStore.GetObject(bucket, getBackupContentsKey(backupName, backupName))
 }

-type liveBackupLister struct {
-	logger      logrus.FieldLogger
-	objectStore ObjectStore
-}
-
-func NewLiveBackupLister(logger logrus.FieldLogger, objectStore ObjectStore) BackupLister {
-	return &liveBackupLister{
-		logger:      logger,
-		objectStore: objectStore,
-	}
-}
-
-func (l *liveBackupLister) ListBackups(bucket string) ([]*api.Backup, error) {
-	return ListBackups(l.logger, l.objectStore, bucket)
-}
-
 func ListBackups(logger logrus.FieldLogger, objectStore ObjectStore, bucket string) ([]*api.Backup, error) {
 	prefixes, err := objectStore.ListCommonPrefixes(bucket, "/")
 	if err != nil {
@@ -23,6 +23,7 @@ import (

 	"github.com/heptio/ark/pkg/client"
 	"github.com/heptio/ark/pkg/cmd/cli/backup"
+	"github.com/heptio/ark/pkg/cmd/cli/backuplocation"
 	"github.com/heptio/ark/pkg/cmd/cli/bug"
 	cliclient "github.com/heptio/ark/pkg/cmd/cli/client"
 	"github.com/heptio/ark/pkg/cmd/cli/completion"
@@ -71,6 +72,7 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.`
 		completion.NewCommand(),
 		restic.NewCommand(f),
 		bug.NewCommand(),
+		backuplocation.NewCommand(f),
 	)

 	// add the glog flags
@@ -32,6 +32,7 @@ import (
 	"github.com/heptio/ark/pkg/cmd"
 	"github.com/heptio/ark/pkg/cmd/util/flag"
 	"github.com/heptio/ark/pkg/cmd/util/output"
+	arkclient "github.com/heptio/ark/pkg/generated/clientset/versioned"
 )

 func NewCreateCommand(f client.Factory, use string) *cobra.Command {
@@ -42,8 +43,8 @@ func NewCreateCommand(f client.Factory, use string) *cobra.Command {
 		Short: "Create a backup",
 		Args:  cobra.ExactArgs(1),
 		Run: func(c *cobra.Command, args []string) {
-			cmd.CheckError(o.Complete(args))
-			cmd.CheckError(o.Validate(c, args))
+			cmd.CheckError(o.Complete(args, f))
+			cmd.CheckError(o.Validate(c, args, f))
 			cmd.CheckError(o.Run(c, f))
 		},
 	}
@@ -68,6 +69,9 @@ type CreateOptions struct {
 	Selector                flag.LabelSelector
 	IncludeClusterResources flag.OptionalBool
 	Wait                    bool
+	StorageLocation         string
+
+	client arkclient.Interface
 }

 func NewCreateOptions() *CreateOptions {
@@ -87,6 +91,7 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
 	flags.Var(&o.IncludeResources, "include-resources", "resources to include in the backup, formatted as resource.group, such as storageclasses.storage.k8s.io (use '*' for all resources)")
 	flags.Var(&o.ExcludeResources, "exclude-resources", "resources to exclude from the backup, formatted as resource.group, such as storageclasses.storage.k8s.io")
 	flags.Var(&o.Labels, "labels", "labels to apply to the backup")
+	flags.StringVar(&o.StorageLocation, "storage-location", "", "location in which to store the backup")
 	flags.VarP(&o.Selector, "selector", "l", "only back up resources matching this label selector")
 	f := flags.VarPF(&o.SnapshotVolumes, "snapshot-volumes", "", "take snapshots of PersistentVolumes as part of the backup")
 	// this allows the user to just specify "--snapshot-volumes" as shorthand for "--snapshot-volumes=true"
@@ -103,24 +108,31 @@ func (o *CreateOptions) BindWait(flags *pflag.FlagSet) {
 	flags.BoolVarP(&o.Wait, "wait", "w", o.Wait, "wait for the operation to complete")
 }

-func (o *CreateOptions) Validate(c *cobra.Command, args []string) error {
+func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Factory) error {
 	if err := output.ValidateFlags(c); err != nil {
 		return err
 	}

+	if o.StorageLocation != "" {
+		if _, err := o.client.ArkV1().BackupStorageLocations(f.Namespace()).Get(o.StorageLocation, metav1.GetOptions{}); err != nil {
+			return err
+		}
+	}
+
 	return nil
 }

-func (o *CreateOptions) Complete(args []string) error {
+func (o *CreateOptions) Complete(args []string, f client.Factory) error {
 	o.Name = args[0]
+	client, err := f.Client()
+	if err != nil {
+		return err
+	}
+	o.client = client
 	return nil
 }

 func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
-	arkClient, err := f.Client()
-	if err != nil {
-		return err
-	}
-
 	backup := &api.Backup{
 		ObjectMeta: metav1.ObjectMeta{
@@ -137,6 +149,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
 			SnapshotVolumes:         o.SnapshotVolumes.Value,
 			TTL:                     metav1.Duration{Duration: o.TTL},
 			IncludeClusterResources: o.IncludeClusterResources.Value,
+			StorageLocation:         o.StorageLocation,
 		},
 	}

@@ -152,7 +165,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {

 		updates = make(chan *api.Backup)

-		backupInformer = v1.NewBackupInformer(arkClient, f.Namespace(), 0, nil)
+		backupInformer = v1.NewBackupInformer(o.client, f.Namespace(), 0, nil)

 		backupInformer.AddEventHandler(
 			cache.FilteringResourceEventHandler{
@@ -184,7 +197,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
 		go backupInformer.Run(stop)
 	}

-	_, err = arkClient.ArkV1().Backups(backup.Namespace).Create(backup)
+	_, err := o.client.ArkV1().Backups(backup.Namespace).Create(backup)
 	if err != nil {
 		return err
 	}
@@ -0,0 +1,38 @@
/*
Copyright 2018 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backuplocation

import (
	"github.com/spf13/cobra"

	"github.com/heptio/ark/pkg/client"
)

func NewCommand(f client.Factory) *cobra.Command {
	c := &cobra.Command{
		Use:   "backup-location",
		Short: "Work with backup storage locations",
		Long:  "Work with backup storage locations",
	}

	c.AddCommand(
		NewCreateCommand(f, "create"),
		NewGetCommand(f, "get"),
	)

	return c
}
@@ -0,0 +1,134 @@
/*
Copyright 2018 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backuplocation

import (
	"fmt"

	"github.com/pkg/errors"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/cmd"
	"github.com/heptio/ark/pkg/cmd/util/flag"
	"github.com/heptio/ark/pkg/cmd/util/output"
)

func NewCreateCommand(f client.Factory, use string) *cobra.Command {
	o := NewCreateOptions()

	c := &cobra.Command{
		Use:   use + " NAME",
		Short: "Create a backup storage location",
		Args:  cobra.ExactArgs(1),
		Run: func(c *cobra.Command, args []string) {
			cmd.CheckError(o.Complete(args, f))
			cmd.CheckError(o.Validate(c, args, f))
			cmd.CheckError(o.Run(c, f))
		},
	}

	o.BindFlags(c.Flags())
	output.BindFlags(c.Flags())
	output.ClearOutputFlagDefault(c)

	return c
}

type CreateOptions struct {
	Name     string
	Provider string
	Bucket   string
	Prefix   string
	Config   flag.Map
	Labels   flag.Map
}

func NewCreateOptions() *CreateOptions {
	return &CreateOptions{
		Config: flag.NewMap(),
	}
}

func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
	flags.StringVar(&o.Provider, "provider", o.Provider, "name of the backup storage provider (e.g. aws, azure, gcp)")
	flags.StringVar(&o.Bucket, "bucket", o.Bucket, "name of the object storage bucket where backups should be stored")
	flags.StringVar(&o.Prefix, "prefix", o.Prefix, "prefix under which all Ark data should be stored within the bucket. Optional.")
	flags.Var(&o.Config, "config", "configuration key-value pairs")
	flags.Var(&o.Labels, "labels", "labels to apply to the backup storage location")
}

func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Factory) error {
	if err := output.ValidateFlags(c); err != nil {
		return err
	}

	if o.Provider == "" {
		return errors.New("--provider is required")
	}

	if o.Bucket == "" {
		return errors.New("--bucket is required")
	}

	return nil
}

func (o *CreateOptions) Complete(args []string, f client.Factory) error {
	o.Name = args[0]
	return nil
}

func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
	backupStorageLocation := &api.BackupStorageLocation{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: f.Namespace(),
			Name:      o.Name,
			Labels:    o.Labels.Data(),
		},
		Spec: api.BackupStorageLocationSpec{
			Provider: o.Provider,
			StorageType: api.StorageType{
				ObjectStorage: &api.ObjectStorageLocation{
					Bucket: o.Bucket,
					Prefix: o.Prefix,
				},
			},
			Config: o.Config.Data(),
		},
	}

	if printed, err := output.PrintWithFormat(c, backupStorageLocation); printed || err != nil {
		return err
	}

	client, err := f.Client()
	if err != nil {
		return err
	}

	if _, err := client.ArkV1().BackupStorageLocations(backupStorageLocation.Namespace).Create(backupStorageLocation); err != nil {
		return errors.WithStack(err)
	}

	fmt.Printf("Backup storage location %q configured successfully.\n", backupStorageLocation.Name)
	return nil
}
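Once this command ships, a location can be created with something like `ark backup-location create default --provider aws --bucket my-ark-bucket --config region=us-east-1` (values illustrative). A standalone sketch of the object such a call builds, assuming Ark's usual `heptio-ark` namespace (normally supplied by client.Factory):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
)

func main() {
	// Illustrative values only; the CLI fills these in from its flags.
	loc := &api.BackupStorageLocation{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "heptio-ark",
			Name:      "default",
		},
		Spec: api.BackupStorageLocationSpec{
			Provider: "aws",
			StorageType: api.StorageType{
				ObjectStorage: &api.ObjectStorageLocation{
					Bucket: "my-ark-bucket",
					Prefix: "cluster-1",
				},
			},
			Config: map[string]string{"region": "us-east-1"},
		},
	}

	// ObjectStorage is promoted through the embedded StorageType.
	fmt.Printf("%s/%s -> %s\n", loc.Namespace, loc.Name, loc.Spec.ObjectStorage.Bucket)
}
```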
@@ -0,0 +1,66 @@
/*
Copyright 2018 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backuplocation

import (
	"github.com/spf13/cobra"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/cmd"
	"github.com/heptio/ark/pkg/cmd/util/output"
)

func NewGetCommand(f client.Factory, use string) *cobra.Command {
	var listOptions metav1.ListOptions

	c := &cobra.Command{
		Use:   use,
		Short: "Get backup storage locations",
		Run: func(c *cobra.Command, args []string) {
			err := output.ValidateFlags(c)
			cmd.CheckError(err)

			arkClient, err := f.Client()
			cmd.CheckError(err)

			var locations *api.BackupStorageLocationList
			if len(args) > 0 {
				locations = new(api.BackupStorageLocationList)
				for _, name := range args {
					location, err := arkClient.Ark().BackupStorageLocations(f.Namespace()).Get(name, metav1.GetOptions{})
					cmd.CheckError(err)
					locations.Items = append(locations.Items, *location)
				}
			} else {
				locations, err = arkClient.ArkV1().BackupStorageLocations(f.Namespace()).List(listOptions)
				cmd.CheckError(err)
			}

			_, err = output.PrintWithFormat(c, locations)
			cmd.CheckError(err)
		},
	}

	c.Flags().StringVarP(&listOptions.LabelSelector, "selector", "l", listOptions.LabelSelector, "only show items matching this label selector")

	output.BindFlags(c.Flags())

	return c
}
@@ -21,6 +21,7 @@ import (

 	"github.com/heptio/ark/pkg/client"
 	"github.com/heptio/ark/pkg/cmd/cli/backup"
+	"github.com/heptio/ark/pkg/cmd/cli/backuplocation"
 	"github.com/heptio/ark/pkg/cmd/cli/restore"
 	"github.com/heptio/ark/pkg/cmd/cli/schedule"
 )
@@ -36,6 +37,7 @@ func NewCommand(f client.Factory) *cobra.Command {
 		backup.NewCreateCommand(f, "backup"),
 		schedule.NewCreateCommand(f, "schedule"),
 		restore.NewCreateCommand(f, "restore"),
+		backuplocation.NewCreateCommand(f, "backup-location"),
 	)

 	return c
@@ -21,6 +21,7 @@ import (

 	"github.com/heptio/ark/pkg/client"
 	"github.com/heptio/ark/pkg/cmd/cli/backup"
+	"github.com/heptio/ark/pkg/cmd/cli/backuplocation"
 	"github.com/heptio/ark/pkg/cmd/cli/restore"
 	"github.com/heptio/ark/pkg/cmd/cli/schedule"
 )
@@ -41,10 +42,14 @@ func NewCommand(f client.Factory) *cobra.Command {
 	restoreCommand := restore.NewGetCommand(f, "restores")
 	restoreCommand.Aliases = []string{"restore"}

+	backupLocationCommand := backuplocation.NewGetCommand(f, "backup-locations")
+	backupLocationCommand.Aliases = []string{"backup-location"}
+
 	c.AddCommand(
 		backupCommand,
 		scheduleCommand,
 		restoreCommand,
+		backupLocationCommand,
 	)

 	return c
@@ -51,8 +51,8 @@ func NewCreateCommand(f client.Factory, use string) *cobra.Command {
 		Example: `ark create schedule NAME --schedule="0 */6 * * *"`,
 		Args:    cobra.ExactArgs(1),
 		Run: func(c *cobra.Command, args []string) {
-			cmd.CheckError(o.Complete(args))
-			cmd.CheckError(o.Validate(c, args))
+			cmd.CheckError(o.Complete(args, f))
+			cmd.CheckError(o.Validate(c, args, f))
 			cmd.CheckError(o.Run(c, f))
 		},
 	}
@@ -82,16 +82,16 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
 	flags.StringVar(&o.Schedule, "schedule", o.Schedule, "a cron expression specifying a recurring schedule for this backup to run")
 }

-func (o *CreateOptions) Validate(c *cobra.Command, args []string) error {
+func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Factory) error {
 	if len(o.Schedule) == 0 {
 		return errors.New("--schedule is required")
 	}

-	return o.BackupOptions.Validate(c, args)
+	return o.BackupOptions.Validate(c, args, f)
 }

-func (o *CreateOptions) Complete(args []string) error {
-	return o.BackupOptions.Complete(args)
+func (o *CreateOptions) Complete(args []string, f client.Factory) error {
+	return o.BackupOptions.Complete(args, f)
 }

 func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
@@ -114,6 +114,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
 				LabelSelector:   o.BackupOptions.Selector.LabelSelector,
 				SnapshotVolumes: o.BackupOptions.SnapshotVolumes.Value,
 				TTL:             metav1.Duration{Duration: o.BackupOptions.TTL},
+				StorageLocation: o.BackupOptions.StorageLocation,
 			},
 			Schedule: o.Schedule,
 		},
@@ -30,6 +30,7 @@ import (
 	"time"

 	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
@@ -67,7 +68,6 @@ import (
 	"github.com/heptio/ark/pkg/util/kube"
 	"github.com/heptio/ark/pkg/util/logging"
 	"github.com/heptio/ark/pkg/util/stringslice"
-	"github.com/prometheus/client_golang/prometheus/promhttp"
 )

 const (
@@ -76,10 +76,10 @@ const (
 )

 type serverConfig struct {
-	pluginDir, metricsAddress                   string
-	backupSyncPeriod, podVolumeOperationTimeout time.Duration
-	restoreResourcePriorities                   []string
-	restoreOnly                                 bool
+	pluginDir, metricsAddress, defaultBackupLocation string
+	backupSyncPeriod, podVolumeOperationTimeout      time.Duration
+	restoreResourcePriorities                        []string
+	restoreOnly                                      bool
 }

 func NewCommand() *cobra.Command {
@@ -88,6 +88,7 @@ func NewCommand() *cobra.Command {
 		config = serverConfig{
 			pluginDir:                 "/plugins",
 			metricsAddress:            defaultMetricsAddress,
+			defaultBackupLocation:     "default",
 			backupSyncPeriod:          defaultBackupSyncPeriod,
 			podVolumeOperationTimeout: defaultPodVolumeOperationTimeout,
 			restoreResourcePriorities: defaultRestorePriorities,
@@ -140,6 +141,7 @@ func NewCommand() *cobra.Command {
 	command.Flags().DurationVar(&config.podVolumeOperationTimeout, "restic-timeout", config.podVolumeOperationTimeout, "how long backups/restores of pod volumes should be allowed to run before timing out")
 	command.Flags().BoolVar(&config.restoreOnly, "restore-only", config.restoreOnly, "run in a mode where only restores are allowed; backups, schedules, and garbage-collection are all disabled")
 	command.Flags().StringSliceVar(&config.restoreResourcePriorities, "restore-resource-priorities", config.restoreResourcePriorities, "desired order of resource restores; any resource not in the list will be restored alphabetically after the prioritized resources")
+	command.Flags().StringVar(&config.defaultBackupLocation, "default-backup-storage-location", config.defaultBackupLocation, "name of the default backup storage location")

 	return command
 }
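The flag wiring means the server looks up the BackupStorageLocation named `default` unless told otherwise. A standalone sketch of that default-and-override behavior using the same spf13/pflag API (the flag-set name and override value are illustrative):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	var loc string

	flags := pflag.NewFlagSet("server", pflag.ExitOnError)
	flags.StringVar(&loc, "default-backup-storage-location", "default", "name of the default backup storage location")

	// No flag passed: the compiled-in default survives.
	flags.Parse(nil)
	fmt.Println(loc) // default

	// An explicit override wins.
	flags.Parse([]string{"--default-backup-storage-location=primary"})
	fmt.Println(loc) // primary
}
```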
@@ -164,7 +166,6 @@ type server struct {
 	kubeClientConfig      *rest.Config
 	kubeClient            kubernetes.Interface
 	arkClient             clientset.Interface
-	objectStore           cloudprovider.ObjectStore
 	blockStore            cloudprovider.BlockStore
 	discoveryClient       discovery.DiscoveryInterface
 	discoveryHelper       arkdiscovery.Helper
@@ -221,7 +222,7 @@ func newServer(namespace, baseName string, config serverConfig, logger *logrus.L
 		arkClient:             arkClient,
 		discoveryClient:       arkClient.Discovery(),
 		dynamicClient:         dynamicClient,
-		sharedInformerFactory: informers.NewFilteredSharedInformerFactory(arkClient, 0, namespace, nil),
+		sharedInformerFactory: informers.NewSharedInformerFactoryWithOptions(arkClient, 0, informers.WithNamespace(namespace)),
 		ctx:                   ctx,
 		cancelFunc:            cancelFunc,
 		logger:                logger,
@@ -267,11 +268,10 @@ func (s *server) run() error {

 	s.watchConfig(originalConfig)

-	objectStore, err := getObjectStore(config.BackupStorageProvider.CloudProviderConfig, s.pluginManager)
+	backupStorageLocation, err := s.arkClient.ArkV1().BackupStorageLocations(s.namespace).Get(s.config.defaultBackupLocation, metav1.GetOptions{})
 	if err != nil {
-		return err
+		return errors.WithStack(err)
 	}
-	s.objectStore = objectStore

 	if config.PersistentVolumeProvider == nil {
 		s.logger.Info("PersistentVolumeProvider config not provided, volume snapshots and restores are disabled")
@@ -284,13 +284,13 @@ func (s *server) run() error {
 		s.blockStore = blockStore
 	}

-	if config.BackupStorageProvider.ResticLocation != "" {
-		if err := s.initRestic(config.BackupStorageProvider); err != nil {
+	if backupStorageLocation.Spec.Config[restic.ResticLocationConfigKey] != "" {
+		if err := s.initRestic(backupStorageLocation.Spec.Provider); err != nil {
 			return err
 		}
 	}

-	if err := s.runControllers(config); err != nil {
+	if err := s.runControllers(config, backupStorageLocation); err != nil {
 		return err
 	}
@@ -312,15 +312,6 @@ func (s *server) applyConfigDefaults(c *api.Config) {
 	} else {
 		s.logger.WithField("priorities", s.config.restoreResourcePriorities).Info("Using given resource priorities")
 	}
-
-	if c.BackupStorageProvider.Config == nil {
-		c.BackupStorageProvider.Config = make(map[string]string)
-	}
-
-	// add the bucket name to the config map so that object stores can use
-	// it when initializing. The AWS object store uses this to determine the
-	// bucket's region when setting up its client.
-	c.BackupStorageProvider.Config["bucket"] = c.BackupStorageProvider.Bucket
 }

 // namespaceExists returns nil if namespace can be successfully
@@ -480,23 +471,6 @@ func (s *server) watchConfig(config *api.Config) {
 	})
 }

-func getObjectStore(cloudConfig api.CloudProviderConfig, manager plugin.Manager) (cloudprovider.ObjectStore, error) {
-	if cloudConfig.Name == "" {
-		return nil, errors.New("object storage provider name must not be empty")
-	}
-
-	objectStore, err := manager.GetObjectStore(cloudConfig.Name)
-	if err != nil {
-		return nil, err
-	}
-
-	if err := objectStore.Init(cloudConfig.Config); err != nil {
-		return nil, err
-	}
-
-	return objectStore, nil
-}
-
 func getBlockStore(cloudConfig api.CloudProviderConfig, manager plugin.Manager) (cloudprovider.BlockStore, error) {
 	if cloudConfig.Name == "" {
 		return nil, errors.New("block storage provider name must not be empty")
@@ -514,14 +488,7 @@ func getBlockStore(cloudConfig api.CloudProviderConfig, manager plugin.Manager)
 	return blockStore, nil
 }

-func durationMin(a, b time.Duration) time.Duration {
-	if a < b {
-		return a
-	}
-	return b
-}
-
-func (s *server) initRestic(config api.ObjectStorageProviderConfig) error {
+func (s *server) initRestic(providerName string) error {
 	// warn if restic daemonset does not exist
 	if _, err := s.kubeClient.AppsV1().DaemonSets(s.namespace).Get(restic.DaemonSet, metav1.GetOptions{}); apierrors.IsNotFound(err) {
 		s.logger.Warn("Ark restic daemonset not found; restic backups/restores will not work until it's created")
@@ -535,7 +502,7 @@ func (s *server) initRestic(config api.ObjectStorageProviderConfig) error {
 	}

 	// set the env vars that restic uses for creds purposes
-	if config.Name == string(restic.AzureBackend) {
+	if providerName == string(restic.AzureBackend) {
 		os.Setenv("AZURE_ACCOUNT_NAME", os.Getenv("AZURE_STORAGE_ACCOUNT_ID"))
 		os.Setenv("AZURE_ACCOUNT_KEY", os.Getenv("AZURE_STORAGE_KEY"))
 	}
@@ -574,18 +541,12 @@ func (s *server) initRestic(config api.ObjectStorageProviderConfig) error {
 	return nil
 }

-func (s *server) runControllers(config *api.Config) error {
+func (s *server) runControllers(config *api.Config, defaultBackupLocation *api.BackupStorageLocation) error {
 	s.logger.Info("Starting controllers")

 	ctx := s.ctx
 	var wg sync.WaitGroup

-	cloudBackupCacheResyncPeriod := durationMin(controller.GCSyncPeriod, s.config.backupSyncPeriod)
-	s.logger.Infof("Caching cloud backups every %s", cloudBackupCacheResyncPeriod)
-
-	liveBackupLister := cloudprovider.NewLiveBackupLister(s.logger, s.objectStore)
-	cachedBackupLister := cloudprovider.NewBackupCache(ctx, liveBackupLister, cloudBackupCacheResyncPeriod, s.logger)
-
 	go func() {
 		metricsMux := http.NewServeMux()
 		metricsMux.Handle("/metrics", promhttp.Handler())
@@ -597,13 +558,18 @@ func (s *server) runControllers(config *api.Config) error {
 	s.metrics = metrics.NewServerMetrics()
 	s.metrics.RegisterAllMetrics()

+	newPluginManager := func(logger logrus.FieldLogger) plugin.Manager {
+		return plugin.NewManager(logger, s.logLevel, s.pluginRegistry)
+	}
+
 	backupSyncController := controller.NewBackupSyncController(
 		s.arkClient.ArkV1(),
-		cachedBackupLister,
-		config.BackupStorageProvider.Bucket,
+		s.sharedInformerFactory.Ark().V1().Backups(),
+		s.sharedInformerFactory.Ark().V1().BackupStorageLocations(),
 		s.config.backupSyncPeriod,
 		s.namespace,
-		s.sharedInformerFactory.Ark().V1().Backups(),
+		s.config.defaultBackupLocation,
+		newPluginManager,
 		s.logger,
 	)
 	wg.Add(1)
@@ -631,13 +597,13 @@ func (s *server) runControllers(config *api.Config) error {
 		s.sharedInformerFactory.Ark().V1().Backups(),
 		s.arkClient.ArkV1(),
 		backupper,
-		config.BackupStorageProvider.CloudProviderConfig,
-		config.BackupStorageProvider.Bucket,
 		s.blockStore != nil,
 		s.logger,
 		s.logLevel,
-		s.pluginRegistry,
+		newPluginManager,
 		backupTracker,
+		s.sharedInformerFactory.Ark().V1().BackupStorageLocations(),
+		s.config.defaultBackupLocation,
 		s.metrics,
 	)
 	wg.Add(1)
@@ -678,13 +644,13 @@ func (s *server) runControllers(config *api.Config) error {
 		s.arkClient.ArkV1(), // deleteBackupRequestClient
 		s.arkClient.ArkV1(), // backupClient
 		s.blockStore,
-		s.objectStore,
-		config.BackupStorageProvider.Bucket,
 		s.sharedInformerFactory.Ark().V1().Restores(),
 		s.arkClient.ArkV1(), // restoreClient
 		backupTracker,
 		s.resticManager,
 		s.sharedInformerFactory.Ark().V1().PodVolumeBackups(),
+		s.sharedInformerFactory.Ark().V1().BackupStorageLocations(),
+		newPluginManager,
 	)
 	wg.Add(1)
 	go func() {
@@ -713,13 +679,13 @@ func (s *server) runControllers(config *api.Config) error {
 		s.arkClient.ArkV1(),
 		s.arkClient.ArkV1(),
 		restorer,
-		config.BackupStorageProvider.CloudProviderConfig,
-		config.BackupStorageProvider.Bucket,
 		s.sharedInformerFactory.Ark().V1().Backups(),
+		s.sharedInformerFactory.Ark().V1().BackupStorageLocations(),
 		s.blockStore != nil,
 		s.logger,
 		s.logLevel,
-		s.pluginRegistry,
+		newPluginManager,
+		s.config.defaultBackupLocation,
 		s.metrics,
 	)

@@ -733,8 +699,9 @@ func (s *server) runControllers(config *api.Config) error {
 		s.arkClient.ArkV1(),
 		s.sharedInformerFactory.Ark().V1().DownloadRequests(),
 		s.sharedInformerFactory.Ark().V1().Restores(),
-		s.objectStore,
-		config.BackupStorageProvider.Bucket,
+		s.sharedInformerFactory.Ark().V1().BackupStorageLocations(),
+		s.sharedInformerFactory.Ark().V1().Backups(),
+		newPluginManager,
 		s.logger,
 	)
 	wg.Add(1)
@@ -748,7 +715,7 @@ func (s *server) runControllers(config *api.Config) error {
 		s.logger,
 		s.sharedInformerFactory.Ark().V1().ResticRepositories(),
 		s.arkClient.ArkV1(),
-		config.BackupStorageProvider,
+		defaultBackupLocation,
 		s.resticManager,
 	)
 	wg.Add(1)
@@ -763,7 +730,7 @@ func (s *server) runControllers(config *api.Config) error {
 	// SHARED INFORMERS HAVE TO BE STARTED AFTER ALL CONTROLLERS
 	go s.sharedInformerFactory.Start(ctx.Done())

-	// Remove this sometime after v0.8.0
+	// TODO(1.0): remove
 	cache.WaitForCacheSync(ctx.Done(), s.sharedInformerFactory.Ark().V1().Backups().Informer().HasSynced)
 	s.removeDeprecatedGCFinalizer()

@@ -777,9 +744,10 @@ func (s *server) runControllers(config *api.Config) error {
 	return nil
 }

-const gcFinalizer = "gc.ark.heptio.com"
-
+// TODO(1.0): remove
 func (s *server) removeDeprecatedGCFinalizer() {
+	const gcFinalizer = "gc.ark.heptio.com"
+
 	backups, err := s.sharedInformerFactory.Ark().V1().Backups().Lister().List(labels.Everything())
 	if err != nil {
 		s.logger.WithError(errors.WithStack(err)).Error("error listing backups from cache - unable to remove old finalizers")
@@ -21,19 +21,19 @@ import (
 	"sort"
 	"strings"

-	"github.com/heptio/ark/pkg/apis/ark/v1"
+	arkv1api "github.com/heptio/ark/pkg/apis/ark/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 // DescribeBackup describes a backup in human-readable format.
-func DescribeBackup(backup *v1.Backup, deleteRequests []v1.DeleteBackupRequest, podVolumeBackups []v1.PodVolumeBackup, volumeDetails bool) string {
+func DescribeBackup(backup *arkv1api.Backup, deleteRequests []arkv1api.DeleteBackupRequest, podVolumeBackups []arkv1api.PodVolumeBackup, volumeDetails bool) string {
 	return Describe(func(d *Describer) {
 		d.DescribeMetadata(backup.ObjectMeta)

 		d.Println()
 		phase := backup.Status.Phase
 		if phase == "" {
-			phase = v1.BackupPhaseNew
+			phase = arkv1api.BackupPhaseNew
 		}
 		d.Printf("Phase:\t%s\n", phase)

@@ -56,7 +56,7 @@ func DescribeBackup(backup *v1.Backup, deleteRequests []v1.DeleteBackupRequest,
 }

 // DescribeBackupSpec describes a backup spec in human-readable format.
-func DescribeBackupSpec(d *Describer, spec v1.BackupSpec) {
+func DescribeBackupSpec(d *Describer, spec arkv1api.BackupSpec) {
 	// TODO make a helper for this and use it in all the describers.
 	d.Printf("Namespaces:\n")
 	var s string
@@ -97,6 +97,9 @@ func DescribeBackupSpec(d *Describer, spec v1.BackupSpec) {
 	}
 	d.Printf("Label selector:\t%s\n", s)

+	d.Println()
+	d.Printf("Storage Location:\t%s\n", spec.StorageLocation)
+
 	d.Println()
 	d.Printf("Snapshot PVs:\t%s\n", BoolPointerString(spec.SnapshotVolumes, "false", "true", "auto"))

@@ -164,7 +167,7 @@ func DescribeBackupSpec(d *Describer, spec v1.BackupSpec) {
 }

 // DescribeBackupStatus describes a backup status in human-readable format.
-func DescribeBackupStatus(d *Describer, status v1.BackupStatus) {
+func DescribeBackupStatus(d *Describer, status arkv1api.BackupStatus) {
 	d.Printf("Backup Format Version:\t%d\n", status.Version)

 	d.Println()
@@ -213,7 +216,7 @@ func DescribeBackupStatus(d *Describer, status v1.BackupStatus) {
 }

 // DescribeDeleteBackupRequests describes delete backup requests in human-readable format.
-func DescribeDeleteBackupRequests(d *Describer, requests []v1.DeleteBackupRequest) {
+func DescribeDeleteBackupRequests(d *Describer, requests []arkv1api.DeleteBackupRequest) {
 	d.Printf("Deletion Attempts")
 	if count := failedDeletionCount(requests); count > 0 {
 		d.Printf(" (%d failed)", count)
@@ -238,10 +241,10 @@ func DescribeDeleteBackupRequests(d *Describer, requests []v1.DeleteBackupReques
 	}
 }

-func failedDeletionCount(requests []v1.DeleteBackupRequest) int {
+func failedDeletionCount(requests []arkv1api.DeleteBackupRequest) int {
 	var count int
 	for _, req := range requests {
-		if req.Status.Phase == v1.DeleteBackupRequestPhaseProcessed && len(req.Status.Errors) > 0 {
+		if req.Status.Phase == arkv1api.DeleteBackupRequestPhaseProcessed && len(req.Status.Errors) > 0 {
 			count++
 		}
 	}
@@ -249,7 +252,7 @@ func failedDeletionCount(requests []v1.DeleteBackupRequest) int {
 }

 // DescribePodVolumeBackups describes pod volume backups in human-readable format.
-func DescribePodVolumeBackups(d *Describer, backups []v1.PodVolumeBackup, details bool) {
+func DescribePodVolumeBackups(d *Describer, backups []arkv1api.PodVolumeBackup, details bool) {
 	if details {
 		d.Printf("Restic Backups:\n")
 	} else {
@@ -261,10 +264,10 @@ func DescribePodVolumeBackups(d *Describer, backups []v1.PodVolumeBackup, detail

 	// go through phases in a specific order
 	for _, phase := range []string{
-		string(v1.PodVolumeBackupPhaseCompleted),
-		string(v1.PodVolumeBackupPhaseFailed),
+		string(arkv1api.PodVolumeBackupPhaseCompleted),
+		string(arkv1api.PodVolumeBackupPhaseFailed),
 		"In Progress",
-		string(v1.PodVolumeBackupPhaseNew),
+		string(arkv1api.PodVolumeBackupPhaseNew),
 	} {
 		if len(backupsByPhase[phase]) == 0 {
 			continue
@@ -293,15 +296,15 @@ func DescribePodVolumeBackups(d *Describer, backups []v1.PodVolumeBackup, detail
 	}
 }

-func groupByPhase(backups []v1.PodVolumeBackup) map[string][]v1.PodVolumeBackup {
-	backupsByPhase := make(map[string][]v1.PodVolumeBackup)
+func groupByPhase(backups []arkv1api.PodVolumeBackup) map[string][]arkv1api.PodVolumeBackup {
+	backupsByPhase := make(map[string][]arkv1api.PodVolumeBackup)

-	phaseToGroup := map[v1.PodVolumeBackupPhase]string{
-		v1.PodVolumeBackupPhaseCompleted:  string(v1.PodVolumeBackupPhaseCompleted),
-		v1.PodVolumeBackupPhaseFailed:     string(v1.PodVolumeBackupPhaseFailed),
-		v1.PodVolumeBackupPhaseInProgress: "In Progress",
-		v1.PodVolumeBackupPhaseNew:        string(v1.PodVolumeBackupPhaseNew),
-		"":                                string(v1.PodVolumeBackupPhaseNew),
+	phaseToGroup := map[arkv1api.PodVolumeBackupPhase]string{
+		arkv1api.PodVolumeBackupPhaseCompleted:  string(arkv1api.PodVolumeBackupPhaseCompleted),
+		arkv1api.PodVolumeBackupPhaseFailed:     string(arkv1api.PodVolumeBackupPhaseFailed),
+		arkv1api.PodVolumeBackupPhaseInProgress: "In Progress",
+		arkv1api.PodVolumeBackupPhaseNew:        string(arkv1api.PodVolumeBackupPhaseNew),
+		"":                                      string(arkv1api.PodVolumeBackupPhaseNew),
 	}

 	for _, backup := range backups {
@@ -27,14 +27,14 @@ import (
    "k8s.io/apimachinery/pkg/util/duration"
    "k8s.io/kubernetes/pkg/printers"

    "github.com/heptio/ark/pkg/apis/ark/v1"
    arkv1api "github.com/heptio/ark/pkg/apis/ark/v1"
)

var (
    backupColumns = []string{"NAME", "STATUS", "CREATED", "EXPIRES", "SELECTOR"}
    backupColumns = []string{"NAME", "STATUS", "CREATED", "EXPIRES", "STORAGE LOCATION", "SELECTOR"}
)

func printBackupList(list *v1.BackupList, w io.Writer, options printers.PrintOptions) error {
func printBackupList(list *arkv1api.BackupList, w io.Writer, options printers.PrintOptions) error {
    sortBackupsByPrefixAndTimestamp(list)

    for i := range list.Items {

@@ -45,7 +45,7 @@ func printBackupList(list *v1.BackupList, w io.Writer, options printers.PrintOptions) error {
    return nil
}

func sortBackupsByPrefixAndTimestamp(list *v1.BackupList) {
func sortBackupsByPrefixAndTimestamp(list *arkv1api.BackupList) {
    // sort by default alphabetically, but if backups stem from a common schedule
    // (detected by the presence of a 14-digit timestamp suffix), then within that
    // group, sort by newest to oldest (i.e. prefix ASC, suffix DESC)

@@ -70,7 +70,7 @@ func sortBackupsByPrefixAndTimestamp(list *v1.BackupList) {
    })
}

func printBackup(backup *v1.Backup, w io.Writer, options printers.PrintOptions) error {
func printBackup(backup *arkv1api.Backup, w io.Writer, options printers.PrintOptions) error {
    name := printers.FormatResourceName(options.Kind, backup.Name, options.WithKind)

    if options.WithNamespace {

@@ -86,13 +86,15 @@ func printBackup(backup *v1.Backup, w io.Writer, options printers.PrintOptions) error {

    status := backup.Status.Phase
    if status == "" {
        status = v1.BackupPhaseNew
        status = arkv1api.BackupPhaseNew
    }
    if backup.DeletionTimestamp != nil && !backup.DeletionTimestamp.Time.IsZero() {
        status = "Deleting"
    }

    if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s", name, status, backup.CreationTimestamp.Time, humanReadableTimeFromNow(expiration), metav1.FormatLabelSelector(backup.Spec.LabelSelector)); err != nil {
    location := backup.Spec.StorageLocation

    if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s", name, status, backup.CreationTimestamp.Time, humanReadableTimeFromNow(expiration), location, metav1.FormatLabelSelector(backup.Spec.LabelSelector)); err != nil {
        return err
    }
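Note on the hunk above: `backupColumns` and the `Fprintf` format string must stay in sync, since the table is rendered through a tabwriter. A minimal, self-contained sketch (illustrative only, not code from this PR) of how the new STORAGE LOCATION column lines up:

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
	// Header and row cell counts must match, mirroring backupColumns
	// and the Fprintf format string in printBackup.
	fmt.Fprintln(w, "NAME\tSTATUS\tCREATED\tEXPIRES\tSTORAGE LOCATION\tSELECTOR")
	fmt.Fprintln(w, "backup-1\tCompleted\t2018-08-01\t29d\tdefault\t<none>")
	w.Flush()
}
```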
@@ -0,0 +1,71 @@
/*
Copyright 2018 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package output

import (
    "fmt"
    "io"

    "k8s.io/kubernetes/pkg/printers"

    "github.com/heptio/ark/pkg/apis/ark/v1"
)

var (
    backupStorageLocationColumns = []string{"NAME", "PROVIDER", "BUCKET/PREFIX"}
)

func printBackupStorageLocationList(list *v1.BackupStorageLocationList, w io.Writer, options printers.PrintOptions) error {
    for i := range list.Items {
        if err := printBackupStorageLocation(&list.Items[i], w, options); err != nil {
            return err
        }
    }
    return nil
}

func printBackupStorageLocation(location *v1.BackupStorageLocation, w io.Writer, options printers.PrintOptions) error {
    name := printers.FormatResourceName(options.Kind, location.Name, options.WithKind)

    if options.WithNamespace {
        if _, err := fmt.Fprintf(w, "%s\t", location.Namespace); err != nil {
            return err
        }
    }

    bucketAndPrefix := location.Spec.ObjectStorage.Bucket
    if location.Spec.ObjectStorage.Prefix != "" {
        bucketAndPrefix += "/" + location.Spec.ObjectStorage.Prefix
    }

    if _, err := fmt.Fprintf(
        w,
        "%s\t%s\t%s",
        name,
        location.Spec.Provider,
        bucketAndPrefix,
    ); err != nil {
        return err
    }

    if _, err := fmt.Fprint(w, printers.AppendLabels(location.Labels, options.ColumnLabels)); err != nil {
        return err
    }

    _, err := fmt.Fprint(w, printers.AppendAllLabels(options.ShowLabels, location.Labels))
    return err
}
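The BUCKET/PREFIX cell in this new printer joins the two values with a slash only when a prefix is set. A small sketch of that rule with simplified, assumed types (not the PR's own code):

```go
package main

import "fmt"

type objectStorage struct {
	Bucket string
	Prefix string
}

// bucketAndPrefix mirrors the rendering rule above: "bucket" alone,
// or "bucket/prefix" when a prefix is configured.
func bucketAndPrefix(o objectStorage) string {
	s := o.Bucket
	if o.Prefix != "" {
		s += "/" + o.Prefix
	}
	return s
}

func main() {
	fmt.Println(bucketAndPrefix(objectStorage{Bucket: "ark", Prefix: "cluster-1"})) // ark/cluster-1
	fmt.Println(bucketAndPrefix(objectStorage{Bucket: "ark"}))                      // ark
}
```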
@@ -143,6 +143,8 @@ func printTable(cmd *cobra.Command, obj runtime.Object) (bool, error) {
    printer.Handler(scheduleColumns, nil, printScheduleList)
    printer.Handler(resticRepoColumns, nil, printResticRepo)
    printer.Handler(resticRepoColumns, nil, printResticRepoList)
    printer.Handler(backupStorageLocationColumns, nil, printBackupStorageLocation)
    printer.Handler(backupStorageLocationColumns, nil, printBackupStorageLocationList)

    err = printer.PrintObj(obj, os.Stdout)
    if err != nil {
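Both the single-object and the list print functions are registered above; the table printer dispatches on the concrete object type, so omitting either registration would drop that case to a fallback printer. A simplified, self-contained sketch of the dispatch idea (not the vendored k8s printers API):

```go
package main

import "fmt"

type printFunc func(obj interface{}) error

type tablePrinter struct{ handlers map[string]printFunc }

// Handler registers one print function per concrete kind.
func (p *tablePrinter) Handler(kind string, fn printFunc) { p.handlers[kind] = fn }

// PrintObj looks up the handler for the object's kind; an unregistered
// kind would fall through to an error (or a generic printer in k8s).
func (p *tablePrinter) PrintObj(kind string, obj interface{}) error {
	fn, ok := p.handlers[kind]
	if !ok {
		return fmt.Errorf("no handler for %s", kind)
	}
	return fn(obj)
}

func main() {
	p := &tablePrinter{handlers: map[string]printFunc{}}
	p.Handler("BackupStorageLocation", func(obj interface{}) error { fmt.Println("location:", obj); return nil })
	p.Handler("BackupStorageLocationList", func(obj interface{}) error { fmt.Println("list:", obj); return nil })
	_ = p.PrintObj("BackupStorageLocation", "default")
}
```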
@@ -57,57 +57,53 @@ import (
const backupVersion = 1

type backupController struct {
    backupper         backup.Backupper
    objectStoreConfig api.CloudProviderConfig
    bucket            string
    pvProviderExists  bool
    lister            listers.BackupLister
    listerSynced      cache.InformerSynced
    client            arkv1client.BackupsGetter
    syncHandler       func(backupName string) error
    queue             workqueue.RateLimitingInterface
    clock             clock.Clock
    logger            logrus.FieldLogger
    logLevel          logrus.Level
    pluginRegistry    plugin.Registry
    backupTracker     BackupTracker
    metrics           *metrics.ServerMetrics

    newPluginManager func(logger logrus.FieldLogger, logLevel logrus.Level, pluginRegistry plugin.Registry) plugin.Manager
    backupper                  backup.Backupper
    pvProviderExists           bool
    lister                     listers.BackupLister
    listerSynced               cache.InformerSynced
    client                     arkv1client.BackupsGetter
    syncHandler                func(backupName string) error
    queue                      workqueue.RateLimitingInterface
    clock                      clock.Clock
    logger                     logrus.FieldLogger
    logLevel                   logrus.Level
    newPluginManager           func(logrus.FieldLogger) plugin.Manager
    backupTracker              BackupTracker
    backupLocationLister       listers.BackupStorageLocationLister
    backupLocationListerSynced cache.InformerSynced
    defaultBackupLocation      string
    metrics                    *metrics.ServerMetrics
}

func NewBackupController(
    backupInformer informers.BackupInformer,
    client arkv1client.BackupsGetter,
    backupper backup.Backupper,
    objectStoreConfig api.CloudProviderConfig,
    bucket string,
    pvProviderExists bool,
    logger logrus.FieldLogger,
    logLevel logrus.Level,
    pluginRegistry plugin.Registry,
    newPluginManager func(logrus.FieldLogger) plugin.Manager,
    backupTracker BackupTracker,
    backupLocationInformer informers.BackupStorageLocationInformer,
    defaultBackupLocation string,
    metrics *metrics.ServerMetrics,
) Interface {
    c := &backupController{
        backupper:         backupper,
        objectStoreConfig: objectStoreConfig,
        bucket:            bucket,
        pvProviderExists:  pvProviderExists,
        lister:            backupInformer.Lister(),
        listerSynced:      backupInformer.Informer().HasSynced,
        client:            client,
        queue:             workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "backup"),
        clock:             &clock.RealClock{},
        logger:            logger,
        logLevel:          logLevel,
        pluginRegistry:    pluginRegistry,
        backupTracker:     backupTracker,
        metrics:           metrics,

        newPluginManager: func(logger logrus.FieldLogger, logLevel logrus.Level, pluginRegistry plugin.Registry) plugin.Manager {
            return plugin.NewManager(logger, logLevel, pluginRegistry)
        },
        backupper:                  backupper,
        pvProviderExists:           pvProviderExists,
        lister:                     backupInformer.Lister(),
        listerSynced:               backupInformer.Informer().HasSynced,
        client:                     client,
        queue:                      workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "backup"),
        clock:                      &clock.RealClock{},
        logger:                     logger,
        logLevel:                   logLevel,
        newPluginManager:           newPluginManager,
        backupTracker:              backupTracker,
        backupLocationLister:       backupLocationInformer.Lister(),
        backupLocationListerSynced: backupLocationInformer.Informer().HasSynced,
        defaultBackupLocation:      defaultBackupLocation,
        metrics:                    metrics,
    }

    c.syncHandler = c.processBackup
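The constructor now accepts a `newPluginManager` factory instead of a `plugin.Registry` plus log level, so tests can inject a fake manager through the same parameter that production wiring uses. A minimal sketch of the pattern, with assumed simplified interfaces (not the real plugin API):

```go
package main

import "fmt"

type Manager interface{ GetObjectStore(provider string) (string, error) }

type realManager struct{}

func (realManager) GetObjectStore(p string) (string, error) { return "real:" + p, nil }

type fakeManager struct{}

func (fakeManager) GetObjectStore(p string) (string, error) { return "fake:" + p, nil }

// controller depends only on the factory, never on a concrete manager.
type controller struct{ newPluginManager func() Manager }

func main() {
	prod := controller{newPluginManager: func() Manager { return realManager{} }}
	test := controller{newPluginManager: func() Manager { return fakeManager{} }}
	fmt.Println(prod.newPluginManager().GetObjectStore("aws"))
	fmt.Println(test.newPluginManager().GetObjectStore("aws"))
}
```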
@@ -165,7 +161,7 @@ func (controller *backupController) Run(ctx context.Context, numWorkers int) error {
    defer controller.logger.Info("Shutting down BackupController")

    controller.logger.Info("Waiting for caches to sync")
    if !cache.WaitForCacheSync(ctx.Done(), controller.listerSynced) {
    if !cache.WaitForCacheSync(ctx.Done(), controller.listerSynced, controller.backupLocationListerSynced) {
        return errors.New("timed out waiting for caches to sync")
    }
    controller.logger.Info("Caches are synced")
@@ -259,8 +255,9 @@ func (controller *backupController) processBackup(key string) error {
        backup.Status.Expiration = metav1.NewTime(controller.clock.Now().Add(backup.Spec.TTL.Duration))
    }

    var backupLocation *api.BackupStorageLocation
    // validation
    if backup.Status.ValidationErrors = controller.getValidationErrors(backup); len(backup.Status.ValidationErrors) > 0 {
    if backupLocation, backup.Status.ValidationErrors = controller.getLocationAndValidate(backup, controller.defaultBackupLocation); len(backup.Status.ValidationErrors) > 0 {
        backup.Status.Phase = api.BackupPhaseFailedValidation
    } else {
        backup.Status.Phase = api.BackupPhaseInProgress
@@ -287,7 +284,7 @@ func (controller *backupController) processBackup(key string) error {
    backupScheduleName := backup.GetLabels()["ark-schedule"]
    controller.metrics.RegisterBackupAttempt(backupScheduleName)

    if err := controller.runBackup(backup, controller.bucket); err != nil {
    if err := controller.runBackup(backup, backupLocation); err != nil {
        logContext.WithError(err).Error("backup failed")
        backup.Status.Phase = api.BackupPhaseFailed
        controller.metrics.RegisterBackupFailed(backupScheduleName)
@@ -327,7 +324,7 @@ func patchBackup(original, updated *api.Backup, client arkv1client.BackupsGetter) {
    return res, nil
}

func (controller *backupController) getValidationErrors(itm *api.Backup) []string {
func (controller *backupController) getLocationAndValidate(itm *api.Backup, defaultBackupLocation string) (*api.BackupStorageLocation, []string) {
    var validationErrors []string

    for _, err := range collections.ValidateIncludesExcludes(itm.Spec.IncludedResources, itm.Spec.ExcludedResources) {
@@ -342,10 +339,26 @@ func (controller *backupController) getValidationErrors(itm *api.Backup) []string {
        validationErrors = append(validationErrors, "Server is not configured for PV snapshots")
    }

    return validationErrors
    if itm.Spec.StorageLocation == "" {
        itm.Spec.StorageLocation = defaultBackupLocation
    }

    // add the storage location as a label for easy filtering later.
    if itm.Labels == nil {
        itm.Labels = make(map[string]string)
    }
    itm.Labels[api.StorageLocationLabel] = itm.Spec.StorageLocation

    var backupLocation *api.BackupStorageLocation
    backupLocation, err := controller.backupLocationLister.BackupStorageLocations(itm.Namespace).Get(itm.Spec.StorageLocation)
    if err != nil {
        validationErrors = append(validationErrors, fmt.Sprintf("Error getting backup storage location: %v", err))
    }

    return backupLocation, validationErrors
}

func (controller *backupController) runBackup(backup *api.Backup, bucket string) error {
func (controller *backupController) runBackup(backup *api.Backup, backupLocation *api.BackupStorageLocation) error {
    log := controller.logger.WithField("backup", kubeutil.NamespaceAndName(backup))
    log.Info("Starting backup")
    backup.Status.StartTimestamp.Time = controller.clock.Now()
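`getLocationAndValidate` defaults an empty `Spec.StorageLocation` to the server's default location and mirrors the result into a label, which later listers use for filtering. That defaulting-and-labeling step, sketched with simplified types (the label key below is an assumption; the real code uses `api.StorageLocationLabel`):

```go
package main

import "fmt"

const storageLocationLabel = "ark.heptio.com/storage-location" // assumed label key, for illustration

type backup struct {
	StorageLocation string
	Labels          map[string]string
}

// defaultAndLabel fills in the default location when none was requested,
// then stamps the chosen location onto the labels for easy filtering.
func defaultAndLabel(b *backup, defaultLocation string) {
	if b.StorageLocation == "" {
		b.StorageLocation = defaultLocation
	}
	if b.Labels == nil {
		b.Labels = map[string]string{}
	}
	b.Labels[storageLocationLabel] = b.StorageLocation
}

func main() {
	b := &backup{}
	defaultAndLabel(b, "default")
	fmt.Println(b.StorageLocation, b.Labels[storageLocationLabel]) // default default
}
```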
@@ -368,7 +381,7 @@ func (controller *backupController) runBackup(backup *api.Backup, bucket string) error {

    log.Info("Starting backup")

    pluginManager := controller.newPluginManager(log, log.Level, controller.pluginRegistry)
    pluginManager := controller.newPluginManager(log)
    defer pluginManager.CleanupClients()

    backupFile, err := ioutil.TempFile("", "")

@@ -382,7 +395,7 @@ func (controller *backupController) runBackup(backup *api.Backup, bucket string) error {
        return err
    }

    objectStore, err := getObjectStore(controller.objectStoreConfig, pluginManager)
    objectStore, err := getObjectStoreForLocation(backupLocation, pluginManager)
    if err != nil {
        return err
    }
@@ -424,7 +437,7 @@ func (controller *backupController) runBackup(backup *api.Backup, bucket string) error {
        controller.logger.WithError(err).Error("error closing gzippedLogFile")
    }

    if err := cloudprovider.UploadBackup(log, objectStore, bucket, backup.Name, backupJSONToUpload, backupFileToUpload, logFile); err != nil {
    if err := cloudprovider.UploadBackup(log, objectStore, backupLocation.Spec.ObjectStorage.Bucket, backup.Name, backupJSONToUpload, backupFileToUpload, logFile); err != nil {
        errs = append(errs, err)
    }
@@ -441,17 +454,27 @@ func (controller *backupController) runBackup(backup *api.Backup, bucket string) error {
}

// TODO(ncdc): move this to a better location that isn't backup specific
func getObjectStore(cloudConfig api.CloudProviderConfig, manager plugin.Manager) (cloudprovider.ObjectStore, error) {
    if cloudConfig.Name == "" {
        return nil, errors.New("object storage provider name must not be empty")
func getObjectStoreForLocation(location *api.BackupStorageLocation, manager plugin.Manager) (cloudprovider.ObjectStore, error) {
    if location.Spec.Provider == "" {
        return nil, errors.New("backup storage location provider name must not be empty")
    }

    objectStore, err := manager.GetObjectStore(cloudConfig.Name)
    objectStore, err := manager.GetObjectStore(location.Spec.Provider)
    if err != nil {
        return nil, err
    }

    if err := objectStore.Init(cloudConfig.Config); err != nil {
    // add the bucket name to the config map so that object stores can use
    // it when initializing. The AWS object store uses this to determine the
    // bucket's region when setting up its client.
    if location.Spec.ObjectStorage != nil {
        if location.Spec.Config == nil {
            location.Spec.Config = make(map[string]string)
        }
        location.Spec.Config["bucket"] = location.Spec.ObjectStorage.Bucket
    }

    if err := objectStore.Init(location.Spec.Config); err != nil {
        return nil, err
    }
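Before `Init`, the bucket name is copied into `location.Spec.Config` so a provider plugin can consult it while initializing, e.g. to discover the bucket's region. An illustrative sketch with a hypothetical provider and a simplified `Init` (not the real ObjectStore interface):

```go
package main

import (
	"errors"
	"fmt"
)

type objectStore struct{ region string }

// Init reads the injected "bucket" key; a real AWS store would query
// the S3 API for the bucket's region here before building its client.
func (o *objectStore) Init(config map[string]string) error {
	bucket, ok := config["bucket"]
	if !ok {
		return errors.New("bucket not set in config")
	}
	o.region = "region-for-" + bucket
	return nil
}

func main() {
	o := &objectStore{}
	if err := o.Init(map[string]string{"bucket": "ark"}); err != nil {
		panic(err)
	}
	fmt.Println(o.region)
}
```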
@@ -25,13 +25,12 @@ import (
    "testing"
    "time"

    "github.com/sirupsen/logrus"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/util/clock"
    core "k8s.io/client-go/testing"

    "github.com/sirupsen/logrus"
    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"
@@ -152,6 +151,24 @@ func TestProcessBackup(t *testing.T) {
            allowSnapshots: true,
            expectBackup:   true,
        },
        {
            name:         "Backup without a location will have it set to the default",
            key:          "heptio-ark/backup1",
            backup:       arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew),
            expectBackup: true,
        },
        {
            name:         "Backup with a location completes",
            key:          "heptio-ark/backup1",
            backup:       arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithStorageLocation("loc1"),
            expectBackup: true,
        },
        {
            name:         "Backup with non-existent location will fail validation",
            key:          "heptio-ark/backup1",
            backup:       arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithStorageLocation("loc2"),
            expectBackup: false,
        },
    }

    for _, test := range tests {
@@ -161,7 +178,6 @@ func TestProcessBackup(t *testing.T) {
            backupper       = &fakeBackupper{}
            sharedInformers = informers.NewSharedInformerFactory(client, 0)
            logger          = logging.DefaultLogger(logrus.DebugLevel)
            pluginRegistry  = plugin.NewRegistry("/dir", logger, logrus.InfoLevel)
            clockTime, _    = time.Parse("Mon Jan 2 15:04:05 2006", "Mon Jan 2 15:04:05 2006")
            objectStore     = &arktest.ObjectStore{}
            pluginManager   = &pluginmocks.Manager{}
@@ -174,20 +190,17 @@ func TestProcessBackup(t *testing.T) {
            sharedInformers.Ark().V1().Backups(),
            client.ArkV1(),
            backupper,
            v1.CloudProviderConfig{Name: "myCloud"},
            "bucket",
            test.allowSnapshots,
            logger,
            logrus.InfoLevel,
            pluginRegistry,
            func(logrus.FieldLogger) plugin.Manager { return pluginManager },
            NewBackupTracker(),
            sharedInformers.Ark().V1().BackupStorageLocations(),
            "default",
            metrics.NewServerMetrics(),
        ).(*backupController)

        c.clock = clock.NewFakeClock(clockTime)
        c.newPluginManager = func(logger logrus.FieldLogger, logLevel logrus.Level, pluginRegistry plugin.Registry) plugin.Manager {
            return pluginManager
        }

        var expiration, startTime time.Time
@@ -224,6 +237,37 @@ func TestProcessBackup(t *testing.T) {
            mock.Anything, // actions
        ).Return(nil)

        defaultLocation := &v1.BackupStorageLocation{
            ObjectMeta: metav1.ObjectMeta{
                Namespace: backup.Namespace,
                Name:      "default",
            },
            Spec: v1.BackupStorageLocationSpec{
                Provider: "myCloud",
                StorageType: v1.StorageType{
                    ObjectStorage: &v1.ObjectStorageLocation{
                        Bucket: "bucket",
                    },
                },
            },
        }
        loc1 := &v1.BackupStorageLocation{
            ObjectMeta: metav1.ObjectMeta{
                Namespace: backup.Namespace,
                Name:      "loc1",
            },
            Spec: v1.BackupStorageLocationSpec{
                Provider: "myCloud",
                StorageType: v1.StorageType{
                    ObjectStorage: &v1.ObjectStorageLocation{
                        Bucket: "bucket",
                    },
                },
            },
        }
        require.NoError(t, sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(defaultLocation))
        require.NoError(t, sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(loc1))

        pluginManager.On("GetBackupItemActions").Return(nil, nil)

        // Ensure we have a CompletionTimestamp when uploading.
@@ -312,9 +356,17 @@ func TestProcessBackup(t *testing.T) {
            StartTimestamp      metav1.Time `json:"startTimestamp"`
            CompletionTimestamp metav1.Time `json:"completionTimestamp"`
        }
        type SpecPatch struct {
            StorageLocation string `json:"storageLocation"`
        }
        type ObjectMetaPatch struct {
            Labels map[string]string `json:"labels"`
        }

        type Patch struct {
            Status StatusPatch `json:"status"`
            Status     StatusPatch     `json:"status"`
            Spec       SpecPatch       `json:"spec,omitempty"`
            ObjectMeta ObjectMetaPatch `json:"metadata,omitempty"`
        }

        decode := func(decoder *json.Decoder) (interface{}, error) {
@@ -324,13 +376,37 @@ func TestProcessBackup(t *testing.T) {
            return *actual, err
        }

        // validate Patch call 1 (setting version, expiration, and phase)
        expected := Patch{
            Status: StatusPatch{
                Version:    1,
                Phase:      v1.BackupPhaseInProgress,
                Expiration: expiration,
            },
        // validate Patch call 1 (setting version, expiration, phase, and storage location)
        var expected Patch
        if test.backup.Spec.StorageLocation == "" {
            expected = Patch{
                Status: StatusPatch{
                    Version:    1,
                    Phase:      v1.BackupPhaseInProgress,
                    Expiration: expiration,
                },
                Spec: SpecPatch{
                    StorageLocation: "default",
                },
                ObjectMeta: ObjectMetaPatch{
                    Labels: map[string]string{
                        v1.StorageLocationLabel: "default",
                    },
                },
            }
        } else {
            expected = Patch{
                Status: StatusPatch{
                    Version:    1,
                    Phase:      v1.BackupPhaseInProgress,
                    Expiration: expiration,
                },
                ObjectMeta: ObjectMetaPatch{
                    Labels: map[string]string{
                        v1.StorageLocationLabel: test.backup.Spec.StorageLocation,
                    },
                },
            }
        }

        arktest.ValidatePatch(t, actions[0], expected, decode)
@@ -28,6 +28,7 @@ import (
    arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1"
    informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
    listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
    "github.com/heptio/ark/pkg/plugin"
    "github.com/heptio/ark/pkg/restic"
    "github.com/heptio/ark/pkg/util/kube"
    "github.com/pkg/errors"
@@ -50,17 +51,16 @@ type backupDeletionController struct {
    deleteBackupRequestLister listers.DeleteBackupRequestLister
    backupClient              arkv1client.BackupsGetter
    blockStore                cloudprovider.BlockStore
    objectStore               cloudprovider.ObjectStore
    bucket                    string
    restoreLister             listers.RestoreLister
    restoreClient             arkv1client.RestoresGetter
    backupTracker             BackupTracker
    resticMgr                 restic.RepositoryManager
    podvolumeBackupLister     listers.PodVolumeBackupLister

    deleteBackupDir    cloudprovider.DeleteBackupDirFunc
    processRequestFunc func(*v1.DeleteBackupRequest) error
    clock              clock.Clock
    backupLocationLister listers.BackupStorageLocationLister
    deleteBackupDir      cloudprovider.DeleteBackupDirFunc
    processRequestFunc   func(*v1.DeleteBackupRequest) error
    clock                clock.Clock
    newPluginManager     func(logrus.FieldLogger) plugin.Manager
}

// NewBackupDeletionController creates a new backup deletion controller.
@@ -70,13 +70,13 @@ func NewBackupDeletionController(
    deleteBackupRequestClient arkv1client.DeleteBackupRequestsGetter,
    backupClient arkv1client.BackupsGetter,
    blockStore cloudprovider.BlockStore,
    objectStore cloudprovider.ObjectStore,
    bucket string,
    restoreInformer informers.RestoreInformer,
    restoreClient arkv1client.RestoresGetter,
    backupTracker BackupTracker,
    resticMgr restic.RepositoryManager,
    podvolumeBackupInformer informers.PodVolumeBackupInformer,
    backupLocationInformer informers.BackupStorageLocationInformer,
    newPluginManager func(logrus.FieldLogger) plugin.Manager,
) Interface {
    c := &backupDeletionController{
        genericController: newGenericController("backup-deletion", logger),
@@ -84,16 +84,19 @@ func NewBackupDeletionController(
        deleteBackupRequestLister: deleteBackupRequestInformer.Lister(),
        backupClient:              backupClient,
        blockStore:                blockStore,
        objectStore:               objectStore,
        bucket:                    bucket,
        restoreLister:             restoreInformer.Lister(),
        restoreClient:             restoreClient,
        backupTracker:             backupTracker,
        resticMgr:                 resticMgr,
        podvolumeBackupLister:     podvolumeBackupInformer.Lister(),
        backupLocationLister:      backupLocationInformer.Lister(),

        podvolumeBackupLister: podvolumeBackupInformer.Lister(),
        deleteBackupDir:       cloudprovider.DeleteBackupDir,
        clock:                 &clock.RealClock{},
        // use variables to refer to these functions so they can be
        // replaced with fakes for testing.
        newPluginManager: newPluginManager,
        deleteBackupDir:  cloudprovider.DeleteBackupDir,

        clock: &clock.RealClock{},
    }

    c.syncHandler = c.processQueueItem
@@ -102,6 +105,7 @@ func NewBackupDeletionController(
        deleteBackupRequestInformer.Informer().HasSynced,
        restoreInformer.Informer().HasSynced,
        podvolumeBackupInformer.Informer().HasSynced,
        backupLocationInformer.Informer().HasSynced,
    )
    c.processRequestFunc = c.processRequest
@@ -240,7 +244,6 @@ func (c *backupDeletionController) processRequest(req *v1.DeleteBackupRequest) error {

    var errs []string

    // Try to delete snapshots
    log.Info("Removing PV snapshots")
    for _, volumeBackup := range backup.Status.VolumeBackups {
        log.WithField("snapshotID", volumeBackup.SnapshotID).Info("Removing snapshot associated with backup")

@@ -249,7 +252,6 @@ func (c *backupDeletionController) processRequest(req *v1.DeleteBackupRequest) error {
        }
    }

    // Try to delete restic snapshots
    log.Info("Removing restic snapshots")
    if deleteErrs := c.deleteResticSnapshots(backup); len(deleteErrs) > 0 {
        for _, err := range deleteErrs {
@@ -257,13 +259,11 @@ func (c *backupDeletionController) processRequest(req *v1.DeleteBackupRequest) error {
        }
    }

    // Try to delete backup from backup storage
    log.Info("Removing backup from backup storage")
    if err := c.deleteBackupDir(log, c.objectStore, c.bucket, backup.Name); err != nil {
        errs = append(errs, errors.Wrap(err, "error deleting backup from backup storage").Error())
    if err := c.deleteBackupFromStorage(backup, log); err != nil {
        errs = append(errs, err.Error())
    }

    // Try to delete restores
    log.Info("Removing restores")
    if restores, err := c.restoreLister.Restores(backup.Namespace).List(labels.Everything()); err != nil {
        log.WithError(errors.WithStack(err)).Error("Error listing restore API objects")
@@ -312,6 +312,27 @@ func (c *backupDeletionController) processRequest(req *v1.DeleteBackupRequest) error {
    return nil
}

func (c *backupDeletionController) deleteBackupFromStorage(backup *v1.Backup, log logrus.FieldLogger) error {
    pluginManager := c.newPluginManager(log)
    defer pluginManager.CleanupClients()

    backupLocation, err := c.backupLocationLister.BackupStorageLocations(backup.Namespace).Get(backup.Spec.StorageLocation)
    if err != nil {
        return errors.WithStack(err)
    }

    objectStore, err := getObjectStoreForLocation(backupLocation, pluginManager)
    if err != nil {
        return err
    }

    if err := c.deleteBackupDir(log, objectStore, backupLocation.Spec.ObjectStorage.Bucket, backup.Name); err != nil {
        return errors.Wrap(err, "error deleting backup from backup storage")
    }

    return nil
}

func (c *backupDeletionController) deleteExistingDeletionRequests(req *v1.DeleteBackupRequest, log logrus.FieldLogger) []error {
    log.Info("Removing existing deletion requests for backup")
    selector := labels.SelectorFromSet(labels.Set(map[string]string{
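With `deleteBackupFromStorage`, deletion no longer uses a single server-wide object store and bucket; it resolves the backup's own storage location first. A condensed sketch of that flow with simplified, assumed types (the real code goes through a lister and the plugin manager):

```go
package main

import "fmt"

type location struct{ Provider, Bucket string }

type store interface{ DeleteDir(bucket, name string) error }

type printStore struct{}

func (printStore) DeleteDir(bucket, name string) error {
	fmt.Printf("deleting %s from %s\n", name, bucket)
	return nil
}

// deleteFromStorage looks up the backup's own location, obtains a store
// for that location's provider, and deletes the backup's directory there.
func deleteFromStorage(locations map[string]location, storageLocation, backupName string) error {
	loc, ok := locations[storageLocation]
	if !ok {
		return fmt.Errorf("backup storage location %q not found", storageLocation)
	}
	var s store = printStore{} // a real impl would come from the plugin manager for loc.Provider
	return s.DeleteDir(loc.Bucket, backupName)
}

func main() {
	locs := map[string]location{"primary": {Provider: "aws", Bucket: "ark"}}
	_ = deleteFromStorage(locs, "primary", "backup-1")
}
```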
@@ -26,10 +26,13 @@ import (
    "github.com/heptio/ark/pkg/cloudprovider"
    "github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
    informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
    "github.com/heptio/ark/pkg/plugin"
    pluginmocks "github.com/heptio/ark/pkg/plugin/mocks"
    arktest "github.com/heptio/ark/pkg/util/test"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -49,13 +52,13 @@ func TestBackupDeletionControllerProcessQueueItem(t *testing.T) {
        client.ArkV1(), // deleteBackupRequestClient
        client.ArkV1(), // backupClient
        nil,            // blockStore
        nil,            // backupService
        "bucket",
        sharedInformers.Ark().V1().Restores(),
        client.ArkV1(), // restoreClient
        NewBackupTracker(),
        nil, // restic repository manager
        sharedInformers.Ark().V1().PodVolumeBackups(),
        sharedInformers.Ark().V1().BackupStorageLocations(),
        nil, // new plugin manager func
    ).(*backupDeletionController)

    // Error splitting key
@@ -109,37 +112,47 @@ type backupDeletionControllerTestData struct {
    client          *fake.Clientset
    sharedInformers informers.SharedInformerFactory
    blockStore      *arktest.FakeBlockStore
    objectStore     *arktest.ObjectStore
    controller      *backupDeletionController
    req             *v1.DeleteBackupRequest
}

func setupBackupDeletionControllerTest(objects ...runtime.Object) *backupDeletionControllerTestData {
    client := fake.NewSimpleClientset(objects...)
    sharedInformers := informers.NewSharedInformerFactory(client, 0)
    blockStore := &arktest.FakeBlockStore{SnapshotsTaken: sets.NewString()}
    req := pkgbackup.NewDeleteBackupRequest("foo", "uid")
    var (
        client          = fake.NewSimpleClientset(objects...)
        sharedInformers = informers.NewSharedInformerFactory(client, 0)
        blockStore      = &arktest.FakeBlockStore{SnapshotsTaken: sets.NewString()}
        pluginManager   = &pluginmocks.Manager{}
        objectStore     = &arktest.ObjectStore{}
        req             = pkgbackup.NewDeleteBackupRequest("foo", "uid")
    )

    data := &backupDeletionControllerTestData{
        client:          client,
        sharedInformers: sharedInformers,
        blockStore:      blockStore,
        objectStore:     objectStore,
        controller: NewBackupDeletionController(
            arktest.NewLogger(),
            sharedInformers.Ark().V1().DeleteBackupRequests(),
            client.ArkV1(), // deleteBackupRequestClient
            client.ArkV1(), // backupClient
            blockStore,
            nil, // objectStore
            "bucket",
            sharedInformers.Ark().V1().Restores(),
            client.ArkV1(), // restoreClient
            NewBackupTracker(),
            nil, // restic repository manager
            sharedInformers.Ark().V1().PodVolumeBackups(),
            sharedInformers.Ark().V1().BackupStorageLocations(),
            func(logrus.FieldLogger) plugin.Manager { return pluginManager },
        ).(*backupDeletionController),

        req: req,
    }

    pluginManager.On("GetObjectStore", "objStoreProvider").Return(objectStore, nil)
    pluginManager.On("CleanupClients").Return(nil)

    req.Namespace = "heptio-ark"
    req.Name = "foo-abcde"
@@ -347,6 +360,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) {
    t.Run("full delete, no errors", func(t *testing.T) {
        backup := arktest.NewTestBackup().WithName("foo").WithSnapshot("pv-1", "snap-1").Backup
        backup.UID = "uid"
        backup.Spec.StorageLocation = "primary"

        restore1 := arktest.NewTestRestore("heptio-ark", "restore-1", v1.RestorePhaseCompleted).WithBackup("foo").Restore
        restore2 := arktest.NewTestRestore("heptio-ark", "restore-2", v1.RestorePhaseCompleted).WithBackup("foo").Restore
@@ -358,6 +372,24 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) {
        td.sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(restore2)
        td.sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(restore3)

        location := &v1.BackupStorageLocation{
            ObjectMeta: metav1.ObjectMeta{
                Namespace: backup.Namespace,
                Name:      backup.Spec.StorageLocation,
            },
            Spec: v1.BackupStorageLocationSpec{
                Provider: "objStoreProvider",
                StorageType: v1.StorageType{
                    ObjectStorage: &v1.ObjectStorageLocation{
                        Bucket: "bucket",
                    },
                },
            },
        }
        require.NoError(t, td.sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(location))

        td.objectStore.On("Init", mock.Anything).Return(nil)

        // Clear out req labels to make sure the controller adds them
        td.req.Labels = make(map[string]string)
@@ -374,8 +406,9 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) {
            return true, backup, nil
        })

        td.controller.deleteBackupDir = func(_ logrus.FieldLogger, _ cloudprovider.ObjectStore, bucket, backupName string) error {
            require.Equal(t, "bucket", bucket)
        td.controller.deleteBackupDir = func(_ logrus.FieldLogger, objectStore cloudprovider.ObjectStore, bucket, backupName string) error {
            require.NotNil(t, objectStore)
            require.Equal(t, location.Spec.ObjectStorage.Bucket, bucket)
            require.Equal(t, td.req.Spec.BackupName, backupName)
            return nil
        }
@@ -561,13 +594,13 @@ func TestBackupDeletionControllerDeleteExpiredRequests(t *testing.T) {
        client.ArkV1(), // deleteBackupRequestClient
        client.ArkV1(), // backupClient
        nil,            // blockStore
        nil,            // backupService
        "bucket",
        sharedInformers.Ark().V1().Restores(),
        client.ArkV1(), // restoreClient
        NewBackupTracker(),
        nil,
        sharedInformers.Ark().V1().PodVolumeBackups(),
        sharedInformers.Ark().V1().BackupStorageLocations(),
        nil, // new plugin manager func
    ).(*backupDeletionController)

    fakeClock := &clock.FakeClock{}
@@ -17,7 +17,6 @@ limitations under the License.
package controller

import (
    "context"
    "time"

    "github.com/pkg/errors"
@@ -27,133 +26,176 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/tools/cache"

    api "github.com/heptio/ark/pkg/apis/ark/v1"
    arkv1api "github.com/heptio/ark/pkg/apis/ark/v1"
    "github.com/heptio/ark/pkg/cloudprovider"
    arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1"
    informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
    listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
    "github.com/heptio/ark/pkg/plugin"
    "github.com/heptio/ark/pkg/util/kube"
    "github.com/heptio/ark/pkg/util/stringslice"
)

type backupSyncController struct {
    client               arkv1client.BackupsGetter
    cloudBackupLister    cloudprovider.BackupLister
    bucket               string
    syncPeriod           time.Duration
    namespace            string
    backupLister         listers.BackupLister
    backupInformerSynced cache.InformerSynced
    logger               logrus.FieldLogger
    *genericController

    client                      arkv1client.BackupsGetter
    backupLister                listers.BackupLister
    backupStorageLocationLister listers.BackupStorageLocationLister
    namespace                   string
    defaultBackupLocation       string
    newPluginManager            func(logrus.FieldLogger) plugin.Manager
    listCloudBackups            func(logrus.FieldLogger, cloudprovider.ObjectStore, string) ([]*arkv1api.Backup, error)
}

func NewBackupSyncController(
    client arkv1client.BackupsGetter,
    cloudBackupLister cloudprovider.BackupLister,
    bucket string,
    backupInformer informers.BackupInformer,
    backupStorageLocationInformer informers.BackupStorageLocationInformer,
    syncPeriod time.Duration,
    namespace string,
    backupInformer informers.BackupInformer,
    defaultBackupLocation string,
    newPluginManager func(logrus.FieldLogger) plugin.Manager,
    logger logrus.FieldLogger,
) Interface {
    if syncPeriod < time.Minute {
        logger.Infof("Provided backup sync period %v is too short. Setting to 1 minute", syncPeriod)
        syncPeriod = time.Minute
    }
    return &backupSyncController{
        client:               client,
        cloudBackupLister:    cloudBackupLister,
        bucket:               bucket,
        syncPeriod:           syncPeriod,
        namespace:            namespace,
        backupLister:         backupInformer.Lister(),
        backupInformerSynced: backupInformer.Informer().HasSynced,
        logger:               logger,
    }
}

// Run is a blocking function that continually runs the object storage -> Ark API
// sync process according to the controller's syncPeriod. It will return when it
// receives on the ctx.Done() channel.
func (c *backupSyncController) Run(ctx context.Context, workers int) error {
    c.logger.Info("Running backup sync controller")
    c.logger.Info("Waiting for caches to sync")
    if !cache.WaitForCacheSync(ctx.Done(), c.backupInformerSynced) {
        return errors.New("timed out waiting for caches to sync")
    c := &backupSyncController{
        genericController:           newGenericController("backup-sync", logger),
        client:                      client,
        namespace:                   namespace,
        defaultBackupLocation:       defaultBackupLocation,
        backupLister:                backupInformer.Lister(),
        backupStorageLocationLister: backupStorageLocationInformer.Lister(),

        // use variables to refer to these functions so they can be
        // replaced with fakes for testing.
        newPluginManager: newPluginManager,
        listCloudBackups: cloudprovider.ListBackups,
    }
    c.logger.Info("Caches are synced")
    wait.Until(c.run, c.syncPeriod, ctx.Done())
    return nil

    c.resyncFunc = c.run
    c.resyncPeriod = syncPeriod
    c.cacheSyncWaiters = []cache.InformerSynced{
        backupInformer.Informer().HasSynced,
        backupStorageLocationInformer.Informer().HasSynced,
    }

    return c
}

const gcFinalizer = "gc.ark.heptio.com"

func (c *backupSyncController) run() {
    c.logger.Info("Syncing backups from object storage")
    backups, err := c.cloudBackupLister.ListBackups(c.bucket)
    c.logger.Info("Syncing backups from backup storage into cluster")

    locations, err := c.backupStorageLocationLister.BackupStorageLocations(c.namespace).List(labels.Everything())
    if err != nil {
        c.logger.WithError(err).Error("error listing backups")
        c.logger.WithError(errors.WithStack(err)).Error("Error getting backup storage locations from lister")
        return
    }
    c.logger.WithField("backupCount", len(backups)).Info("Got backups from object storage")
    // sync the default location first, if it exists
    locations = orderedBackupLocations(locations, c.defaultBackupLocation)

    cloudBackupNames := sets.NewString()
    for _, cloudBackup := range backups {
        logContext := c.logger.WithField("backup", kube.NamespaceAndName(cloudBackup))
        logContext.Info("Syncing backup")
    pluginManager := c.newPluginManager(c.logger)

        cloudBackupNames.Insert(cloudBackup.Name)
    for _, location := range locations {
        log := c.logger.WithField("backupLocation", location.Name)
        log.Info("Syncing backups from backup location")

        // If we're syncing backups made by pre-0.8.0 versions, the server removes all finalizers
        // faster than the sync finishes. Just process them as we find them.
        cloudBackup.Finalizers = stringslice.Except(cloudBackup.Finalizers, gcFinalizer)

        cloudBackup.Namespace = c.namespace
        cloudBackup.ResourceVersion = ""

        // Backup only if backup does not exist in Kubernetes or if we are not able to get the backup for any reason.
        _, err := c.client.Backups(cloudBackup.Namespace).Get(cloudBackup.Name, metav1.GetOptions{})
        objectStore, err := getObjectStoreForLocation(location, pluginManager)
        if err != nil {
            log.WithError(err).Error("Error getting object store for location")
            continue
        }

        backupsInBackupStore, err := c.listCloudBackups(log, objectStore, location.Spec.ObjectStorage.Bucket)
        if err != nil {
            log.WithError(err).Error("Error listing backups in object store")
            continue
        }

        log.WithField("backupCount", len(backupsInBackupStore)).Info("Got backups from object store")

        cloudBackupNames := sets.NewString()
        for _, cloudBackup := range backupsInBackupStore {
            log = log.WithField("backup", kube.NamespaceAndName(cloudBackup))
            log.Debug("Checking cloud backup to see if it needs to be synced into the cluster")

            cloudBackupNames.Insert(cloudBackup.Name)

            // use the controller's namespace when getting the backup because that's where we
            // are syncing backups to, regardless of the namespace of the cloud backup.
            _, err := c.client.Backups(c.namespace).Get(cloudBackup.Name, metav1.GetOptions{})
            if err == nil {
                log.Debug("Backup already exists in cluster")
                continue
            }
            if !kuberrs.IsNotFound(err) {
                logContext.WithError(errors.WithStack(err)).Error("Error getting backup from client, proceeding with backup sync")
                log.WithError(errors.WithStack(err)).Error("Error getting backup from client, proceeding with sync into cluster")
            }

            if _, err := c.client.Backups(cloudBackup.Namespace).Create(cloudBackup); err != nil && !kuberrs.IsAlreadyExists(err) {
                logContext.WithError(errors.WithStack(err)).Error("Error syncing backup from object storage")
            // remove the pre-v0.8.0 gcFinalizer if it exists
            // TODO(1.0): remove this
            cloudBackup.Finalizers = stringslice.Except(cloudBackup.Finalizers, gcFinalizer)
            cloudBackup.Namespace = c.namespace
            cloudBackup.ResourceVersion = ""

            // update the StorageLocation field and label since the name of the location
            // may be different in this cluster than in the cluster that created the
            // backup.
            cloudBackup.Spec.StorageLocation = location.Name
            if cloudBackup.Labels == nil {
                cloudBackup.Labels = make(map[string]string)
            }
            cloudBackup.Labels[arkv1api.StorageLocationLabel] = cloudBackup.Spec.StorageLocation

            _, err = c.client.Backups(cloudBackup.Namespace).Create(cloudBackup)
            switch {
            case err != nil && kuberrs.IsAlreadyExists(err):
                log.Debug("Backup already exists in cluster")
            case err != nil && !kuberrs.IsAlreadyExists(err):
                log.WithError(errors.WithStack(err)).Error("Error syncing backup into cluster")
            default:
                log.Debug("Synced backup into cluster")
            }
        }
    }

    c.deleteUnused(cloudBackupNames)
    return
        c.deleteOrphanedBackups(location.Name, cloudBackupNames, log)
    }
}

// deleteUnused deletes backup objects from Kubernetes if they are complete
// and there is no corresponding backup in the object storage.
func (c *backupSyncController) deleteUnused(cloudBackupNames sets.String) {
    // Backups objects in Kubernetes
    backups, err := c.backupLister.Backups(c.namespace).List(labels.Everything())
// deleteOrphanedBackups deletes backup objects from Kubernetes that have the specified location
// and a phase of Completed, but no corresponding backup in object storage.
func (c *backupSyncController) deleteOrphanedBackups(locationName string, cloudBackupNames sets.String, log logrus.FieldLogger) {
    locationSelector := labels.Set(map[string]string{
        arkv1api.StorageLocationLabel: locationName,
    }).AsSelector()

    backups, err := c.backupLister.Backups(c.namespace).List(locationSelector)
    if err != nil {
        c.logger.WithError(errors.WithStack(err)).Error("Error listing backup from Kubernetes")
        log.WithError(errors.WithStack(err)).Error("Error listing backups from cluster")
        return
    }
    if len(backups) == 0 {
        return
    }

    // For each completed backup object in Kubernetes, delete it if it
    // does not have a corresponding backup in object storage
    for _, backup := range backups {
        if backup.Status.Phase == api.BackupPhaseCompleted && !cloudBackupNames.Has(backup.Name) {
            if err := c.client.Backups(backup.Namespace).Delete(backup.Name, nil); err != nil {
                c.logger.WithError(errors.WithStack(err)).Error("Error deleting unused backup from Kubernetes")
            } else {
                c.logger.Debugf("Deleted backup: %s", backup.Name)
            }
        log = log.WithField("backup", backup.Name)
        if backup.Status.Phase != arkv1api.BackupPhaseCompleted || cloudBackupNames.Has(backup.Name) {
            continue
        }

        if err := c.client.Backups(backup.Namespace).Delete(backup.Name, nil); err != nil {
            log.WithError(errors.WithStack(err)).Error("Error deleting orphaned backup from cluster")
        } else {
            log.Debug("Deleted orphaned backup from cluster")
        }
    }

    return
}
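`orderedBackupLocations` is called in `run` above but its definition isn't part of this hunk. A plausible shape for it, offered as a guess under that assumption (default location first, remaining order preserved), using a simplified location type:

```go
package main

import "fmt"

type backupStorageLocation struct{ Name string }

// orderedBackupLocations moves the location named defaultName to the
// front of the slice, so the default location is synced first.
func orderedBackupLocations(locations []*backupStorageLocation, defaultName string) []*backupStorageLocation {
	ordered := make([]*backupStorageLocation, 0, len(locations))
	for _, l := range locations {
		if l.Name == defaultName {
			ordered = append([]*backupStorageLocation{l}, ordered...)
			continue
		}
		ordered = append(ordered, l)
	}
	return ordered
}

func main() {
	locs := []*backupStorageLocation{{"location-1"}, {"default"}, {"location-2"}}
	for _, l := range orderedBackupLocations(locs, "default") {
		fmt.Println(l.Name) // default, location-1, location-2
	}
}
```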
@ -20,281 +20,354 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
core "k8s.io/client-go/testing"
|
||||
|
||||
"github.com/heptio/ark/pkg/apis/ark/v1"
|
||||
cloudprovidermocks "github.com/heptio/ark/pkg/cloudprovider/mocks"
|
||||
arkv1api "github.com/heptio/ark/pkg/apis/ark/v1"
|
||||
"github.com/heptio/ark/pkg/cloudprovider"
|
||||
"github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
|
||||
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
|
||||
"github.com/heptio/ark/pkg/plugin"
|
||||
pluginmocks "github.com/heptio/ark/pkg/plugin/mocks"
|
||||
"github.com/heptio/ark/pkg/util/stringslice"
|
||||
arktest "github.com/heptio/ark/pkg/util/test"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func defaultLocationsList(namespace string) []*arkv1api.BackupStorageLocation {
|
||||
return []*arkv1api.BackupStorageLocation{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: "location-1",
|
||||
},
|
||||
Spec: arkv1api.BackupStorageLocationSpec{
|
||||
Provider: "objStoreProvider",
|
||||
StorageType: arkv1api.StorageType{
|
||||
ObjectStorage: &arkv1api.ObjectStorageLocation{
|
||||
Bucket: "bucket-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: "location-2",
|
||||
},
|
||||
Spec: arkv1api.BackupStorageLocationSpec{
|
||||
Provider: "objStoreProvider",
|
||||
StorageType: arkv1api.StorageType{
|
||||
ObjectStorage: &arkv1api.ObjectStorageLocation{
|
||||
Bucket: "bucket-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackupSyncControllerRun(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
listBackupsError error
|
||||
cloudBackups []*v1.Backup
|
||||
namespace string
|
||||
existingBackups sets.String
|
||||
name string
|
||||
namespace string
|
||||
locations []*arkv1api.BackupStorageLocation
|
||||
cloudBackups map[string][]*arkv1api.Backup
|
||||
existingBackups []*arkv1api.Backup
|
||||
}{
|
||||
{
|
||||
name: "no cloud backups",
|
||||
},
|
||||
{
|
||||
name: "backup lister returns error on ListBackups",
|
||||
listBackupsError: errors.New("listBackups"),
|
||||
},
|
||||
{
|
||||
name: "normal case",
|
||||
cloudBackups: []*v1.Backup{
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup,
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup,
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").Backup,
|
||||
},
|
||||
name: "normal case",
|
||||
namespace: "ns-1",
|
||||
locations: defaultLocationsList("ns-1"),
|
||||
cloudBackups: map[string][]*arkv1api.Backup{
|
||||
"bucket-1": {
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup,
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup,
|
||||
},
|
||||
"bucket-2": {
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").Backup,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Finalizer gets removed on sync",
|
||||
cloudBackups: []*v1.Backup{
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithFinalizers(gcFinalizer).Backup,
|
||||
},
|
||||
name: "gcFinalizer (only) gets removed on sync",
|
||||
namespace: "ns-1",
|
||||
locations: defaultLocationsList("ns-1"),
|
||||
cloudBackups: map[string][]*arkv1api.Backup{
|
||||
"bucket-1": {
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithFinalizers("a-finalizer", gcFinalizer, "some-other-finalizer").Backup,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Only target finalizer is removed",
|
||||
cloudBackups: []*v1.Backup{
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithFinalizers(gcFinalizer, "blah").Backup,
|
||||
},
|
||||
namespace: "ns-1",
|
||||
},
|
||||
{
|
||||
name: "backups get created in Ark server's namespace",
|
||||
cloudBackups: []*v1.Backup{
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup,
|
||||
arktest.NewTestBackup().WithNamespace("ns-2").WithName("backup-2").Backup,
|
||||
},
|
||||
name: "all synced backups get created in Ark server's namespace",
|
||||
namespace: "heptio-ark",
|
||||
locations: defaultLocationsList("heptio-ark"),
|
||||
cloudBackups: map[string][]*arkv1api.Backup{
|
||||
"bucket-1": {
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup,
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup,
|
||||
},
|
||||
"bucket-2": {
|
||||
arktest.NewTestBackup().WithNamespace("ns-2").WithName("backup-3").Backup,
|
||||
arktest.NewTestBackup().WithNamespace("heptio-ark").WithName("backup-4").Backup,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "normal case with backups that already exist in Kubernetes",
|
||||
cloudBackups: []*v1.Backup{
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup,
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup,
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").Backup,
|
||||
name: "new backups get synced when some cloud backups already exist in the cluster",
|
||||
namespace: "ns-1",
|
||||
locations: defaultLocationsList("ns-1"),
|
||||
cloudBackups: map[string][]*arkv1api.Backup{
|
||||
"bucket-1": {
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup,
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup,
|
||||
},
|
||||
"bucket-2": {
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").Backup,
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-4").Backup,
|
||||
},
|
||||
},
|
||||
existingBackups: []*arkv1api.Backup{
|
||||
// add a label to each existing backup so we can differentiate it from the cloud
|
||||
// backup during verification
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel("i-exist", "true").Backup,
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithLabel("i-exist", "true").Backup,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "backup storage location names and labels get updated",
|
||||
namespace: "ns-1",
|
||||
locations: defaultLocationsList("ns-1"),
|
||||
cloudBackups: map[string][]*arkv1api.Backup{
|
||||
"bucket-1": {
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithStorageLocation("foo").WithLabel(arkv1api.StorageLocationLabel, "foo").Backup,
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup,
|
||||
},
|
||||
"bucket-2": {
|
||||
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithStorageLocation("bar").WithLabel(arkv1api.StorageLocationLabel, "bar").Backup,
|
||||
},
|
||||
},
|
||||
existingBackups: sets.NewString("backup-2", "backup-3"),
|
||||
namespace: "ns-1",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
var (
|
||||
backupLister = &cloudprovidermocks.BackupLister{}
|
||||
client = fake.NewSimpleClientset()
|
||||
sharedInformers = informers.NewSharedInformerFactory(client, 0)
|
||||
logger = arktest.NewLogger()
|
||||
pluginManager = &pluginmocks.Manager{}
|
||||
objectStore = &arktest.ObjectStore{}
|
||||
)
|
||||
|
||||
c := NewBackupSyncController(
|
||||
client.ArkV1(),
|
||||
backupLister,
|
||||
"bucket",
|
||||
sharedInformers.Ark().V1().Backups(),
|
||||
sharedInformers.Ark().V1().BackupStorageLocations(),
|
||||
time.Duration(0),
|
||||
test.namespace,
|
||||
sharedInformers.Ark().V1().Backups(),
|
||||
logger,
|
||||
"",
|
||||
func(logrus.FieldLogger) plugin.Manager { return pluginManager },
|
||||
arktest.NewLogger(),
|
||||
).(*backupSyncController)
|
||||
|
||||
backupLister.On("ListBackups", "bucket").Return(test.cloudBackups, test.listBackupsError)
pluginManager.On("GetObjectStore", "objStoreProvider").Return(objectStore, nil)
pluginManager.On("CleanupClients").Return(nil)

expectedActions := make([]core.Action, 0)
objectStore.On("Init", mock.Anything).Return(nil)

client.PrependReactor("get", "backups", func(action core.Action) (bool, runtime.Object, error) {
getAction := action.(core.GetAction)
if test.existingBackups.Has(getAction.GetName()) {
return true, nil, nil
for _, location := range test.locations {
require.NoError(t, sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(location))
}

c.listCloudBackups = func(_ logrus.FieldLogger, _ cloudprovider.ObjectStore, bucket string) ([]*arkv1api.Backup, error) {
backups, ok := test.cloudBackups[bucket]
if !ok {
return nil, errors.New("bucket not found")
}
// We return nil in place of the found backup object because
// we exclusively check for the error and don't use the object
// returned by the Get / Backups call.
return true, nil, apierrors.NewNotFound(v1.SchemeGroupVersion.WithResource("backups").GroupResource(), getAction.GetName())
})
return backups, nil
}

for _, existingBackup := range test.existingBackups {
require.NoError(t, sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(existingBackup))

_, err := client.ArkV1().Backups(test.namespace).Create(existingBackup)
require.NoError(t, err)
}
client.ClearActions()

c.run()

// we only expect creates for items within the target bucket
for _, cloudBackup := range test.cloudBackups {
// Verify that the run function stripped the GC finalizer
assert.False(t, stringslice.Has(cloudBackup.Finalizers, gcFinalizer))
assert.Equal(t, test.namespace, cloudBackup.Namespace)
for bucket, backups := range test.cloudBackups {
for _, cloudBackup := range backups {
obj, err := client.ArkV1().Backups(test.namespace).Get(cloudBackup.Name, metav1.GetOptions{})
require.NoError(t, err)

actionGet := core.NewGetAction(
v1.SchemeGroupVersion.WithResource("backups"),
test.namespace,
cloudBackup.Name,
)
expectedActions = append(expectedActions, actionGet)
// did this cloud backup already exist in the cluster?
var existing *arkv1api.Backup
for _, obj := range test.existingBackups {
if obj.Name == cloudBackup.Name {
existing = obj
break
}
}

if test.existingBackups.Has(cloudBackup.Name) {
continue
if existing != nil {
// if this cloud backup already exists in the cluster, make sure that what we get from the
// client is the existing backup, not the cloud one.
assert.Equal(t, existing, obj)
} else {
// verify that the GC finalizer is removed
assert.Equal(t, stringslice.Except(cloudBackup.Finalizers, gcFinalizer), obj.Finalizers)

// verify that the storage location field and label are set properly
for _, location := range test.locations {
if location.Spec.ObjectStorage.Bucket == bucket {
assert.Equal(t, location.Name, obj.Spec.StorageLocation)
assert.Equal(t, location.Name, obj.Labels[arkv1api.StorageLocationLabel])
break
}
}
}
}
actionCreate := core.NewCreateAction(
v1.SchemeGroupVersion.WithResource("backups"),
test.namespace,
cloudBackup,
)
expectedActions = append(expectedActions, actionCreate)
}

assert.Equal(t, expectedActions, client.Actions())
})
}
}
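The assertions above pin down the core of the new sync behavior: a backup pulled from object storage is stamped with the name of the `BackupStorageLocation` whose bucket it came from, both in the spec field and in the storage-location label. A minimal sketch of that stamping step, using simplified stand-in types rather than the real Ark API (the helper name, types, and label key below are illustrative only):

```go
package main

import "fmt"

// Stand-ins for the Ark types in github.com/heptio/ark/pkg/apis/ark/v1.
type BackupStorageLocation struct {
	Name   string
	Bucket string
}

type Backup struct {
	Name            string
	StorageLocation string
	Labels          map[string]string
}

// stampLocation is a hypothetical helper mirroring what the test asserts:
// a synced backup records, by name, the location it was found in.
func stampLocation(b *Backup, loc BackupStorageLocation, labelKey string) {
	b.StorageLocation = loc.Name
	if b.Labels == nil {
		b.Labels = map[string]string{}
	}
	b.Labels[labelKey] = loc.Name
}

func main() {
	loc := BackupStorageLocation{Name: "default", Bucket: "bucket"}
	b := &Backup{Name: "backup-1"}
	stampLocation(b, loc, "ark.heptio.com/storage-location") // label key is illustrative, not the real constant
	fmt.Println(b.StorageLocation, b.Labels)
}
```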
func TestDeleteUnused(t *testing.T) {
func TestDeleteOrphanedBackups(t *testing.T) {
tests := []struct {
name string
cloudBackups []*v1.Backup
cloudBackups sets.String
k8sBackups []*arktest.TestBackup
namespace string
expectedDeletes sets.String
}{
{
name: "no overlapping backups",
namespace: "ns-1",
cloudBackups: []*v1.Backup{
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup,
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup,
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").Backup,
},
name: "no overlapping backups",
namespace: "ns-1",
cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"),
k8sBackups: []*arktest.TestBackup{
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backupA").WithPhase(v1.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backupB").WithPhase(v1.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backupC").WithPhase(v1.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backupA").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backupB").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backupC").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted),
},
expectedDeletes: sets.NewString("backupA", "backupB", "backupC"),
},
{
name: "some overlapping backups",
namespace: "ns-1",
cloudBackups: []*v1.Backup{
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup,
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup,
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").Backup,
},
name: "some overlapping backups",
namespace: "ns-1",
cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"),
k8sBackups: []*arktest.TestBackup{
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithPhase(v1.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithPhase(v1.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backupC").WithPhase(v1.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-C").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted),
},
expectedDeletes: sets.NewString("backupC"),
expectedDeletes: sets.NewString("backup-C"),
},
{
name: "all overlapping backups",
namespace: "ns-1",
cloudBackups: []*v1.Backup{
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup,
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup,
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").Backup,
},
name: "all overlapping backups",
namespace: "ns-1",
cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"),
k8sBackups: []*arktest.TestBackup{
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithPhase(v1.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithPhase(v1.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithPhase(v1.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted),
},
expectedDeletes: sets.NewString(),
},
{
name: "no overlapping backups but including backups that are not complete",
namespace: "ns-1",
cloudBackups: []*v1.Backup{
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup,
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup,
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").Backup,
},
name: "no overlapping backups but including backups that are not complete",
namespace: "ns-1",
cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"),
k8sBackups: []*arktest.TestBackup{
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backupA").WithPhase(v1.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("Deleting").WithPhase(v1.BackupPhaseDeleting),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("Failed").WithPhase(v1.BackupPhaseFailed),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("FailedValidation").WithPhase(v1.BackupPhaseFailedValidation),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("InProgress").WithPhase(v1.BackupPhaseInProgress),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("New").WithPhase(v1.BackupPhaseNew),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backupA").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("Deleting").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseDeleting),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("Failed").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseFailed),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("FailedValidation").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseFailedValidation),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("InProgress").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseInProgress),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("New").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseNew),
},
expectedDeletes: sets.NewString("backupA"),
},
{
name: "all overlapping backups and all backups that are not complete",
namespace: "ns-1",
cloudBackups: []*v1.Backup{
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup,
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup,
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").Backup,
},
name: "all overlapping backups and all backups that are not complete",
namespace: "ns-1",
cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"),
k8sBackups: []*arktest.TestBackup{
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithPhase(v1.BackupPhaseFailed),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithPhase(v1.BackupPhaseFailedValidation),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithPhase(v1.BackupPhaseInProgress),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseFailed),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseFailedValidation),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseInProgress),
},
expectedDeletes: sets.NewString(),
},
{
name: "no completed backups in other locations are deleted",
namespace: "ns-1",
cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"),
k8sBackups: []*arktest.TestBackup{
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-C").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-4").WithLabel(arkv1api.StorageLocationLabel, "alternate").WithPhase(arkv1api.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-5").WithLabel(arkv1api.StorageLocationLabel, "alternate").WithPhase(arkv1api.BackupPhaseCompleted),
arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-6").WithLabel(arkv1api.StorageLocationLabel, "alternate").WithPhase(arkv1api.BackupPhaseCompleted),
},
expectedDeletes: sets.NewString("backup-C"),
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var (
backupLister = &cloudprovidermocks.BackupLister{}
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
logger = arktest.NewLogger()
)

c := NewBackupSyncController(
client.ArkV1(),
backupLister,
"bucket",
sharedInformers.Ark().V1().Backups(),
sharedInformers.Ark().V1().BackupStorageLocations(),
time.Duration(0),
test.namespace,
sharedInformers.Ark().V1().Backups(),
logger,
"",
nil, // new plugin manager func
arktest.NewLogger(),
).(*backupSyncController)

expectedDeleteActions := make([]core.Action, 0)

// setup: insert backups into Kubernetes
for _, backup := range test.k8sBackups {
// add test backup to informer
require.NoError(t, sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(backup.Backup), "Error adding backup to informer")

// add test backup to client
_, err := client.Ark().Backups(test.namespace).Create(backup.Backup)
require.NoError(t, err, "Error adding backup to clientset")

// if we expect this backup to be deleted, set up the expected DeleteAction
if test.expectedDeletes.Has(backup.Name) {
actionDelete := core.NewDeleteAction(
v1.SchemeGroupVersion.WithResource("backups"),
arkv1api.SchemeGroupVersion.WithResource("backups"),
test.namespace,
backup.Name,
)
expectedDeleteActions = append(expectedDeleteActions, actionDelete)
}

// add test backup to informer:
err := sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(backup.Backup)
assert.NoError(t, err, "Error adding backup to informer")

// add test backup to kubernetes:
_, err = client.Ark().Backups(test.namespace).Create(backup.Backup)
assert.NoError(t, err, "Error deleting from clientset")
}

// get names of client backups
testBackupNames := sets.NewString()
for _, cloudBackup := range test.cloudBackups {
testBackupNames.Insert(cloudBackup.Name)
}

c.deleteUnused(testBackupNames)
c.deleteOrphanedBackups("default", test.cloudBackups, arktest.NewLogger())

numBackups, err := numBackups(t, client, c.namespace)
assert.NoError(t, err)
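The table above encodes the deletion rule this PR introduces: within a given storage location, a cluster Backup is deleted only when it is in phase Completed and its name no longer appears in the cloud listing; deleting, failed, in-progress, and new backups are always left alone, as are backups labeled with a different location. A compact sketch of that decision, with stand-in types rather than the actual controller code:

```go
package main

import "fmt"

type phase string

const completed phase = "Completed"

type clusterBackup struct {
	name     string
	location string // value of the storage-location label
	phase    phase
}

// orphanedNames mirrors the rule exercised by TestDeleteOrphanedBackups:
// only Completed backups in the target location that are missing from the
// cloud listing are candidates for deletion.
func orphanedNames(location string, cloud map[string]bool, cluster []clusterBackup) []string {
	var out []string
	for _, b := range cluster {
		if b.location != location || b.phase != completed {
			continue
		}
		if !cloud[b.name] {
			out = append(out, b.name)
		}
	}
	return out
}

func main() {
	cloud := map[string]bool{"backup-1": true, "backup-2": true}
	cluster := []clusterBackup{
		{"backup-1", "default", completed},
		{"backup-C", "default", completed},
		{"backup-4", "alternate", completed},
		{"InProgress", "default", "InProgress"},
	}
	fmt.Println(orphanedNames("default", cloud, cluster)) // [backup-C]
}
```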
@ -17,9 +17,7 @@ limitations under the License.
package controller

import (
"context"
"encoding/json"
"sync"
"time"

jsonpatch "github.com/evanphx/json-patch"

@ -31,32 +29,28 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"

"github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/cloudprovider"
arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
"github.com/heptio/ark/pkg/plugin"
"github.com/heptio/ark/pkg/util/kube"
)

type downloadRequestController struct {
downloadRequestClient arkv1client.DownloadRequestsGetter
downloadRequestLister listers.DownloadRequestLister
downloadRequestListerSynced cache.InformerSynced
restoreLister listers.RestoreLister
restoreListerSynced cache.InformerSynced
objectStore cloudprovider.ObjectStore
bucket string
syncHandler func(key string) error
queue workqueue.RateLimitingInterface
clock clock.Clock
logger logrus.FieldLogger
*genericController

createSignedURL cloudprovider.CreateSignedURLFunc
downloadRequestClient arkv1client.DownloadRequestsGetter
downloadRequestLister listers.DownloadRequestLister
restoreLister listers.RestoreLister
clock clock.Clock
createSignedURL cloudprovider.CreateSignedURLFunc
backupLocationLister listers.BackupStorageLocationLister
backupLister listers.BackupLister
newPluginManager func(logrus.FieldLogger) plugin.Manager
}

// NewDownloadRequestController creates a new DownloadRequestController.
@ -64,26 +58,35 @@ func NewDownloadRequestController(
downloadRequestClient arkv1client.DownloadRequestsGetter,
downloadRequestInformer informers.DownloadRequestInformer,
restoreInformer informers.RestoreInformer,
objectStore cloudprovider.ObjectStore,
bucket string,
backupLocationInformer informers.BackupStorageLocationInformer,
backupInformer informers.BackupInformer,
newPluginManager func(logrus.FieldLogger) plugin.Manager,
logger logrus.FieldLogger,
) Interface {
c := &downloadRequestController{
downloadRequestClient: downloadRequestClient,
downloadRequestLister: downloadRequestInformer.Lister(),
downloadRequestListerSynced: downloadRequestInformer.Informer().HasSynced,
restoreLister: restoreInformer.Lister(),
restoreListerSynced: restoreInformer.Informer().HasSynced,
objectStore: objectStore,
bucket: bucket,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "downloadrequest"),
clock: &clock.RealClock{},
logger: logger,
genericController: newGenericController("downloadrequest", logger),
downloadRequestClient: downloadRequestClient,
downloadRequestLister: downloadRequestInformer.Lister(),
restoreLister: restoreInformer.Lister(),
backupLocationLister: backupLocationInformer.Lister(),
backupLister: backupInformer.Lister(),

createSignedURL: cloudprovider.CreateSignedURL,
// use variables to refer to these functions so they can be
// replaced with fakes for testing.
createSignedURL: cloudprovider.CreateSignedURL,
newPluginManager: newPluginManager,

clock: &clock.RealClock{},
}

c.syncHandler = c.processDownloadRequest
c.cacheSyncWaiters = append(
c.cacheSyncWaiters,
downloadRequestInformer.Informer().HasSynced,
restoreInformer.Informer().HasSynced,
backupLocationInformer.Informer().HasSynced,
backupInformer.Informer().HasSynced,
)

downloadRequestInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
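The comment in the constructor above names the pattern being used: production functions are stored in struct fields so tests can substitute fakes without any mocking framework. A generic sketch of the idea, not Ark's actual signatures:

```go
package main

import "fmt"

// signer is the production implementation.
func signer(bucket, key string) string {
	return fmt.Sprintf("https://example.com/%s/%s?sig=...", bucket, key)
}

// controller keeps the function in a field precisely so tests can swap it.
type controller struct {
	createSignedURL func(bucket, key string) string
}

func newController() *controller {
	return &controller{createSignedURL: signer}
}

func main() {
	c := newController()
	// A test would simply reassign the field:
	c.createSignedURL = func(bucket, key string) string { return "stub-url" }
	fmt.Println(c.createSignedURL("b", "k"))
}
```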
@ -104,102 +107,21 @@ func NewDownloadRequestController(
return c
}

// Run is a blocking function that runs the specified number of worker goroutines
// to process items in the work queue. It will return when it receives on the
// ctx.Done() channel.
func (c *downloadRequestController) Run(ctx context.Context, numWorkers int) error {
var wg sync.WaitGroup

defer func() {
c.logger.Info("Waiting for workers to finish their work")

c.queue.ShutDown()

// We have to wait here in the deferred function instead of at the bottom of the function body
// because we have to shut down the queue in order for the workers to shut down gracefully, and
// we want to shut down the queue via defer and not at the end of the body.
wg.Wait()

c.logger.Info("All workers have finished")
}()

c.logger.Info("Starting DownloadRequestController")
defer c.logger.Info("Shutting down DownloadRequestController")

c.logger.Info("Waiting for caches to sync")
if !cache.WaitForCacheSync(ctx.Done(), c.downloadRequestListerSynced, c.restoreListerSynced) {
return errors.New("timed out waiting for caches to sync")
}
c.logger.Info("Caches are synced")

wg.Add(numWorkers)
for i := 0; i < numWorkers; i++ {
go func() {
wait.Until(c.runWorker, time.Second, ctx.Done())
wg.Done()
}()
}

wg.Add(1)
go func() {
wait.Until(c.resync, time.Minute, ctx.Done())
wg.Done()
}()

<-ctx.Done()

return nil
}

// runWorker runs a worker until the controller's queue indicates it's time to shut down.
func (c *downloadRequestController) runWorker() {
// continually take items off the queue (waits if it's
// empty) until we get a shutdown signal from the queue
for c.processNextWorkItem() {
}
}

// processNextWorkItem processes a single item from the queue.
func (c *downloadRequestController) processNextWorkItem() bool {
key, quit := c.queue.Get()
if quit {
return false
}
// always call done on this item, since if it fails we'll add
// it back with rate-limiting below
defer c.queue.Done(key)

err := c.syncHandler(key.(string))
if err == nil {
// If you had no error, tell the queue to stop tracking history for your key. This will reset
// things like failure counts for per-item rate limiting.
c.queue.Forget(key)
return true
}

c.logger.WithError(err).WithField("key", key).Error("Error in syncHandler, re-adding item to queue")

// we had an error processing the item so add it back
// into the queue for re-processing with rate-limiting
c.queue.AddRateLimited(key)

return true
}

// processDownloadRequest is the default per-item sync handler. It generates a pre-signed URL for
// a new DownloadRequest or deletes the DownloadRequest if it has expired.
func (c *downloadRequestController) processDownloadRequest(key string) error {
logContext := c.logger.WithField("key", key)
log := c.logger.WithField("key", key)

logContext.Debug("Running processDownloadRequest")
log.Debug("Running processDownloadRequest")
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return errors.Wrap(err, "error splitting queue key")
log.WithError(err).Error("error splitting queue key")
return nil
}

downloadRequest, err := c.downloadRequestLister.DownloadRequests(ns).Get(name)
if apierrors.IsNotFound(err) {
logContext.Debug("Unable to find DownloadRequest")
log.Debug("Unable to find DownloadRequest")
return nil
}
if err != nil {
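The worker-loop code being removed above (it moves into the shared generic controller) is the standard client-go workqueue pattern: Get an item, always Done it, Forget on success, AddRateLimited on failure. A self-contained sketch of that lifecycle using the same `k8s.io/client-go/util/workqueue` calls that appear in the diff:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// process is a stand-in sync handler; a real controller would look the
// key up in a lister and reconcile the object.
func process(key string) error {
	fmt.Println("processing", key)
	return nil
}

func main() {
	q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "example")
	q.Add("ns/name")
	q.ShutDown() // in a controller this happens on context cancellation; Get drains remaining items first

	for {
		key, quit := q.Get()
		if quit {
			return
		}
		err := process(key.(string))
		if err == nil {
			q.Forget(key) // reset per-item rate-limit history on success
		} else {
			q.AddRateLimited(key) // re-enqueue with backoff on failure
		}
		// Done must always be called so the queue stops tracking the item
		// as in-flight; the real code defers it immediately after Get.
		q.Done(key)
	}
}
```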
@ -208,7 +130,7 @@ func (c *downloadRequestController) processDownloadRequest(key string) error {

switch downloadRequest.Status.Phase {
case "", v1.DownloadRequestPhaseNew:
return c.generatePreSignedURL(downloadRequest)
return c.generatePreSignedURL(downloadRequest, log)
case v1.DownloadRequestPhaseProcessed:
return c.deleteIfExpired(downloadRequest)
}

@ -220,7 +142,7 @@ const signedURLTTL = 10 * time.Minute

// generatePreSignedURL generates a pre-signed URL for downloadRequest, changes the phase to
// Processed, and persists the changes to storage.
func (c *downloadRequestController) generatePreSignedURL(downloadRequest *v1.DownloadRequest) error {
func (c *downloadRequestController) generatePreSignedURL(downloadRequest *v1.DownloadRequest, log logrus.FieldLogger) error {
update := downloadRequest.DeepCopy()

var (
@ -240,7 +162,25 @@ func (c *downloadRequestController) generatePreSignedURL(downloadRequest *v1.Dow
directory = downloadRequest.Spec.Target.Name
}

update.Status.DownloadURL, err = c.createSignedURL(c.objectStore, downloadRequest.Spec.Target, c.bucket, directory, signedURLTTL)
backup, err := c.backupLister.Backups(downloadRequest.Namespace).Get(directory)
if err != nil {
return errors.WithStack(err)
}

backupLocation, err := c.backupLocationLister.BackupStorageLocations(backup.Namespace).Get(backup.Spec.StorageLocation)
if err != nil {
return errors.WithStack(err)
}

pluginManager := c.newPluginManager(log)
defer pluginManager.CleanupClients()

objectStore, err := getObjectStoreForLocation(backupLocation, pluginManager)
if err != nil {
return errors.WithStack(err)
}

update.Status.DownloadURL, err = c.createSignedURL(objectStore, downloadRequest.Spec.Target, backupLocation.Spec.ObjectStorage.Bucket, directory, signedURLTTL)
if err != nil {
return err
}

@ -256,7 +196,7 @@ func (c *downloadRequestController) generatePreSignedURL(downloadRequest *v1.Dow
func (c *downloadRequestController) deleteIfExpired(downloadRequest *v1.DownloadRequest) error {
logContext := c.logger.WithField("key", kube.NamespaceAndName(downloadRequest))
logContext.Info("checking for expiration of DownloadRequest")
if downloadRequest.Status.Expiration.Time.Before(c.clock.Now()) {
if downloadRequest.Status.Expiration.Time.After(c.clock.Now()) {
logContext.Debug("DownloadRequest has not expired")
return nil
}
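The hunk above inverts the expiration check: instead of guarding the delete with Before(now), the new code returns early while the request is still valid and only falls through to the delete once the expiration has passed. A minimal sketch of that corrected control flow, with the clock injected the same way so a fake clock can drive it in tests (types here are simplified stand-ins):

```go
package main

import (
	"fmt"
	"time"
)

type downloadRequest struct {
	name       string
	expiration time.Time
}

// deleteIfExpired mirrors the new control flow: return early while the
// request is still valid, delete only once the expiration has passed.
func deleteIfExpired(req downloadRequest, now func() time.Time, del func(string) error) error {
	if req.expiration.After(now()) {
		return nil // not expired yet
	}
	return del(req.name)
}

func main() {
	now := func() time.Time { return time.Date(2018, 8, 1, 0, 0, 0, 0, time.UTC) }
	req := downloadRequest{name: "dr1", expiration: now().Add(-time.Minute)}
	err := deleteIfExpired(req, now, func(name string) error {
		fmt.Println("deleting", name)
		return nil
	})
	fmt.Println(err)
}
```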
@ -17,191 +17,299 @@ limitations under the License.
package controller

import (
"encoding/json"
"testing"
"time"

"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"

apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"

"github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
"github.com/heptio/ark/pkg/plugin"
pluginmocks "github.com/heptio/ark/pkg/plugin/mocks"
kubeutil "github.com/heptio/ark/pkg/util/kube"
arktest "github.com/heptio/ark/pkg/util/test"
)

type downloadRequestTestHarness struct {
client *fake.Clientset
informerFactory informers.SharedInformerFactory
pluginManager *pluginmocks.Manager
objectStore *arktest.ObjectStore

controller *downloadRequestController
}

func newDownloadRequestTestHarness(t *testing.T) *downloadRequestTestHarness {
var (
client = fake.NewSimpleClientset()
informerFactory = informers.NewSharedInformerFactory(client, 0)
pluginManager = new(pluginmocks.Manager)
objectStore = new(arktest.ObjectStore)
controller = NewDownloadRequestController(
client.ArkV1(),
informerFactory.Ark().V1().DownloadRequests(),
informerFactory.Ark().V1().Restores(),
informerFactory.Ark().V1().BackupStorageLocations(),
informerFactory.Ark().V1().Backups(),
func(logrus.FieldLogger) plugin.Manager { return pluginManager },
arktest.NewLogger(),
).(*downloadRequestController)
)

clockTime, err := time.Parse(time.RFC1123, time.RFC1123)
require.NoError(t, err)

controller.clock = clock.NewFakeClock(clockTime)

pluginManager.On("CleanupClients").Return()
objectStore.On("Init", mock.Anything).Return(nil)

return &downloadRequestTestHarness{
client: client,
informerFactory: informerFactory,
pluginManager: pluginManager,
objectStore: objectStore,
controller: controller,
}
}
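For orientation, the harness above is consumed roughly as follows; this is a condensed sketch of the steps the table-driven test below performs, not an additional test in the PR:

```go
func TestHarnessUsageSketch(t *testing.T) {
	harness := newDownloadRequestTestHarness(t)

	// A backup, the location it lives in, and a download request for it.
	backup := arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup
	location := newBackupLocation("a-location", "a-provider", "a-bucket")
	dr := newDownloadRequest("", v1.DownloadTargetKindBackupContents, "a-backup")

	require.NoError(t, harness.informerFactory.Ark().V1().Backups().Informer().GetStore().Add(backup))
	require.NoError(t, harness.informerFactory.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(location))
	require.NoError(t, harness.informerFactory.Ark().V1().DownloadRequests().Informer().GetStore().Add(dr))
	_, err := harness.client.ArkV1().DownloadRequests(dr.Namespace).Create(dr)
	require.NoError(t, err)

	// The mocked plugin manager hands back the mocked object store, which
	// is then asked to sign a URL for the backup contents object.
	harness.pluginManager.On("GetObjectStore", "a-provider").Return(harness.objectStore, nil)
	harness.objectStore.On("CreateSignedURL", "a-bucket", "a-backup/a-backup.tar.gz", mock.Anything).Return("a-url", nil)

	require.NoError(t, harness.controller.processDownloadRequest(kubeutil.NamespaceAndName(dr)))
}
```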
func newDownloadRequest(phase v1.DownloadRequestPhase, targetKind v1.DownloadTargetKind, targetName string) *v1.DownloadRequest {
return &v1.DownloadRequest{
ObjectMeta: metav1.ObjectMeta{
Name: "a-download-request",
Namespace: v1.DefaultNamespace,
},
Spec: v1.DownloadRequestSpec{
Target: v1.DownloadTarget{
Kind: targetKind,
Name: targetName,
},
},
Status: v1.DownloadRequestStatus{
Phase: phase,
},
}
}

func newBackupLocation(name, provider, bucket string) *v1.BackupStorageLocation {
return &v1.BackupStorageLocation{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: v1.DefaultNamespace,
},
Spec: v1.BackupStorageLocationSpec{
Provider: provider,
StorageType: v1.StorageType{
ObjectStorage: &v1.ObjectStorageLocation{
Bucket: bucket,
},
},
},
}
}

func TestProcessDownloadRequest(t *testing.T) {
tests := []struct {
name string
key string
phase v1.DownloadRequestPhase
targetKind v1.DownloadTargetKind
targetName string
restore *v1.Restore
expectedError string
expectedDir string
expectedPhase v1.DownloadRequestPhase
expectedURL string
name string
key string
downloadRequest *v1.DownloadRequest
backup *v1.Backup
restore *v1.Restore
backupLocation *v1.BackupStorageLocation
expired bool
expectedErr string
expectedRequestedObject string
}{
{
name: "empty key",
name: "empty key returns without error",
key: "",
},
{
name: "bad key format",
key: "a/b/c",
expectedError: `error splitting queue key: unexpected key format: "a/b/c"`,
name: "bad key format returns without error",
key: "a/b/c",
},
{
name: "backup log request with phase '' gets a url",
key: "heptio-ark/dr1",
phase: "",
targetKind: v1.DownloadTargetKindBackupLog,
targetName: "backup1",
expectedDir: "backup1",
expectedPhase: v1.DownloadRequestPhaseProcessed,
expectedURL: "signedURL",
name: "no download request for key returns without error",
key: "nonexistent/key",
},
{
name: "backup log request with phase 'New' gets a url",
key: "heptio-ark/dr1",
phase: v1.DownloadRequestPhaseNew,
targetKind: v1.DownloadTargetKindBackupLog,
targetName: "backup1",
expectedDir: "backup1",
expectedPhase: v1.DownloadRequestPhaseProcessed,
expectedURL: "signedURL",
name: "backup contents request for nonexistent backup returns an error",
downloadRequest: newDownloadRequest("", v1.DownloadTargetKindBackupContents, "a-backup"),
backup: arktest.NewTestBackup().WithName("non-matching-backup").WithStorageLocation("a-location").Backup,
backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"),
expectedErr: "backup.ark.heptio.com \"a-backup\" not found",
},
{
name: "restore log request with phase '' gets a url",
key: "heptio-ark/dr1",
phase: "",
targetKind: v1.DownloadTargetKindRestoreLog,
targetName: "backup1-20170912150214",
restore: arktest.NewTestRestore(v1.DefaultNamespace, "backup1-20170912150214", v1.RestorePhaseCompleted).WithBackup("backup1").Restore,
expectedDir: "backup1",
expectedPhase: v1.DownloadRequestPhaseProcessed,
expectedURL: "signedURL",
name: "restore log request for nonexistent restore returns an error",
downloadRequest: newDownloadRequest("", v1.DownloadTargetKindRestoreLog, "a-backup-20170912150214"),
restore: arktest.NewTestRestore(v1.DefaultNamespace, "non-matching-restore", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore,
backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"),
expectedErr: "error getting Restore: restore.ark.heptio.com \"a-backup-20170912150214\" not found",
},
{
name: "restore log request with phase New gets a url",
key: "heptio-ark/dr1",
phase: v1.DownloadRequestPhaseNew,
targetKind: v1.DownloadTargetKindRestoreLog,
targetName: "backup1-20170912150214",
restore: arktest.NewTestRestore(v1.DefaultNamespace, "backup1-20170912150214", v1.RestorePhaseCompleted).WithBackup("backup1").Restore,
expectedDir: "backup1",
expectedPhase: v1.DownloadRequestPhaseProcessed,
expectedURL: "signedURL",
name: "backup contents request for backup with nonexistent location returns an error",
downloadRequest: newDownloadRequest("", v1.DownloadTargetKindBackupContents, "a-backup"),
backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
backupLocation: newBackupLocation("non-matching-location", "a-provider", "a-bucket"),
expectedErr: "backupstoragelocation.ark.heptio.com \"a-location\" not found",
},
{
name: "backup contents request with phase '' gets a url",
downloadRequest: newDownloadRequest("", v1.DownloadTargetKindBackupContents, "a-backup"),
backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"),
expectedRequestedObject: "a-backup/a-backup.tar.gz",
},
{
name: "backup contents request with phase 'New' gets a url",
downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindBackupContents, "a-backup"),
backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"),
expectedRequestedObject: "a-backup/a-backup.tar.gz",
},
{
name: "backup log request with phase '' gets a url",
downloadRequest: newDownloadRequest("", v1.DownloadTargetKindBackupLog, "a-backup"),
backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"),
expectedRequestedObject: "a-backup/a-backup-logs.gz",
},
{
name: "backup log request with phase 'New' gets a url",
downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindBackupLog, "a-backup"),
backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"),
expectedRequestedObject: "a-backup/a-backup-logs.gz",
},
{
name: "restore log request with phase '' gets a url",
downloadRequest: newDownloadRequest("", v1.DownloadTargetKindRestoreLog, "a-backup-20170912150214"),
restore: arktest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore,
backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"),
expectedRequestedObject: "a-backup/restore-a-backup-20170912150214-logs.gz",
},
{
name: "restore log request with phase 'New' gets a url",
downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindRestoreLog, "a-backup-20170912150214"),
restore: arktest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore,
backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"),
expectedRequestedObject: "a-backup/restore-a-backup-20170912150214-logs.gz",
},
{
name: "restore results request with phase '' gets a url",
downloadRequest: newDownloadRequest("", v1.DownloadTargetKindRestoreResults, "a-backup-20170912150214"),
restore: arktest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore,
backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"),
expectedRequestedObject: "a-backup/restore-a-backup-20170912150214-results.gz",
},
{
name: "restore results request with phase 'New' gets a url",
downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindRestoreResults, "a-backup-20170912150214"),
restore: arktest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore,
backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"),
expectedRequestedObject: "a-backup/restore-a-backup-20170912150214-results.gz",
},
{
name: "request with phase 'Processed' is not deleted if not expired",
downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseProcessed, v1.DownloadTargetKindBackupLog, "a-backup-20170912150214"),
backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
},
{
name: "request with phase 'Processed' is deleted if expired",
downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseProcessed, v1.DownloadTargetKindBackupLog, "a-backup-20170912150214"),
backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup,
expired: true,
},
}

for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
var (
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
downloadRequestsInformer = sharedInformers.Ark().V1().DownloadRequests()
restoresInformer = sharedInformers.Ark().V1().Restores()
logger = arktest.NewLogger()
clockTime, _ = time.Parse("Mon Jan 2 15:04:05 2006", "Mon Jan 2 15:04:05 2006")
)
harness := newDownloadRequestTestHarness(t)

c := NewDownloadRequestController(
client.ArkV1(),
downloadRequestsInformer,
restoresInformer,
nil, // objectStore
"bucket",
logger,
).(*downloadRequestController)
// set up test case data

c.clock = clock.NewFakeClock(clockTime)

var downloadRequest *v1.DownloadRequest

if tc.expectedPhase == v1.DownloadRequestPhaseProcessed {
expectedTarget := v1.DownloadTarget{
Kind: tc.targetKind,
Name: tc.targetName,
}

downloadRequest = &v1.DownloadRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: v1.DefaultNamespace,
Name: "dr1",
},
Spec: v1.DownloadRequestSpec{
Target: expectedTarget,
},
}
downloadRequestsInformer.Informer().GetStore().Add(downloadRequest)

if tc.restore != nil {
restoresInformer.Informer().GetStore().Add(tc.restore)
}

c.createSignedURL = func(objectStore cloudprovider.ObjectStore, target v1.DownloadTarget, bucket, directory string, ttl time.Duration) (string, error) {
require.Equal(t, expectedTarget, target)
require.Equal(t, "bucket", bucket)
require.Equal(t, tc.expectedDir, directory)
require.Equal(t, 10*time.Minute, ttl)
return "signedURL", nil
// Set .status.expiration properly for processed requests. Since "expired" is relative to the controller's
// clock time, it's easier to do this here than as part of the test case definitions.
if tc.downloadRequest != nil && tc.downloadRequest.Status.Phase == v1.DownloadRequestPhaseProcessed {
if tc.expired {
tc.downloadRequest.Status.Expiration.Time = harness.controller.clock.Now().Add(-1 * time.Minute)
} else {
tc.downloadRequest.Status.Expiration.Time = harness.controller.clock.Now().Add(time.Minute)
}
}

// method under test
err := c.processDownloadRequest(tc.key)
if tc.downloadRequest != nil {
require.NoError(t, harness.informerFactory.Ark().V1().DownloadRequests().Informer().GetStore().Add(tc.downloadRequest))

if tc.expectedError != "" {
assert.EqualError(t, err, tc.expectedError)
return
_, err := harness.client.ArkV1().DownloadRequests(tc.downloadRequest.Namespace).Create(tc.downloadRequest)
require.NoError(t, err)
}

require.NoError(t, err)

actions := client.Actions()

// if we don't expect a phase update, this means
// we don't expect any actions to take place
if tc.expectedPhase == "" {
require.Equal(t, 0, len(actions))
return
if tc.restore != nil {
require.NoError(t, harness.informerFactory.Ark().V1().Restores().Informer().GetStore().Add(tc.restore))
}

// otherwise, we should get exactly 1 patch
require.Equal(t, 1, len(actions))

type PatchStatus struct {
DownloadURL string `json:"downloadURL"`
Phase v1.DownloadRequestPhase `json:"phase"`
Expiration time.Time `json:"expiration"`
if tc.backup != nil {
require.NoError(t, harness.informerFactory.Ark().V1().Backups().Informer().GetStore().Add(tc.backup))
}

type Patch struct {
Status PatchStatus `json:"status"`
if tc.backupLocation != nil {
require.NoError(t, harness.informerFactory.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(tc.backupLocation))

harness.pluginManager.On("GetObjectStore", tc.backupLocation.Spec.Provider).Return(harness.objectStore, nil)
}

decode := func(decoder *json.Decoder) (interface{}, error) {
actual := new(Patch)
err := decoder.Decode(actual)

return *actual, err
if tc.expectedRequestedObject != "" {
harness.objectStore.On("CreateSignedURL", tc.backupLocation.Spec.ObjectStorage.Bucket, tc.expectedRequestedObject, mock.Anything).Return("a-url", nil)
}

expected := Patch{
Status: PatchStatus{
DownloadURL: tc.expectedURL,
Phase: tc.expectedPhase,
Expiration: clockTime.Add(signedURLTTL),
},
// exercise method under test
key := tc.key
if key == "" && tc.downloadRequest != nil {
key = kubeutil.NamespaceAndName(tc.downloadRequest)
}

arktest.ValidatePatch(t, actions[0], expected, decode)
err := harness.controller.processDownloadRequest(key)

// verify results
if tc.expectedErr != "" {
require.Equal(t, tc.expectedErr, err.Error())
} else {
assert.Nil(t, err)
}

if tc.expectedRequestedObject != "" {
output, err := harness.client.ArkV1().DownloadRequests(tc.downloadRequest.Namespace).Get(tc.downloadRequest.Name, metav1.GetOptions{})
require.NoError(t, err)

assert.Equal(t, string(v1.DownloadRequestPhaseProcessed), string(output.Status.Phase))
assert.Equal(t, "a-url", output.Status.DownloadURL)
assert.True(t, arktest.TimesAreEqual(harness.controller.clock.Now().Add(signedURLTTL), output.Status.Expiration.Time), "expiration does not match")
}

if tc.downloadRequest != nil && tc.downloadRequest.Status.Phase == v1.DownloadRequestPhaseProcessed {
res, err := harness.client.ArkV1().DownloadRequests(tc.downloadRequest.Namespace).Get(tc.downloadRequest.Name, metav1.GetOptions{})

if tc.expired {
assert.True(t, apierrors.IsNotFound(err))
} else {
assert.NoError(t, err)
assert.Equal(t, tc.downloadRequest, res)
}
}
})
}
}
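The expectedRequestedObject values in the table above imply the object-key layout the controller asks the store to sign: everything lives under the backup's directory, with contents at <backup>/<backup>.tar.gz, backup logs at <backup>/<backup>-logs.gz, and restore logs/results at <backup>/restore-<restore>-{logs,results}.gz. A hedged reconstruction of that mapping, inferred from the test expectations rather than copied from the Ark source:

```go
package main

import "fmt"

type targetKind string

const (
	backupContents targetKind = "BackupContents"
	backupLog      targetKind = "BackupLog"
	restoreLog     targetKind = "RestoreLog"
	restoreResults targetKind = "RestoreResults"
)

// objectKey reproduces the layout implied by the expectedRequestedObject
// values above; backupDir is the backup name, targetName is the backup or
// restore name being downloaded.
func objectKey(kind targetKind, backupDir, targetName string) string {
	switch kind {
	case backupContents:
		return fmt.Sprintf("%s/%s.tar.gz", backupDir, targetName)
	case backupLog:
		return fmt.Sprintf("%s/%s-logs.gz", backupDir, targetName)
	case restoreLog:
		return fmt.Sprintf("%s/restore-%s-logs.gz", backupDir, targetName)
	case restoreResults:
		return fmt.Sprintf("%s/restore-%s-results.gz", backupDir, targetName)
	}
	return ""
}

func main() {
	fmt.Println(objectKey(restoreLog, "a-backup", "a-backup-20170912150214"))
	// a-backup/restore-a-backup-20170912150214-logs.gz
}
```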
@ -53,9 +53,9 @@ func newGenericController(name string, logger logrus.FieldLogger) *genericContro
// to process items in the work queue. It will return when it receives on the
// ctx.Done() channel.
func (c *genericController) Run(ctx context.Context, numWorkers int) error {
if c.syncHandler == nil {
if c.syncHandler == nil && c.resyncFunc == nil {
// programmer error
panic("syncHandler is required")
panic("at least one of syncHandler or resyncFunc is required")
}

var wg sync.WaitGroup

@ -83,12 +83,14 @@ func (c *genericController) Run(ctx context.Context, numWorkers int) error {
}
c.logger.Info("Caches are synced")

wg.Add(numWorkers)
for i := 0; i < numWorkers; i++ {
go func() {
wait.Until(c.runWorker, time.Second, ctx.Done())
wg.Done()
}()
if c.syncHandler != nil {
wg.Add(numWorkers)
for i := 0; i < numWorkers; i++ {
go func() {
wait.Until(c.runWorker, time.Second, ctx.Done())
wg.Done()
}()
}
}

if c.resyncFunc != nil {
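The relaxed precondition above exists so a controller can run with only a periodic resync function and no per-item sync handler. A stripped-down sketch of that dispatch structure (the generic shape only, not the Ark type; the worker body is elided):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type controller struct {
	syncHandler func(key string) error
	resyncFunc  func()
}

// run mirrors the branch structure above: workers start only when there is
// a syncHandler, the resync loop starts only when there is a resyncFunc,
// and having neither is a programmer error.
func (c *controller) run(ctx context.Context, numWorkers int) error {
	if c.syncHandler == nil && c.resyncFunc == nil {
		panic("at least one of syncHandler or resyncFunc is required")
	}

	var wg sync.WaitGroup
	if c.syncHandler != nil {
		wg.Add(numWorkers)
		for i := 0; i < numWorkers; i++ {
			go func() { defer wg.Done() /* worker loop elided */ }()
		}
	}
	if c.resyncFunc != nil {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				c.resyncFunc()
				select {
				case <-ctx.Done():
					return
				case <-time.After(time.Minute):
				}
			}
		}()
	}

	<-ctx.Done()
	wg.Wait()
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	c := &controller{resyncFunc: func() { fmt.Println("resync") }}
	_ = c.run(ctx, 0) // resync-only controller: no workers started
}
```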
@ -44,7 +44,7 @@ type resticRepositoryController struct {

resticRepositoryClient arkv1client.ResticRepositoriesGetter
resticRepositoryLister listers.ResticRepositoryLister
objectStorageConfig arkv1api.ObjectStorageProviderConfig
storageLocation *arkv1api.BackupStorageLocation
repositoryManager restic.RepositoryManager

clock clock.Clock

@ -55,14 +55,14 @@ func NewResticRepositoryController(
logger logrus.FieldLogger,
resticRepositoryInformer informers.ResticRepositoryInformer,
resticRepositoryClient arkv1client.ResticRepositoriesGetter,
objectStorageConfig arkv1api.ObjectStorageProviderConfig,
storageLocation *arkv1api.BackupStorageLocation,
repositoryManager restic.RepositoryManager,
) Interface {
c := &resticRepositoryController{
genericController: newGenericController("restic-repository", logger),
resticRepositoryClient: resticRepositoryClient,
resticRepositoryLister: resticRepositoryInformer.Lister(),
objectStorageConfig: objectStorageConfig,
storageLocation: storageLocation,
repositoryManager: repositoryManager,
clock: &clock.RealClock{},
}

@ -139,7 +139,7 @@ func (c *resticRepositoryController) initializeRepo(req *v1.ResticRepository, lo

// defaulting - if the patch fails, return an error so the item is returned to the queue
if err := c.patchResticRepository(req, func(r *v1.ResticRepository) {
r.Spec.ResticIdentifier = restic.GetRepoIdentifier(c.objectStorageConfig, r.Name)
r.Spec.ResticIdentifier = restic.GetRepoIdentifier(c.storageLocation, r.Name)

if r.Spec.MaintenanceFrequency.Duration <= 0 {
r.Spec.MaintenanceFrequency = metav1.Duration{Duration: restic.DefaultMaintenanceFrequency}
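GetRepoIdentifier now derives the restic repository identifier from a BackupStorageLocation rather than the old ObjectStorageProviderConfig. Purely to illustrate the shape of that derivation, here is a sketch; the URL scheme shown is a plausible s3-style example, not Ark's actual implementation (which lives in pkg/restic):

```go
package main

import "fmt"

type objectStorage struct{ bucket string }

type backupStorageLocation struct {
	provider      string
	objectStorage objectStorage
}

// repoIdentifier sketches a location-based identifier: provider plus
// bucket plus a per-repo suffix. The exact format Ark produces is not
// reproduced here.
func repoIdentifier(loc backupStorageLocation, name string) string {
	return fmt.Sprintf("%s:%s/restic/%s", loc.provider, loc.objectStorage.bucket, name)
}

func main() {
	loc := backupStorageLocation{provider: "s3", objectStorage: objectStorage{bucket: "a-bucket"}}
	fmt.Println(repoIdentifier(loc, "ns-1")) // s3:a-bucket/restic/ns-1
}
```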
@ -71,29 +71,29 @@ var nonRestorableResources = []string{
}

type restoreController struct {
namespace string
restoreClient arkv1client.RestoresGetter
backupClient arkv1client.BackupsGetter
restorer restore.Restorer
objectStoreConfig api.CloudProviderConfig
bucket string
pvProviderExists bool
backupLister listers.BackupLister
backupListerSynced cache.InformerSynced
restoreLister listers.RestoreLister
restoreListerSynced cache.InformerSynced
syncHandler func(restoreName string) error
queue workqueue.RateLimitingInterface
logger logrus.FieldLogger
logLevel logrus.Level
pluginRegistry plugin.Registry
metrics *metrics.ServerMetrics
namespace string
restoreClient arkv1client.RestoresGetter
backupClient arkv1client.BackupsGetter
restorer restore.Restorer
pvProviderExists bool
backupLister listers.BackupLister
backupListerSynced cache.InformerSynced
restoreLister listers.RestoreLister
restoreListerSynced cache.InformerSynced
backupLocationLister listers.BackupStorageLocationLister
backupLocationListerSynced cache.InformerSynced
syncHandler func(restoreName string) error
queue workqueue.RateLimitingInterface
logger logrus.FieldLogger
logLevel logrus.Level
defaultBackupLocation string
metrics *metrics.ServerMetrics

getBackup cloudprovider.GetBackupFunc
downloadBackup cloudprovider.DownloadBackupFunc
uploadRestoreLog cloudprovider.UploadRestoreLogFunc
uploadRestoreResults cloudprovider.UploadRestoreResultsFunc
newPluginManager func(logger logrus.FieldLogger, logLevel logrus.Level, pluginRegistry plugin.Registry) plugin.Manager
newPluginManager func(logrus.FieldLogger) plugin.Manager
}

func NewRestoreController(
@ -102,41 +102,40 @@ func NewRestoreController(
restoreClient arkv1client.RestoresGetter,
backupClient arkv1client.BackupsGetter,
restorer restore.Restorer,
objectStoreConfig api.CloudProviderConfig,
bucket string,
backupInformer informers.BackupInformer,
backupLocationInformer informers.BackupStorageLocationInformer,
pvProviderExists bool,
logger logrus.FieldLogger,
logLevel logrus.Level,
pluginRegistry plugin.Registry,
newPluginManager func(logrus.FieldLogger) plugin.Manager,
defaultBackupLocation string,
metrics *metrics.ServerMetrics,

) Interface {
c := &restoreController{
namespace: namespace,
restoreClient: restoreClient,
backupClient: backupClient,
restorer: restorer,
objectStoreConfig: objectStoreConfig,
bucket: bucket,
pvProviderExists: pvProviderExists,
backupLister: backupInformer.Lister(),
backupListerSynced: backupInformer.Informer().HasSynced,
restoreLister: restoreInformer.Lister(),
restoreListerSynced: restoreInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "restore"),
logger: logger,
logLevel: logLevel,
pluginRegistry: pluginRegistry,
metrics: metrics,
namespace: namespace,
restoreClient: restoreClient,
backupClient: backupClient,
restorer: restorer,
pvProviderExists: pvProviderExists,
backupLister: backupInformer.Lister(),
backupListerSynced: backupInformer.Informer().HasSynced,
restoreLister: restoreInformer.Lister(),
restoreListerSynced: restoreInformer.Informer().HasSynced,
backupLocationLister: backupLocationInformer.Lister(),
backupLocationListerSynced: backupLocationInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "restore"),
logger: logger,
logLevel: logLevel,
defaultBackupLocation: defaultBackupLocation,
metrics: metrics,

// use variables to refer to these functions so they can be
// replaced with fakes for testing.
newPluginManager: newPluginManager,
getBackup: cloudprovider.GetBackup,
downloadBackup: cloudprovider.DownloadBackup,
uploadRestoreLog: cloudprovider.UploadRestoreLog,
uploadRestoreResults: cloudprovider.UploadRestoreResults,
newPluginManager: func(logger logrus.FieldLogger, logLevel logrus.Level, pluginRegistry plugin.Registry) plugin.Manager {
return plugin.NewManager(logger, logLevel, pluginRegistry)
},
}

c.syncHandler = c.processRestore
@ -193,7 +192,7 @@ func (c *restoreController) Run(ctx context.Context, numWorkers int) error {
defer c.logger.Info("Shutting down RestoreController")

c.logger.Info("Waiting for caches to sync")
if !cache.WaitForCacheSync(ctx.Done(), c.backupListerSynced, c.restoreListerSynced) {
if !cache.WaitForCacheSync(ctx.Done(), c.backupListerSynced, c.restoreListerSynced, c.backupLocationListerSynced) {
return errors.New("timed out waiting for caches to sync")
}
c.logger.Info("Caches are synced")
@ -280,31 +279,28 @@ func (c *restoreController) processRestore(key string) error {
// don't modify items in the cache
restore = restore.DeepCopy()

pluginManager := c.newPluginManager(logContext, logContext.Level, c.pluginRegistry)
pluginManager := c.newPluginManager(logContext)
defer pluginManager.CleanupClients()

objectStore, err := getObjectStore(c.objectStoreConfig, pluginManager)
if err != nil {
return errors.Wrap(err, "error initializing object store")
}

actions, err := pluginManager.GetRestoreItemActions()
if err != nil {
return errors.Wrap(err, "error initializing restore item actions")
}

// complete & validate restore
if restore.Status.ValidationErrors = c.completeAndValidate(objectStore, restore); len(restore.Status.ValidationErrors) > 0 {
restore.Status.Phase = api.RestorePhaseFailedValidation
} else {
restore.Status.Phase = api.RestorePhaseInProgress
}

// validate the restore and fetch the backup
info := c.validateAndComplete(restore, pluginManager)
backupScheduleName := restore.Spec.ScheduleName
// Register attempts after validation so we don't have to fetch the backup multiple times
c.metrics.RegisterRestoreAttempt(backupScheduleName)

// update status
if len(restore.Status.ValidationErrors) > 0 {
restore.Status.Phase = api.RestorePhaseFailedValidation
c.metrics.RegisterRestoreValidationFailed(backupScheduleName)
} else {
restore.Status.Phase = api.RestorePhaseInProgress
}

// patch to update status and persist to API
updatedRestore, err := patchRestore(original, restore, c.restoreClient)
if err != nil {
return errors.Wrapf(err, "error updating Restore phase to %s", restore.Status.Phase)

@ -314,15 +310,16 @@ func (c *restoreController) processRestore(key string) error {
restore = updatedRestore.DeepCopy()

if restore.Status.Phase == api.RestorePhaseFailedValidation {
c.metrics.RegisterRestoreValidationFailed(backupScheduleName)
return nil
}

logContext.Debug("Running restore")

// execution & upload of restore
restoreWarnings, restoreErrors, restoreFailure := c.runRestore(
restore,
actions,
objectStore,
info,
)

restore.Status.Warnings = len(restoreWarnings.Ark) + len(restoreWarnings.Cluster)
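The "patch to update status and persist to API" step above goes through a helper, patchRestore, whose implementation isn't shown in this hunk. Presumably it computes a JSON merge patch between the original object and the mutated copy, which is what the jsonpatch import earlier in this PR supports; a minimal sketch of that mechanic under that assumption:

```go
package main

import (
	"encoding/json"
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

type restoreStatus struct {
	Phase string `json:"phase,omitempty"`
}

type restore struct {
	Name   string        `json:"name"`
	Status restoreStatus `json:"status"`
}

func main() {
	original := restore{Name: "r1"}
	updated := original
	updated.Status.Phase = "InProgress"

	origJSON, _ := json.Marshal(original)
	updatedJSON, _ := json.Marshal(updated)

	// The merge patch contains only the changed fields; a controller would
	// send it to the API server via the typed client's Patch method.
	patch, err := jsonpatch.CreateMergePatch(origJSON, updatedJSON)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch)) // {"status":{"phase":"InProgress"}}
}
```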
@ -355,7 +352,13 @@ func (c *restoreController) processRestore(key string) error {
return nil
}

func (c *restoreController) completeAndValidate(objectStore cloudprovider.ObjectStore, restore *api.Restore) []string {
type backupInfo struct {
bucketName string
backup *api.Backup
objectStore cloudprovider.ObjectStore
}

func (c *restoreController) validateAndComplete(restore *api.Restore, pluginManager plugin.Manager) backupInfo {
// add non-restorable resources to restore's excluded resources
excludedResources := sets.NewString(restore.Spec.ExcludedResources...)
for _, nonrestorable := range nonRestorableResources {
@ -363,34 +366,34 @@ func (c *restoreController) completeAndValidate(objectStore cloudprovider.Object
restore.Spec.ExcludedResources = append(restore.Spec.ExcludedResources, nonrestorable)
}
}
var validationErrors []string

// validate that included resources don't contain any non-restorable resources
includedResources := sets.NewString(restore.Spec.IncludedResources...)
for _, nonRestorableResource := range nonRestorableResources {
if includedResources.Has(nonRestorableResource) {
validationErrors = append(validationErrors, fmt.Sprintf("%v are non-restorable resources", nonRestorableResource))
restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, fmt.Sprintf("%v are non-restorable resources", nonRestorableResource))
}
}

// validate included/excluded resources
for _, err := range collections.ValidateIncludesExcludes(restore.Spec.IncludedResources, restore.Spec.ExcludedResources) {
validationErrors = append(validationErrors, fmt.Sprintf("Invalid included/excluded resource lists: %v", err))
restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, fmt.Sprintf("Invalid included/excluded resource lists: %v", err))
}

// validate included/excluded namespaces
for _, err := range collections.ValidateIncludesExcludes(restore.Spec.IncludedNamespaces, restore.Spec.ExcludedNamespaces) {
validationErrors = append(validationErrors, fmt.Sprintf("Invalid included/excluded namespace lists: %v", err))
restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, fmt.Sprintf("Invalid included/excluded namespace lists: %v", err))
}

// validate that PV provider exists if we're restoring PVs
if boolptr.IsSetToTrue(restore.Spec.RestorePVs) && !c.pvProviderExists {
validationErrors = append(validationErrors, "Server is not configured for PV snapshot restores")
restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, "Server is not configured for PV snapshot restores")
}

// validate that exactly one of BackupName and ScheduleName have been specified
if !backupXorScheduleProvided(restore) {
return append(validationErrors, "Either a backup or schedule must be specified as a source for the restore, but not both")
restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, "Either a backup or schedule must be specified as a source for the restore, but not both")
return backupInfo{}
}

// if ScheduleName is specified, fill in BackupName with the most recent successful backup from
@ -402,33 +405,33 @@ func (c *restoreController) completeAndValidate(objectStore cloudprovider.Object
|
|||
|
||||
backups, err := c.backupLister.Backups(c.namespace).List(selector)
|
||||
if err != nil {
|
||||
return append(validationErrors, "Unable to list backups for schedule")
|
||||
restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, "Unable to list backups for schedule")
|
||||
return backupInfo{}
|
||||
}
|
||||
if len(backups) == 0 {
|
||||
return append(validationErrors, "No backups found for schedule")
|
||||
restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, "No backups found for schedule")
|
||||
}
|
||||
|
||||
if backup := mostRecentCompletedBackup(backups); backup != nil {
|
||||
restore.Spec.BackupName = backup.Name
|
||||
} else {
|
||||
return append(validationErrors, "No completed backups found for schedule")
|
||||
restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, "No completed backups found for schedule")
|
||||
return backupInfo{}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
backup *api.Backup
|
||||
err error
|
||||
)
|
||||
if backup, err = c.fetchBackup(objectStore, restore.Spec.BackupName); err != nil {
|
||||
return append(validationErrors, fmt.Sprintf("Error retrieving backup: %v", err))
|
||||
info, err := c.fetchBackupInfo(restore.Spec.BackupName, pluginManager)
|
||||
if err != nil {
|
||||
restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, fmt.Sprintf("Error retrieving backup: %v", err))
|
||||
return backupInfo{}
|
||||
}
|
||||
|
||||
// Fill in the ScheduleName so it's easier to consume for metrics.
|
||||
if restore.Spec.ScheduleName == "" {
|
||||
restore.Spec.ScheduleName = backup.GetLabels()["ark-schedule"]
|
||||
restore.Spec.ScheduleName = info.backup.GetLabels()["ark-schedule"]
|
||||
}
|
||||
|
||||
return validationErrors
|
||||
return info
|
||||
}
|
||||
|
||||
// backupXorScheduleProvided returns true if exactly one of BackupName and
|
||||
|
@ -462,43 +465,112 @@ func mostRecentCompletedBackup(backups []*api.Backup) *api.Backup {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *restoreController) fetchBackup(objectStore cloudprovider.ObjectStore, name string) (*api.Backup, error) {
|
||||
backup, err := c.backupLister.Backups(c.namespace).Get(name)
|
||||
if err == nil {
|
||||
return backup, nil
|
||||
}
|
||||
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
logContext := c.logger.WithField("backupName", name)
|
||||
|
||||
logContext.Debug("Backup not found in backupLister, checking object storage directly")
|
||||
backup, err = c.getBackup(objectStore, c.bucket, name)
|
||||
// fetchBackupInfo checks the backup lister for a backup that matches the given name. If it doesn't
|
||||
// find it, it tries to retrieve it from one of the backup storage locations.
|
||||
func (c *restoreController) fetchBackupInfo(backupName string, pluginManager plugin.Manager) (backupInfo, error) {
|
||||
var info backupInfo
|
||||
var err error
|
||||
info.backup, err = c.backupLister.Backups(c.namespace).Get(backupName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return backupInfo{}, errors.WithStack(err)
|
||||
}
|
||||
|
||||
logContext := c.logger.WithField("backupName", backupName)
|
||||
logContext.Debug("Backup not found in backupLister, checking each backup location directly, starting with default...")
|
||||
return c.fetchFromBackupStorage(backupName, pluginManager)
|
||||
}
|
||||
|
||||
location, err := c.backupLocationLister.BackupStorageLocations(c.namespace).Get(info.backup.Spec.StorageLocation)
|
||||
if err != nil {
|
||||
return backupInfo{}, errors.WithStack(err)
|
||||
}
|
||||
|
||||
info.objectStore, err = getObjectStoreForLocation(location, pluginManager)
|
||||
if err != nil {
|
||||
return backupInfo{}, errors.Wrap(err, "error initializing object store")
|
||||
}
|
||||
info.bucketName = location.Spec.ObjectStorage.Bucket
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// fetchFromBackupStorage checks each backup storage location, starting with the default,
|
||||
// looking for a backup that matches the given backup name.
|
||||
func (c *restoreController) fetchFromBackupStorage(backupName string, pluginManager plugin.Manager) (backupInfo, error) {
|
||||
locations, err := c.backupLocationLister.BackupStorageLocations(c.namespace).List(labels.Everything())
|
||||
if err != nil {
|
||||
return backupInfo{}, errors.WithStack(err)
|
||||
}
|
||||
|
||||
orderedLocations := orderedBackupLocations(locations, c.defaultBackupLocation)
|
||||
|
||||
logContext := c.logger.WithField("backupName", backupName)
|
||||
for _, location := range orderedLocations {
|
||||
info, err := c.backupInfoForLocation(location, backupName, pluginManager)
|
||||
if err != nil {
|
||||
logContext.WithField("locationName", location.Name).WithError(err).Error("Unable to fetch backup from object storage location")
|
||||
continue
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
return backupInfo{}, errors.New("not able to fetch from backup storage")
|
||||
}
|
||||
|
||||
// orderedBackupLocations returns a new slice with the default backup location first (if it exists),
|
||||
// followed by the rest of the locations in no particular order.
|
||||
func orderedBackupLocations(locations []*api.BackupStorageLocation, defaultLocationName string) []*api.BackupStorageLocation {
|
||||
var result []*api.BackupStorageLocation
|
||||
|
||||
for i := range locations {
|
||||
if locations[i].Name == defaultLocationName {
|
||||
// put the default location first
|
||||
result = append(result, locations[i])
|
||||
// append everything before the default
|
||||
result = append(result, locations[:i]...)
|
||||
// append everything after the default
|
||||
result = append(result, locations[i+1:]...)
|
||||
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
return locations
|
||||
}
|
||||
|
||||
func (c *restoreController) backupInfoForLocation(location *api.BackupStorageLocation, backupName string, pluginManager plugin.Manager) (backupInfo, error) {
|
||||
objectStore, err := getObjectStoreForLocation(location, pluginManager)
|
||||
if err != nil {
|
||||
return backupInfo{}, err
|
||||
}
|
||||
|
||||
backup, err := c.getBackup(objectStore, location.Spec.ObjectStorage.Bucket, backupName)
|
||||
if err != nil {
|
||||
return backupInfo{}, err
|
||||
}
|
||||
|
||||
// ResourceVersion needs to be cleared in order to create the object in the API
|
||||
backup.ResourceVersion = ""
|
||||
// Clear out the namespace too, just in case
|
||||
// Clear out the namespace, in case the backup was made in a different cluster, with a different namespace
|
||||
backup.Namespace = ""
|
||||
|
||||
created, createErr := c.backupClient.Backups(c.namespace).Create(backup)
|
||||
if createErr != nil {
|
||||
logContext.WithError(errors.WithStack(createErr)).Error("Unable to create API object for Backup")
|
||||
} else {
|
||||
backup = created
|
||||
backupCreated, err := c.backupClient.Backups(c.namespace).Create(backup)
|
||||
if err != nil {
|
||||
return backupInfo{}, errors.WithStack(err)
|
||||
}
|
||||
|
||||
return backup, nil
|
||||
return backupInfo{
|
||||
bucketName: location.Spec.ObjectStorage.Bucket,
|
||||
backup: backupCreated,
|
||||
objectStore: objectStore,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *restoreController) runRestore(
|
||||
restore *api.Restore,
|
||||
actions []restore.ItemAction,
|
||||
objectStore cloudprovider.ObjectStore,
|
||||
info backupInfo,
|
||||
) (restoreWarnings, restoreErrors api.RestoreResult, restoreFailure error) {
|
||||
logFile, err := ioutil.TempFile("", "")
|
||||
if err != nil {
|
||||
|
@ -530,14 +602,7 @@ func (c *restoreController) runRestore(
|
|||
"backup": restore.Spec.BackupName,
|
||||
})
|
||||
|
||||
backup, err := c.fetchBackup(objectStore, restore.Spec.BackupName)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Error("Error getting backup")
|
||||
restoreErrors.Ark = append(restoreErrors.Ark, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
backupFile, err := downloadToTempFile(objectStore, c.bucket, restore.Spec.BackupName, c.downloadBackup, c.logger)
|
||||
backupFile, err := downloadToTempFile(info.objectStore, info.bucketName, restore.Spec.BackupName, c.downloadBackup, c.logger)
|
||||
if err != nil {
|
||||
logContext.WithError(err).Error("Error downloading backup")
|
||||
restoreErrors.Ark = append(restoreErrors.Ark, err.Error())
|
||||
|
@ -558,7 +623,7 @@ func (c *restoreController) runRestore(
|
|||
// Any return statement above this line means a total restore failure
|
||||
// Some failures after this line *may* be a total restore failure
|
||||
logContext.Info("starting restore")
|
||||
restoreWarnings, restoreErrors = c.restorer.Restore(logContext, restore, backup, backupFile, actions)
|
||||
restoreWarnings, restoreErrors = c.restorer.Restore(logContext, restore, info.backup, backupFile, actions)
|
||||
logContext.Info("restore completed")
|
||||
|
||||
// Try to upload the log file. This is best-effort. If we fail, we'll add to the ark errors.
|
||||
|
@ -571,7 +636,7 @@ func (c *restoreController) runRestore(
|
|||
return
|
||||
}
|
||||
|
||||
if err := c.uploadRestoreLog(objectStore, c.bucket, restore.Spec.BackupName, restore.Name, logFile); err != nil {
|
||||
if err := c.uploadRestoreLog(info.objectStore, info.bucketName, restore.Spec.BackupName, restore.Name, logFile); err != nil {
|
||||
restoreErrors.Ark = append(restoreErrors.Ark, fmt.Sprintf("error uploading log file to object storage: %v", err))
|
||||
}
|
||||
|
||||
|
@ -592,7 +657,7 @@ func (c *restoreController) runRestore(
|
|||
logContext.WithError(errors.WithStack(err)).Error("Error resetting results file offset to 0")
|
||||
return
|
||||
}
|
||||
if err := c.uploadRestoreResults(objectStore, c.bucket, restore.Spec.BackupName, restore.Name, resultsFile); err != nil {
|
||||
if err := c.uploadRestoreResults(info.objectStore, info.bucketName, restore.Spec.BackupName, restore.Name, resultsFile); err != nil {
|
||||
logContext.WithError(errors.WithStack(err)).Error("Error uploading results files to object storage")
|
||||
}
|
||||
|
||||
|
|
|
@ -19,17 +19,16 @@ package controller
|
|||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
core "k8s.io/client-go/testing"
|
||||
|
@ -47,10 +46,11 @@ import (
|
|||
arktest "github.com/heptio/ark/pkg/util/test"
|
||||
)
|
||||
|
||||
func TestFetchBackup(t *testing.T) {
|
||||
func TestFetchBackupInfo(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
backupName string
|
||||
informerLocations []*api.BackupStorageLocation
|
||||
informerBackups []*api.Backup
|
||||
backupServiceBackup *api.Backup
|
||||
backupServiceError error
|
||||
|
@ -58,16 +58,19 @@ func TestFetchBackup(t *testing.T) {
|
|||
expectedErr bool
|
||||
}{
|
||||
{
|
||||
name: "lister has backup",
|
||||
backupName: "backup-1",
|
||||
informerBackups: []*api.Backup{arktest.NewTestBackup().WithName("backup-1").Backup},
|
||||
expectedRes: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
name: "lister has backup",
|
||||
backupName: "backup-1",
|
||||
informerLocations: []*api.BackupStorageLocation{arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation},
|
||||
informerBackups: []*api.Backup{arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup},
|
||||
expectedRes: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup,
|
||||
},
|
||||
{
|
||||
name: "backupSvc has backup",
|
||||
name: "lister does not have a backup, but backupSvc does",
|
||||
backupName: "backup-1",
|
||||
backupServiceBackup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
expectedRes: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
backupServiceBackup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup,
|
||||
informerLocations: []*api.BackupStorageLocation{arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation},
|
||||
informerBackups: []*api.Backup{arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup},
|
||||
expectedRes: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup,
|
||||
},
|
||||
{
|
||||
name: "no backup",
|
||||
|
@ -84,26 +87,40 @@ func TestFetchBackup(t *testing.T) {
|
|||
restorer = &fakeRestorer{}
|
||||
sharedInformers = informers.NewSharedInformerFactory(client, 0)
|
||||
logger = arktest.NewLogger()
|
||||
pluginManager = &pluginmocks.Manager{}
|
||||
objectStore = &arktest.ObjectStore{}
|
||||
)
|
||||
|
||||
defer restorer.AssertExpectations(t)
|
||||
defer objectStore.AssertExpectations(t)
|
||||
|
||||
c := NewRestoreController(
|
||||
api.DefaultNamespace,
|
||||
sharedInformers.Ark().V1().Restores(),
|
||||
client.ArkV1(),
|
||||
client.ArkV1(),
|
||||
restorer,
|
||||
api.CloudProviderConfig{},
|
||||
"bucket",
|
||||
sharedInformers.Ark().V1().Backups(),
|
||||
sharedInformers.Ark().V1().BackupStorageLocations(),
|
||||
false,
|
||||
logger,
|
||||
logrus.InfoLevel,
|
||||
nil, //pluginRegistry
|
||||
func(logrus.FieldLogger) plugin.Manager { return pluginManager },
|
||||
"default",
|
||||
metrics.NewServerMetrics(),
|
||||
).(*restoreController)
|
||||
|
||||
for _, itm := range test.informerBackups {
|
||||
sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(itm)
|
||||
if test.backupServiceError == nil {
|
||||
pluginManager.On("GetObjectStore", "myCloud").Return(objectStore, nil)
|
||||
objectStore.On("Init", mock.Anything).Return(nil)
|
||||
|
||||
for _, itm := range test.informerLocations {
|
||||
sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(itm)
|
||||
}
|
||||
|
||||
for _, itm := range test.informerBackups {
|
||||
sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(itm)
|
||||
}
|
||||
}
|
||||
|
||||
if test.backupServiceBackup != nil || test.backupServiceError != nil {
|
||||
|
@ -114,10 +131,10 @@ func TestFetchBackup(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
backup, err := c.fetchBackup(nil, test.backupName)
|
||||
info, err := c.fetchBackupInfo(test.backupName, pluginManager)
|
||||
|
||||
if assert.Equal(t, test.expectedErr, err != nil) {
|
||||
assert.Equal(t, test.expectedRes, backup)
|
||||
assert.Equal(t, test.expectedRes, info.backup)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -175,18 +192,15 @@ func TestProcessRestoreSkips(t *testing.T) {
|
|||
client.ArkV1(),
|
||||
client.ArkV1(),
|
||||
restorer,
|
||||
api.CloudProviderConfig{Name: "myCloud"},
|
||||
"bucket",
|
||||
sharedInformers.Ark().V1().Backups(),
|
||||
sharedInformers.Ark().V1().BackupStorageLocations(),
|
||||
false, // pvProviderExists
|
||||
logger,
|
||||
logrus.InfoLevel,
|
||||
nil, // pluginRegistry
|
||||
func(logrus.FieldLogger) plugin.Manager { return pluginManager },
|
||||
"default",
|
||||
metrics.NewServerMetrics(),
|
||||
).(*restoreController)
|
||||
c.newPluginManager = func(logger logrus.FieldLogger, logLevel logrus.Level, pluginRegistry plugin.Registry) plugin.Manager {
|
||||
return pluginManager
|
||||
}
|
||||
|
||||
if test.restore != nil {
|
||||
sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(test.restore)
|
||||
|
@ -197,10 +211,12 @@ func TestProcessRestoreSkips(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessRestore(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
restoreKey string
|
||||
location *api.BackupStorageLocation
|
||||
restore *api.Restore
|
||||
backup *api.Backup
|
||||
restorerError error
|
||||
|
@ -217,16 +233,18 @@ func TestProcessRestore(t *testing.T) {
|
|||
}{
|
||||
{
|
||||
name: "restore with both namespace in both includedNamespaces and excludedNamespaces fails validation",
|
||||
location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation,
|
||||
restore: NewRestore("foo", "bar", "backup-1", "another-1", "*", api.RestorePhaseNew).WithExcludedNamespace("another-1").Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup,
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseFailedValidation),
|
||||
expectedValidationErrors: []string{"Invalid included/excluded namespace lists: excludes list cannot contain an item in the includes list: another-1"},
|
||||
},
|
||||
{
|
||||
name: "restore with resource in both includedResources and excludedResources fails validation",
|
||||
location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation,
|
||||
restore: NewRestore("foo", "bar", "backup-1", "*", "a-resource", api.RestorePhaseNew).WithExcludedResource("a-resource").Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup,
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseFailedValidation),
|
||||
expectedValidationErrors: []string{"Invalid included/excluded resource lists: excludes list cannot contain an item in the includes list: a-resource"},
|
||||
|
@ -246,11 +264,13 @@ func TestProcessRestore(t *testing.T) {
|
|||
expectedValidationErrors: []string{"Either a backup or schedule must be specified as a source for the restore, but not both"},
|
||||
},
|
||||
{
|
||||
name: "valid restore with schedule name gets executed",
|
||||
restore: NewRestore("foo", "bar", "", "ns-1", "", api.RestorePhaseNew).WithSchedule("sched-1").Restore,
|
||||
name: "valid restore with schedule name gets executed",
|
||||
location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation,
|
||||
restore: NewRestore("foo", "bar", "", "ns-1", "", api.RestorePhaseNew).WithSchedule("sched-1").Restore,
|
||||
backup: arktest.
|
||||
NewTestBackup().
|
||||
WithName("backup-1").
|
||||
WithStorageLocation("default").
|
||||
WithLabel("ark-schedule", "sched-1").
|
||||
WithPhase(api.BackupPhaseCompleted).
|
||||
Backup,
|
||||
|
@ -263,13 +283,14 @@ func TestProcessRestore(t *testing.T) {
|
|||
restore: NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseNew).Restore,
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseFailedValidation),
|
||||
expectedValidationErrors: []string{"Error retrieving backup: no backup here"},
|
||||
expectedValidationErrors: []string{"Error retrieving backup: not able to fetch from backup storage"},
|
||||
backupServiceGetBackupError: errors.New("no backup here"),
|
||||
},
|
||||
{
|
||||
name: "restorer throwing an error causes the restore to fail",
|
||||
location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation,
|
||||
restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup,
|
||||
restorerError: errors.New("blarg"),
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseInProgress),
|
||||
|
@ -278,16 +299,18 @@ func TestProcessRestore(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "valid restore gets executed",
|
||||
location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation,
|
||||
restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup,
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseInProgress),
|
||||
expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore,
|
||||
},
|
||||
{
|
||||
name: "valid restore with RestorePVs=true gets executed when allowRestoreSnapshots=true",
|
||||
location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation,
|
||||
restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).WithRestorePVs(true).Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup,
|
||||
allowRestoreSnapshots: true,
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseInProgress),
|
||||
|
@ -295,16 +318,18 @@ func TestProcessRestore(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "restore with RestorePVs=true fails validation when allowRestoreSnapshots=false",
|
||||
location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation,
|
||||
restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).WithRestorePVs(true).Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup,
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseFailedValidation),
|
||||
expectedValidationErrors: []string{"Server is not configured for PV snapshot restores"},
|
||||
},
|
||||
{
|
||||
name: "restoration of nodes is not supported",
|
||||
location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation,
|
||||
restore: NewRestore("foo", "bar", "backup-1", "ns-1", "nodes", api.RestorePhaseNew).Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup,
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseFailedValidation),
|
||||
expectedValidationErrors: []string{
|
||||
|
@ -314,8 +339,9 @@ func TestProcessRestore(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "restoration of events is not supported",
|
||||
location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation,
|
||||
restore: NewRestore("foo", "bar", "backup-1", "ns-1", "events", api.RestorePhaseNew).Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup,
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseFailedValidation),
|
||||
expectedValidationErrors: []string{
|
||||
|
@ -325,8 +351,9 @@ func TestProcessRestore(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "restoration of events.events.k8s.io is not supported",
|
||||
location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation,
|
||||
restore: NewRestore("foo", "bar", "backup-1", "ns-1", "events.events.k8s.io", api.RestorePhaseNew).Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup,
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseFailedValidation),
|
||||
expectedValidationErrors: []string{
|
||||
|
@ -336,8 +363,9 @@ func TestProcessRestore(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "restoration of backups.ark.heptio.com is not supported",
|
||||
location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation,
|
||||
restore: NewRestore("foo", "bar", "backup-1", "ns-1", "backups.ark.heptio.com", api.RestorePhaseNew).Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup,
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseFailedValidation),
|
||||
expectedValidationErrors: []string{
|
||||
|
@ -347,8 +375,9 @@ func TestProcessRestore(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "restoration of restores.ark.heptio.com is not supported",
|
||||
location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation,
|
||||
restore: NewRestore("foo", "bar", "backup-1", "ns-1", "restores.ark.heptio.com", api.RestorePhaseNew).Restore,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup,
|
||||
expectedErr: false,
|
||||
expectedPhase: string(api.RestorePhaseFailedValidation),
|
||||
expectedValidationErrors: []string{
|
||||
|
@ -358,11 +387,12 @@ func TestProcessRestore(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "backup download error results in failed restore",
|
||||
restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Restore,
|
||||
location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation,
|
||||
restore: NewRestore(api.DefaultNamespace, "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Restore,
|
||||
expectedPhase: string(api.RestorePhaseInProgress),
|
||||
expectedFinalPhase: string(api.RestorePhaseFailed),
|
||||
backupServiceDownloadBackupError: errors.New("Couldn't download backup"),
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").Backup,
|
||||
backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -377,6 +407,7 @@ func TestProcessRestore(t *testing.T) {
|
|||
objectStore = &arktest.ObjectStore{}
|
||||
)
|
||||
defer restorer.AssertExpectations(t)
|
||||
|
||||
defer objectStore.AssertExpectations(t)
|
||||
|
||||
c := NewRestoreController(
|
||||
|
@ -385,23 +416,26 @@ func TestProcessRestore(t *testing.T) {
|
|||
client.ArkV1(),
|
||||
client.ArkV1(),
|
||||
restorer,
|
||||
api.CloudProviderConfig{Name: "myCloud"},
|
||||
"bucket",
|
||||
sharedInformers.Ark().V1().Backups(),
|
||||
sharedInformers.Ark().V1().BackupStorageLocations(),
|
||||
test.allowRestoreSnapshots,
|
||||
logger,
|
||||
logrus.InfoLevel,
|
||||
nil, // pluginRegistry
|
||||
func(logrus.FieldLogger) plugin.Manager { return pluginManager },
|
||||
"default",
|
||||
metrics.NewServerMetrics(),
|
||||
).(*restoreController)
|
||||
c.newPluginManager = func(logger logrus.FieldLogger, logLevel logrus.Level, pluginRegistry plugin.Registry) plugin.Manager {
|
||||
return pluginManager
|
||||
|
||||
if test.location != nil {
|
||||
sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(test.location)
|
||||
}
|
||||
if test.backup != nil {
|
||||
sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup)
|
||||
pluginManager.On("GetObjectStore", "myCloud").Return(objectStore, nil)
|
||||
objectStore.On("Init", mock.Anything).Return(nil)
|
||||
}
|
||||
|
||||
if test.restore != nil {
|
||||
pluginManager.On("GetObjectStore", "myCloud").Return(objectStore, nil)
|
||||
objectStore.On("Init", mock.Anything).Return(nil)
|
||||
|
||||
sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(test.restore)
|
||||
|
||||
// this is necessary so the Patch() call returns the appropriate object
|
||||
|
@ -590,11 +624,12 @@ func TestProcessRestore(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestCompleteAndValidateWhenScheduleNameSpecified(t *testing.T) {
|
||||
func TestvalidateAndCompleteWhenScheduleNameSpecified(t *testing.T) {
|
||||
var (
|
||||
client = fake.NewSimpleClientset()
|
||||
sharedInformers = informers.NewSharedInformerFactory(client, 0)
|
||||
logger = arktest.NewLogger()
|
||||
pluginManager = &pluginmocks.Manager{}
|
||||
)
|
||||
|
||||
c := NewRestoreController(
|
||||
|
@ -603,13 +638,13 @@ func TestCompleteAndValidateWhenScheduleNameSpecified(t *testing.T) {
|
|||
client.ArkV1(),
|
||||
client.ArkV1(),
|
||||
nil,
|
||||
api.CloudProviderConfig{Name: "myCloud"},
|
||||
"bucket",
|
||||
sharedInformers.Ark().V1().Backups(),
|
||||
sharedInformers.Ark().V1().BackupStorageLocations(),
|
||||
false,
|
||||
logger,
|
||||
logrus.DebugLevel,
|
||||
nil,
|
||||
"default",
|
||||
nil,
|
||||
).(*restoreController)
|
||||
|
||||
|
@ -632,7 +667,7 @@ func TestCompleteAndValidateWhenScheduleNameSpecified(t *testing.T) {
|
|||
Backup,
|
||||
))
|
||||
|
||||
errs := c.completeAndValidate(nil, restore)
|
||||
errs := c.validateAndComplete(restore, pluginManager)
|
||||
assert.Equal(t, []string{"No backups found for schedule"}, errs)
|
||||
assert.Empty(t, restore.Spec.BackupName)
|
||||
|
||||
|
@ -645,7 +680,7 @@ func TestCompleteAndValidateWhenScheduleNameSpecified(t *testing.T) {
|
|||
Backup,
|
||||
))
|
||||
|
||||
errs = c.completeAndValidate(nil, restore)
|
||||
errs = c.validateAndComplete(restore, pluginManager)
|
||||
assert.Equal(t, []string{"No completed backups found for schedule"}, errs)
|
||||
assert.Empty(t, restore.Spec.BackupName)
|
||||
|
||||
|
@ -669,7 +704,7 @@ func TestCompleteAndValidateWhenScheduleNameSpecified(t *testing.T) {
|
|||
Backup,
|
||||
))
|
||||
|
||||
errs = c.completeAndValidate(nil, restore)
|
||||
errs = c.validateAndComplete(restore, pluginManager)
|
||||
assert.Nil(t, errs)
|
||||
assert.Equal(t, "bar", restore.Spec.BackupName)
|
||||
}
|
||||
|
|
|
@ -28,6 +28,7 @@ import (
|
|||
type ArkV1Interface interface {
|
||||
RESTClient() rest.Interface
|
||||
BackupsGetter
|
||||
BackupStorageLocationsGetter
|
||||
ConfigsGetter
|
||||
DeleteBackupRequestsGetter
|
||||
DownloadRequestsGetter
|
||||
|
@ -47,6 +48,10 @@ func (c *ArkV1Client) Backups(namespace string) BackupInterface {
|
|||
return newBackups(c, namespace)
|
||||
}
|
||||
|
||||
func (c *ArkV1Client) BackupStorageLocations(namespace string) BackupStorageLocationInterface {
|
||||
return newBackupStorageLocations(c, namespace)
|
||||
}
|
||||
|
||||
func (c *ArkV1Client) Configs(namespace string) ConfigInterface {
|
||||
return newConfigs(c, namespace)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,174 @@
|
|||
/*
|
||||
Copyright 2018 the Heptio Ark contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
v1 "github.com/heptio/ark/pkg/apis/ark/v1"
|
||||
scheme "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme"
|
||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
// BackupStorageLocationsGetter has a method to return a BackupStorageLocationInterface.
|
||||
// A group's client should implement this interface.
|
||||
type BackupStorageLocationsGetter interface {
|
||||
BackupStorageLocations(namespace string) BackupStorageLocationInterface
|
||||
}
|
||||
|
||||
// BackupStorageLocationInterface has methods to work with BackupStorageLocation resources.
|
||||
type BackupStorageLocationInterface interface {
|
||||
Create(*v1.BackupStorageLocation) (*v1.BackupStorageLocation, error)
|
||||
Update(*v1.BackupStorageLocation) (*v1.BackupStorageLocation, error)
|
||||
UpdateStatus(*v1.BackupStorageLocation) (*v1.BackupStorageLocation, error)
|
||||
Delete(name string, options *meta_v1.DeleteOptions) error
|
||||
DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error
|
||||
Get(name string, options meta_v1.GetOptions) (*v1.BackupStorageLocation, error)
|
||||
List(opts meta_v1.ListOptions) (*v1.BackupStorageLocationList, error)
|
||||
Watch(opts meta_v1.ListOptions) (watch.Interface, error)
|
||||
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.BackupStorageLocation, err error)
|
||||
BackupStorageLocationExpansion
|
||||
}
|
||||
|
||||
// backupStorageLocations implements BackupStorageLocationInterface
|
||||
type backupStorageLocations struct {
|
||||
client rest.Interface
|
||||
ns string
|
||||
}
|
||||
|
||||
// newBackupStorageLocations returns a BackupStorageLocations
|
||||
func newBackupStorageLocations(c *ArkV1Client, namespace string) *backupStorageLocations {
|
||||
return &backupStorageLocations{
|
||||
client: c.RESTClient(),
|
||||
ns: namespace,
|
||||
}
|
||||
}
|
||||
|
||||
// Get takes name of the backupStorageLocation, and returns the corresponding backupStorageLocation object, and an error if there is any.
|
||||
func (c *backupStorageLocations) Get(name string, options meta_v1.GetOptions) (result *v1.BackupStorageLocation, err error) {
|
||||
result = &v1.BackupStorageLocation{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("backupstoragelocations").
|
||||
Name(name).
|
||||
VersionedParams(&options, scheme.ParameterCodec).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of BackupStorageLocations that match those selectors.
|
||||
func (c *backupStorageLocations) List(opts meta_v1.ListOptions) (result *v1.BackupStorageLocationList, err error) {
|
||||
result = &v1.BackupStorageLocationList{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("backupstoragelocations").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested backupStorageLocations.
|
||||
func (c *backupStorageLocations) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {
|
||||
opts.Watch = true
|
||||
return c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("backupstoragelocations").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Watch()
|
||||
}
|
||||
|
||||
// Create takes the representation of a backupStorageLocation and creates it. Returns the server's representation of the backupStorageLocation, and an error, if there is any.
|
||||
func (c *backupStorageLocations) Create(backupStorageLocation *v1.BackupStorageLocation) (result *v1.BackupStorageLocation, err error) {
|
||||
result = &v1.BackupStorageLocation{}
|
||||
err = c.client.Post().
|
||||
Namespace(c.ns).
|
||||
Resource("backupstoragelocations").
|
||||
Body(backupStorageLocation).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Update takes the representation of a backupStorageLocation and updates it. Returns the server's representation of the backupStorageLocation, and an error, if there is any.
|
||||
func (c *backupStorageLocations) Update(backupStorageLocation *v1.BackupStorageLocation) (result *v1.BackupStorageLocation, err error) {
|
||||
result = &v1.BackupStorageLocation{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("backupstoragelocations").
|
||||
Name(backupStorageLocation.Name).
|
||||
Body(backupStorageLocation).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
|
||||
func (c *backupStorageLocations) UpdateStatus(backupStorageLocation *v1.BackupStorageLocation) (result *v1.BackupStorageLocation, err error) {
|
||||
result = &v1.BackupStorageLocation{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("backupstoragelocations").
|
||||
Name(backupStorageLocation.Name).
|
||||
SubResource("status").
|
||||
Body(backupStorageLocation).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete takes name of the backupStorageLocation and deletes it. Returns an error if one occurs.
|
||||
func (c *backupStorageLocations) Delete(name string, options *meta_v1.DeleteOptions) error {
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("backupstoragelocations").
|
||||
Name(name).
|
||||
Body(options).
|
||||
Do().
|
||||
Error()
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *backupStorageLocations) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error {
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("backupstoragelocations").
|
||||
VersionedParams(&listOptions, scheme.ParameterCodec).
|
||||
Body(options).
|
||||
Do().
|
||||
Error()
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched backupStorageLocation.
|
||||
func (c *backupStorageLocations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.BackupStorageLocation, err error) {
|
||||
result = &v1.BackupStorageLocation{}
|
||||
err = c.client.Patch(pt).
|
||||
Namespace(c.ns).
|
||||
Resource("backupstoragelocations").
|
||||
SubResource(subresources...).
|
||||
Name(name).
|
||||
Body(data).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
|
@ -32,6 +32,10 @@ func (c *FakeArkV1) Backups(namespace string) v1.BackupInterface {
|
|||
return &FakeBackups{c, namespace}
|
||||
}
|
||||
|
||||
func (c *FakeArkV1) BackupStorageLocations(namespace string) v1.BackupStorageLocationInterface {
|
||||
return &FakeBackupStorageLocations{c, namespace}
|
||||
}
|
||||
|
||||
func (c *FakeArkV1) Configs(namespace string) v1.ConfigInterface {
|
||||
return &FakeConfigs{c, namespace}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,140 @@
|
|||
/*
|
||||
Copyright 2018 the Heptio Ark contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package fake
|
||||
|
||||
import (
|
||||
ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
testing "k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
// FakeBackupStorageLocations implements BackupStorageLocationInterface
|
||||
type FakeBackupStorageLocations struct {
|
||||
Fake *FakeArkV1
|
||||
ns string
|
||||
}
|
||||
|
||||
var backupstoragelocationsResource = schema.GroupVersionResource{Group: "ark.heptio.com", Version: "v1", Resource: "backupstoragelocations"}
|
||||
|
||||
var backupstoragelocationsKind = schema.GroupVersionKind{Group: "ark.heptio.com", Version: "v1", Kind: "BackupStorageLocation"}
|
||||
|
||||
// Get takes name of the backupStorageLocation, and returns the corresponding backupStorageLocation object, and an error if there is any.
|
||||
func (c *FakeBackupStorageLocations) Get(name string, options v1.GetOptions) (result *ark_v1.BackupStorageLocation, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetAction(backupstoragelocationsResource, c.ns, name), &ark_v1.BackupStorageLocation{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*ark_v1.BackupStorageLocation), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of BackupStorageLocations that match those selectors.
|
||||
func (c *FakeBackupStorageLocations) List(opts v1.ListOptions) (result *ark_v1.BackupStorageLocationList, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewListAction(backupstoragelocationsResource, backupstoragelocationsKind, c.ns, opts), &ark_v1.BackupStorageLocationList{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &ark_v1.BackupStorageLocationList{ListMeta: obj.(*ark_v1.BackupStorageLocationList).ListMeta}
|
||||
for _, item := range obj.(*ark_v1.BackupStorageLocationList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested backupStorageLocations.
|
||||
func (c *FakeBackupStorageLocations) Watch(opts v1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewWatchAction(backupstoragelocationsResource, c.ns, opts))
|
||||
|
||||
}
|
||||
|
||||
// Create takes the representation of a backupStorageLocation and creates it. Returns the server's representation of the backupStorageLocation, and an error, if there is any.
|
||||
func (c *FakeBackupStorageLocations) Create(backupStorageLocation *ark_v1.BackupStorageLocation) (result *ark_v1.BackupStorageLocation, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewCreateAction(backupstoragelocationsResource, c.ns, backupStorageLocation), &ark_v1.BackupStorageLocation{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*ark_v1.BackupStorageLocation), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a backupStorageLocation and updates it. Returns the server's representation of the backupStorageLocation, and an error, if there is any.
|
||||
func (c *FakeBackupStorageLocations) Update(backupStorageLocation *ark_v1.BackupStorageLocation) (result *ark_v1.BackupStorageLocation, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateAction(backupstoragelocationsResource, c.ns, backupStorageLocation), &ark_v1.BackupStorageLocation{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*ark_v1.BackupStorageLocation), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakeBackupStorageLocations) UpdateStatus(backupStorageLocation *ark_v1.BackupStorageLocation) (*ark_v1.BackupStorageLocation, error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateSubresourceAction(backupstoragelocationsResource, "status", c.ns, backupStorageLocation), &ark_v1.BackupStorageLocation{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*ark_v1.BackupStorageLocation), err
|
||||
}
|
||||
|
||||
// Delete takes name of the backupStorageLocation and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeBackupStorageLocations) Delete(name string, options *v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewDeleteAction(backupstoragelocationsResource, c.ns, name), &ark_v1.BackupStorageLocation{})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeBackupStorageLocations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
|
||||
action := testing.NewDeleteCollectionAction(backupstoragelocationsResource, c.ns, listOptions)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &ark_v1.BackupStorageLocationList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched backupStorageLocation.
|
||||
func (c *FakeBackupStorageLocations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *ark_v1.BackupStorageLocation, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceAction(backupstoragelocationsResource, c.ns, name, data, subresources...), &ark_v1.BackupStorageLocation{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*ark_v1.BackupStorageLocation), err
|
||||
}
|
|
@ -20,6 +20,8 @@ package v1
|
|||
|
||||
type BackupExpansion interface{}
|
||||
|
||||
type BackupStorageLocationExpansion interface{}
|
||||
|
||||
type ConfigExpansion interface{}
|
||||
|
||||
type DeleteBackupRequestExpansion interface{}
|
||||
|
|
|
@ -0,0 +1,89 @@
|
|||
/*
|
||||
Copyright 2018 the Heptio Ark contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
time "time"
|
||||
|
||||
ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1"
|
||||
versioned "github.com/heptio/ark/pkg/generated/clientset/versioned"
|
||||
internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces"
|
||||
v1 "github.com/heptio/ark/pkg/generated/listers/ark/v1"
|
||||
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// BackupStorageLocationInformer provides access to a shared informer and lister for
|
||||
// BackupStorageLocations.
|
||||
type BackupStorageLocationInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1.BackupStorageLocationLister
|
||||
}
|
||||
|
||||
type backupStorageLocationInformer struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
namespace string
|
||||
}
|
||||
|
||||
// NewBackupStorageLocationInformer constructs a new informer for BackupStorageLocation type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewBackupStorageLocationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
|
||||
return NewFilteredBackupStorageLocationInformer(client, namespace, resyncPeriod, indexers, nil)
|
||||
}
|
||||
|
||||
// NewFilteredBackupStorageLocationInformer constructs a new informer for BackupStorageLocation type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewFilteredBackupStorageLocationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
|
||||
return cache.NewSharedIndexInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.ArkV1().BackupStorageLocations(namespace).List(options)
|
||||
},
|
||||
WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.ArkV1().BackupStorageLocations(namespace).Watch(options)
|
||||
},
|
||||
},
|
||||
&ark_v1.BackupStorageLocation{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
}
|
||||
|
||||
func (f *backupStorageLocationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
|
||||
return NewFilteredBackupStorageLocationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
|
||||
}
|
||||
|
||||
func (f *backupStorageLocationInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&ark_v1.BackupStorageLocation{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *backupStorageLocationInformer) Lister() v1.BackupStorageLocationLister {
|
||||
return v1.NewBackupStorageLocationLister(f.Informer().GetIndexer())
|
||||
}
|
|
@ -26,6 +26,8 @@ import (
|
|||
type Interface interface {
|
||||
// Backups returns a BackupInformer.
|
||||
Backups() BackupInformer
|
||||
// BackupStorageLocations returns a BackupStorageLocationInformer.
|
||||
BackupStorageLocations() BackupStorageLocationInformer
|
||||
// Configs returns a ConfigInformer.
|
||||
Configs() ConfigInformer
|
||||
// DeleteBackupRequests returns a DeleteBackupRequestInformer.
|
||||
|
@ -60,6 +62,11 @@ func (v *version) Backups() BackupInformer {
|
|||
return &backupInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
|
||||
}
|
||||
|
||||
// BackupStorageLocations returns a BackupStorageLocationInformer.
|
||||
func (v *version) BackupStorageLocations() BackupStorageLocationInformer {
|
||||
return &backupStorageLocationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
|
||||
}
|
||||
|
||||
// Configs returns a ConfigInformer.
|
||||
func (v *version) Configs() ConfigInformer {
|
||||
return &configInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
|
||||
|
|
|
@ -55,6 +55,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
|
|||
// Group=ark.heptio.com, Version=v1
|
||||
case v1.SchemeGroupVersion.WithResource("backups"):
|
||||
return &genericInformer{resource: resource.GroupResource(), informer: f.Ark().V1().Backups().Informer()}, nil
|
||||
case v1.SchemeGroupVersion.WithResource("backupstoragelocations"):
|
||||
return &genericInformer{resource: resource.GroupResource(), informer: f.Ark().V1().BackupStorageLocations().Informer()}, nil
|
||||
case v1.SchemeGroupVersion.WithResource("configs"):
|
||||
return &genericInformer{resource: resource.GroupResource(), informer: f.Ark().V1().Configs().Informer()}, nil
|
||||
case v1.SchemeGroupVersion.WithResource("deletebackuprequests"):
|
||||
|
|
|
@ -0,0 +1,94 @@
|
|||
/*
|
||||
Copyright 2018 the Heptio Ark contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by lister-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
v1 "github.com/heptio/ark/pkg/apis/ark/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// BackupStorageLocationLister helps list BackupStorageLocations.
|
||||
type BackupStorageLocationLister interface {
|
||||
// List lists all BackupStorageLocations in the indexer.
|
||||
List(selector labels.Selector) (ret []*v1.BackupStorageLocation, err error)
|
||||
// BackupStorageLocations returns an object that can list and get BackupStorageLocations.
|
||||
BackupStorageLocations(namespace string) BackupStorageLocationNamespaceLister
|
||||
BackupStorageLocationListerExpansion
|
||||
}
|
||||
|
||||
// backupStorageLocationLister implements the BackupStorageLocationLister interface.
|
||||
type backupStorageLocationLister struct {
|
||||
indexer cache.Indexer
|
||||
}
|
||||
|
||||
// NewBackupStorageLocationLister returns a new BackupStorageLocationLister.
|
||||
func NewBackupStorageLocationLister(indexer cache.Indexer) BackupStorageLocationLister {
|
||||
return &backupStorageLocationLister{indexer: indexer}
|
||||
}
|
||||
|
||||
// List lists all BackupStorageLocations in the indexer.
|
||||
func (s *backupStorageLocationLister) List(selector labels.Selector) (ret []*v1.BackupStorageLocation, err error) {
|
||||
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
|
||||
ret = append(ret, m.(*v1.BackupStorageLocation))
|
||||
})
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// BackupStorageLocations returns an object that can list and get BackupStorageLocations.
|
||||
func (s *backupStorageLocationLister) BackupStorageLocations(namespace string) BackupStorageLocationNamespaceLister {
|
||||
return backupStorageLocationNamespaceLister{indexer: s.indexer, namespace: namespace}
|
||||
}
|
||||
|
||||
// BackupStorageLocationNamespaceLister helps list and get BackupStorageLocations.
|
||||
type BackupStorageLocationNamespaceLister interface {
|
||||
// List lists all BackupStorageLocations in the indexer for a given namespace.
|
||||
List(selector labels.Selector) (ret []*v1.BackupStorageLocation, err error)
|
||||
// Get retrieves the BackupStorageLocation from the indexer for a given namespace and name.
|
||||
Get(name string) (*v1.BackupStorageLocation, error)
|
||||
BackupStorageLocationNamespaceListerExpansion
|
||||
}
|
||||
|
||||
// backupStorageLocationNamespaceLister implements the BackupStorageLocationNamespaceLister
|
||||
// interface.
|
||||
type backupStorageLocationNamespaceLister struct {
|
||||
indexer cache.Indexer
|
||||
namespace string
|
||||
}
|
||||
|
||||
// List lists all BackupStorageLocations in the indexer for a given namespace.
|
||||
func (s backupStorageLocationNamespaceLister) List(selector labels.Selector) (ret []*v1.BackupStorageLocation, err error) {
|
||||
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
|
||||
ret = append(ret, m.(*v1.BackupStorageLocation))
|
||||
})
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// Get retrieves the BackupStorageLocation from the indexer for a given namespace and name.
|
||||
func (s backupStorageLocationNamespaceLister) Get(name string) (*v1.BackupStorageLocation, error) {
|
||||
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !exists {
|
||||
return nil, errors.NewNotFound(v1.Resource("backupstoragelocation"), name)
|
||||
}
|
||||
return obj.(*v1.BackupStorageLocation), nil
|
||||
}
|
|
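For context, a minimal sketch of how this generated lister is typically consumed; the informer wiring is omitted, and the import path is an assumption based on the repo's client-gen layout:

```go
package main

import (
	"fmt"

	listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// printLocations lists every BackupStorageLocation in one namespace via the
// generated lister, which reads from an informer's local cache rather than
// hitting the API server directly.
func printLocations(lister listers.BackupStorageLocationLister, namespace string) error {
	locations, err := lister.BackupStorageLocations(namespace).List(labels.Everything())
	if err != nil {
		return err
	}
	for _, loc := range locations {
		fmt.Printf("%s: provider=%s\n", loc.Name, loc.Spec.Provider)
	}
	return nil
}
```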
@ -26,6 +26,14 @@ type BackupListerExpansion interface{}
|
|||
// BackupNamespaceLister.
|
||||
type BackupNamespaceListerExpansion interface{}
|
||||
|
||||
// BackupStorageLocationListerExpansion allows custom methods to be added to
|
||||
// BackupStorageLocationLister.
|
||||
type BackupStorageLocationListerExpansion interface{}
|
||||
|
||||
// BackupStorageLocationNamespaceListerExpansion allows custom methods to be added to
|
||||
// BackupStorageLocationNamespaceLister.
|
||||
type BackupStorageLocationNamespaceListerExpansion interface{}
|
||||
|
||||
// ConfigListerExpansion allows custom methods to be added to
|
||||
// ConfigLister.
|
||||
type ConfigListerExpansion interface{}
|
||||
|
|
|
@ -17,83 +17,19 @@ limitations under the License.
|
|||
package install
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
arkv1 "github.com/heptio/ark/pkg/apis/ark/v1"
|
||||
)
|
||||
|
||||
type arkConfigOption func(*arkConfig)
|
||||
|
||||
type arkConfig struct {
|
||||
backupSyncPeriod time.Duration
|
||||
gcSyncPeriod time.Duration
|
||||
podVolumeOperationTimeout time.Duration
|
||||
restoreOnly bool
|
||||
resticLocation string
|
||||
}
|
||||
|
||||
func WithBackupSyncPeriod(t time.Duration) arkConfigOption {
|
||||
return func(c *arkConfig) {
|
||||
c.backupSyncPeriod = t
|
||||
}
|
||||
}
|
||||
|
||||
func WithGCSyncPeriod(t time.Duration) arkConfigOption {
|
||||
return func(c *arkConfig) {
|
||||
c.gcSyncPeriod = t
|
||||
}
|
||||
}
|
||||
|
||||
func WithPodVolumeOperationTimeout(t time.Duration) arkConfigOption {
|
||||
return func(c *arkConfig) {
|
||||
c.podVolumeOperationTimeout = t
|
||||
}
|
||||
}
|
||||
|
||||
func WithRestoreOnly() arkConfigOption {
|
||||
return func(c *arkConfig) {
|
||||
c.restoreOnly = true
|
||||
}
|
||||
}
|
||||
|
||||
func WithResticLocation(location string) arkConfigOption {
|
||||
return func(c *arkConfig) {
|
||||
c.resticLocation = location
|
||||
}
|
||||
}
|
||||
|
||||
func Config(
|
||||
namespace string,
|
||||
pvCloudProviderName string,
|
||||
pvCloudProviderConfig map[string]string,
|
||||
backupCloudProviderName string,
|
||||
backupCloudProviderConfig map[string]string,
|
||||
bucket string,
|
||||
opts ...arkConfigOption,
|
||||
) *arkv1.Config {
|
||||
c := &arkConfig{
|
||||
backupSyncPeriod: 30 * time.Minute,
|
||||
gcSyncPeriod: 30 * time.Minute,
|
||||
podVolumeOperationTimeout: 60 * time.Minute,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(c)
|
||||
}
|
||||
|
||||
return &arkv1.Config{
|
||||
ObjectMeta: objectMeta(namespace, "default"),
|
||||
PersistentVolumeProvider: &arkv1.CloudProviderConfig{
|
||||
Name: pvCloudProviderName,
|
||||
Config: pvCloudProviderConfig,
|
||||
},
|
||||
BackupStorageProvider: arkv1.ObjectStorageProviderConfig{
|
||||
CloudProviderConfig: arkv1.CloudProviderConfig{
|
||||
Name: backupCloudProviderName,
|
||||
Config: backupCloudProviderConfig,
|
||||
},
|
||||
Bucket: bucket,
|
||||
ResticLocation: c.resticLocation,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
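A minimal sketch of calling this constructor with the functional options above; the import path and concrete values are assumptions for illustration. Keeping `arkConfigOption` unexported while exporting the `With*` constructors closes the option set to this package.

```go
package main

import (
	"time"

	"github.com/heptio/ark/pkg/install"
)

func main() {
	// Build the default Config resource, overriding two defaults via options.
	cfg := install.Config(
		"heptio-ark", // namespace
		"aws", nil,   // PV (snapshot) provider name and config
		"aws", map[string]string{"region": "us-west-2"}, // backup provider
		"my-ark-bucket",
		install.WithBackupSyncPeriod(15*time.Minute),
		install.WithRestoreOnly(),
	)
	_ = cfg
}
```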
@ -36,6 +36,7 @@ const (
|
|||
DaemonSet = "restic"
|
||||
InitContainer = "restic-wait"
|
||||
DefaultMaintenanceFrequency = 24 * time.Hour
|
||||
ResticLocationConfigKey = "restic-location"
|
||||
|
||||
podAnnotationPrefix = "snapshot.ark.heptio.com/"
|
||||
volumesToBackupAnnotation = "backup.ark.heptio.com/backup-volumes"
|
||||
|
|
|
@ -38,9 +38,10 @@ var getAWSBucketRegion = aws.GetBucketRegion
|
|||
|
||||
// getRepoPrefix returns the prefix of the value of the --repo flag for
|
||||
// restic commands, i.e. everything except the "/<repo-name>".
|
||||
func getRepoPrefix(config arkv1api.ObjectStorageProviderConfig) string {
|
||||
func getRepoPrefix(location *arkv1api.BackupStorageLocation) string {
|
||||
var (
|
||||
parts = strings.SplitN(config.ResticLocation, "/", 2)
|
||||
resticLocation = location.Spec.Config[ResticLocationConfigKey]
|
||||
parts = strings.SplitN(resticLocation, "/", 2)
|
||||
bucket, path, prefix string
|
||||
)
|
||||
|
||||
|
@ -51,13 +52,13 @@ func getRepoPrefix(config arkv1api.ObjectStorageProviderConfig) string {
|
|||
path = parts[1]
|
||||
}
|
||||
|
||||
switch BackendType(config.Name) {
|
||||
switch BackendType(location.Spec.Provider) {
|
||||
case AWSBackend:
|
||||
var url string
|
||||
switch {
|
||||
// non-AWS, S3-compatible object store
|
||||
case config.Config["s3Url"] != "":
|
||||
url = config.Config["s3Url"]
|
||||
case location.Spec.Config["s3Url"] != "":
|
||||
url = location.Spec.Config["s3Url"]
|
||||
default:
|
||||
region, err := getAWSBucketRegion(bucket)
|
||||
if err != nil {
|
||||
|
@ -68,7 +69,7 @@ func getRepoPrefix(config arkv1api.ObjectStorageProviderConfig) string {
|
|||
url = fmt.Sprintf("s3-%s.amazonaws.com", region)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("s3:%s/%s", url, config.ResticLocation)
|
||||
return fmt.Sprintf("s3:%s/%s", url, resticLocation)
|
||||
case AzureBackend:
|
||||
prefix = "azure"
|
||||
case GCPBackend:
|
||||
|
@ -80,8 +81,8 @@ func getRepoPrefix(config arkv1api.ObjectStorageProviderConfig) string {
|
|||
|
||||
// GetRepoIdentifier returns the string to be used as the value of the --repo flag in
|
||||
// restic commands for the given repository.
|
||||
func GetRepoIdentifier(config arkv1api.ObjectStorageProviderConfig, name string) string {
|
||||
prefix := getRepoPrefix(config)
|
||||
func GetRepoIdentifier(location *arkv1api.BackupStorageLocation, name string) string {
|
||||
prefix := getRepoPrefix(location)
|
||||
|
||||
return fmt.Sprintf("%s/%s", strings.TrimSuffix(prefix, "/"), name)
|
||||
}
|
||||
|
|
|
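To make the mapping concrete, a sketch (assumed import paths and values, not from this PR) of deriving the restic `--repo` value from a BackupStorageLocation; note that for AWS without an `s3Url`, this performs a live bucket-region lookup:

```go
package main

import (
	"fmt"

	arkv1api "github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/restic"
)

func main() {
	loc := &arkv1api.BackupStorageLocation{
		Spec: arkv1api.BackupStorageLocationSpec{
			Provider: "aws",
			Config:   map[string]string{restic.ResticLocationConfigKey: "my-bucket/restic"},
		},
	}
	// With the bucket in us-west-2 this prints:
	// s3:s3-us-west-2.amazonaws.com/my-bucket/restic/my-namespace
	fmt.Println(restic.GetRepoIdentifier(loc, "my-namespace"))
}
```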
@ -30,47 +30,60 @@ func TestGetRepoIdentifier(t *testing.T) {
|
|||
getAWSBucketRegion = func(string) (string, error) {
|
||||
return "", errors.New("no region found")
|
||||
}
|
||||
config := arkv1api.ObjectStorageProviderConfig{
|
||||
CloudProviderConfig: arkv1api.CloudProviderConfig{Name: "aws"},
|
||||
ResticLocation: "bucket/prefix",
|
||||
|
||||
backupLocation := &arkv1api.BackupStorageLocation{
|
||||
Spec: arkv1api.BackupStorageLocationSpec{
|
||||
Provider: "aws",
|
||||
Config: map[string]string{ResticLocationConfigKey: "bucket/prefix"},
|
||||
},
|
||||
}
|
||||
assert.Equal(t, "s3:s3.amazonaws.com/bucket/prefix/repo-1", GetRepoIdentifier(config, "repo-1"))
|
||||
assert.Equal(t, "s3:s3.amazonaws.com/bucket/prefix/repo-1", GetRepoIdentifier(backupLocation, "repo-1"))
|
||||
|
||||
// stub implementation of getAWSBucketRegion
|
||||
getAWSBucketRegion = func(string) (string, error) {
|
||||
return "us-west-2", nil
|
||||
}
|
||||
|
||||
config = arkv1api.ObjectStorageProviderConfig{
|
||||
CloudProviderConfig: arkv1api.CloudProviderConfig{Name: "aws"},
|
||||
ResticLocation: "bucket",
|
||||
}
|
||||
assert.Equal(t, "s3:s3-us-west-2.amazonaws.com/bucket/repo-1", GetRepoIdentifier(config, "repo-1"))
|
||||
|
||||
config = arkv1api.ObjectStorageProviderConfig{
|
||||
CloudProviderConfig: arkv1api.CloudProviderConfig{Name: "aws"},
|
||||
ResticLocation: "bucket/prefix",
|
||||
}
|
||||
assert.Equal(t, "s3:s3-us-west-2.amazonaws.com/bucket/prefix/repo-1", GetRepoIdentifier(config, "repo-1"))
|
||||
|
||||
config = arkv1api.ObjectStorageProviderConfig{
|
||||
CloudProviderConfig: arkv1api.CloudProviderConfig{
|
||||
Name: "aws",
|
||||
Config: map[string]string{"s3Url": "alternate-url"},
|
||||
backupLocation = &arkv1api.BackupStorageLocation{
|
||||
Spec: arkv1api.BackupStorageLocationSpec{
|
||||
Provider: "aws",
|
||||
Config: map[string]string{ResticLocationConfigKey: "bucket"},
|
||||
},
|
||||
ResticLocation: "bucket/prefix",
|
||||
}
|
||||
assert.Equal(t, "s3:alternate-url/bucket/prefix/repo-1", GetRepoIdentifier(config, "repo-1"))
|
||||
assert.Equal(t, "s3:s3-us-west-2.amazonaws.com/bucket/repo-1", GetRepoIdentifier(backupLocation, "repo-1"))
|
||||
|
||||
config = arkv1api.ObjectStorageProviderConfig{
|
||||
CloudProviderConfig: arkv1api.CloudProviderConfig{Name: "azure"},
|
||||
ResticLocation: "bucket/prefix",
|
||||
backupLocation = &arkv1api.BackupStorageLocation{
|
||||
Spec: arkv1api.BackupStorageLocationSpec{
|
||||
Provider: "aws",
|
||||
Config: map[string]string{ResticLocationConfigKey: "bucket/prefix"},
|
||||
},
|
||||
}
|
||||
assert.Equal(t, "azure:bucket:/prefix/repo-1", GetRepoIdentifier(config, "repo-1"))
|
||||
assert.Equal(t, "s3:s3-us-west-2.amazonaws.com/bucket/prefix/repo-1", GetRepoIdentifier(backupLocation, "repo-1"))
|
||||
|
||||
config = arkv1api.ObjectStorageProviderConfig{
|
||||
CloudProviderConfig: arkv1api.CloudProviderConfig{Name: "gcp"},
|
||||
ResticLocation: "bucket-2/prefix-2",
|
||||
backupLocation = &arkv1api.BackupStorageLocation{
|
||||
Spec: arkv1api.BackupStorageLocationSpec{
|
||||
Provider: "aws",
|
||||
Config: map[string]string{
|
||||
ResticLocationConfigKey: "bucket/prefix",
|
||||
"s3Url": "alternate-url",
|
||||
},
|
||||
},
|
||||
}
|
||||
assert.Equal(t, "gs:bucket-2:/prefix-2/repo-2", GetRepoIdentifier(config, "repo-2"))
|
||||
assert.Equal(t, "s3:alternate-url/bucket/prefix/repo-1", GetRepoIdentifier(backupLocation, "repo-1"))
|
||||
|
||||
backupLocation = &arkv1api.BackupStorageLocation{
|
||||
Spec: arkv1api.BackupStorageLocationSpec{
|
||||
Provider: "azure",
|
||||
Config: map[string]string{ResticLocationConfigKey: "bucket/prefix"},
|
||||
},
|
||||
}
|
||||
assert.Equal(t, "azure:bucket:/prefix/repo-1", GetRepoIdentifier(backupLocation, "repo-1"))
|
||||
|
||||
backupLocation = &arkv1api.BackupStorageLocation{
|
||||
Spec: arkv1api.BackupStorageLocationSpec{
|
||||
Provider: "gcp",
|
||||
Config: map[string]string{ResticLocationConfigKey: "bucket-2/prefix-2"},
|
||||
},
|
||||
}
|
||||
assert.Equal(t, "gs:bucket-2:/prefix-2/repo-2", GetRepoIdentifier(backupLocation, "repo-2"))
|
||||
}
|
||||
|
|
|
@ -135,3 +135,8 @@ func (b *TestBackup) WithStartTimestamp(startTime time.Time) *TestBackup {
|
|||
b.Status.StartTimestamp = metav1.Time{Time: startTime}
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *TestBackup) WithStorageLocation(location string) *TestBackup {
|
||||
b.Spec.StorageLocation = location
|
||||
return b
|
||||
}
|
||||
|
|
|
@ -0,0 +1,68 @@
|
|||
/*
|
||||
Copyright 2017 the Heptio Ark contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package test
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/heptio/ark/pkg/apis/ark/v1"
|
||||
)
|
||||
|
||||
type TestBackupStorageLocation struct {
|
||||
*v1.BackupStorageLocation
|
||||
}
|
||||
|
||||
func NewTestBackupStorageLocation() *TestBackupStorageLocation {
|
||||
return &TestBackupStorageLocation{
|
||||
BackupStorageLocation: &v1.BackupStorageLocation{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: v1.DefaultNamespace,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (b *TestBackupStorageLocation) WithNamespace(namespace string) *TestBackupStorageLocation {
|
||||
b.Namespace = namespace
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *TestBackupStorageLocation) WithName(name string) *TestBackupStorageLocation {
|
||||
b.Name = name
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *TestBackupStorageLocation) WithLabel(key, value string) *TestBackupStorageLocation {
|
||||
if b.Labels == nil {
|
||||
b.Labels = make(map[string]string)
|
||||
}
|
||||
b.Labels[key] = value
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *TestBackupStorageLocation) WithProvider(name string) *TestBackupStorageLocation {
|
||||
b.Spec.Provider = name
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *TestBackupStorageLocation) WithObjectStorage(bucketName string) *TestBackupStorageLocation {
|
||||
if b.Spec.StorageType.ObjectStorage == nil {
|
||||
b.Spec.StorageType.ObjectStorage = &v1.ObjectStorageLocation{}
|
||||
}
|
||||
b.Spec.ObjectStorage.Bucket = bucketName
|
||||
return b
|
||||
}
|
|
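A sketch of the fluent builder in a test; the import path and field expectations are assumptions for illustration:

```go
package example

import (
	"testing"

	"github.com/heptio/ark/pkg/util/test"
)

func TestDefaultLocation(t *testing.T) {
	// Chain the With* helpers, then unwrap the embedded API object.
	loc := test.NewTestBackupStorageLocation().
		WithName("default").
		WithProvider("aws").
		WithObjectStorage("my-bucket").
		BackupStorageLocation
	if loc.Spec.ObjectStorage.Bucket != "my-bucket" {
		t.Fatalf("unexpected bucket: %s", loc.Spec.ObjectStorage.Bucket)
	}
}
```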
@ -0,0 +1,5 @@
|
|||
Microsoft Azure-SDK-for-Go
|
||||
Copyright 2014-2017 Microsoft
|
||||
|
||||
This product includes software developed at
|
||||
the Microsoft Corporation (https://www.microsoft.com).
|
|
@ -19,10 +19,10 @@ package disk
|
|||
|
||||
// UserAgent returns the UserAgent string to use when sending http.Requests.
|
||||
func UserAgent() string {
|
||||
return "Azure-SDK-For-Go/v10.2.0-beta arm-disk/2016-04-30-preview"
|
||||
return "Azure-SDK-For-Go/v11.3.0-beta arm-disk/2016-04-30-preview"
|
||||
}
|
||||
|
||||
// Version returns the semantic version (see http://semver.org) of the client.
|
||||
func Version() string {
|
||||
return "v10.2.0-beta"
|
||||
return "v11.3.0-beta"
|
||||
}
|
||||
|
|
|
@ -1,49 +0,0 @@
|
|||
package helpers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/Azure/go-autorest/autorest/adal"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
)
|
||||
|
||||
const (
|
||||
credentialsPath = "/.azure/credentials.json"
|
||||
)
|
||||
|
||||
// ToJSON returns the passed item as a pretty-printed JSON string. If any JSON error occurs,
|
||||
// it returns the empty string.
|
||||
func ToJSON(v interface{}) (string, error) {
|
||||
j, err := json.MarshalIndent(v, "", " ")
|
||||
return string(j), err
|
||||
}
|
||||
|
||||
// NewServicePrincipalTokenFromCredentials creates a new ServicePrincipalToken using values of the
|
||||
// passed credentials map.
|
||||
func NewServicePrincipalTokenFromCredentials(c map[string]string, scope string) (*adal.ServicePrincipalToken, error) {
|
||||
oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, c["AZURE_TENANT_ID"])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return adal.NewServicePrincipalToken(*oauthConfig, c["AZURE_CLIENT_ID"], c["AZURE_CLIENT_SECRET"], scope)
|
||||
}
|
||||
|
||||
func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string {
|
||||
mapOfStrings := make(map[string]string)
|
||||
for key, value := range mapOfInterface {
|
||||
mapOfStrings[key] = ensureValueString(value)
|
||||
}
|
||||
return mapOfStrings
|
||||
}
|
||||
|
||||
func ensureValueString(value interface{}) string {
|
||||
if value == nil {
|
||||
return ""
|
||||
}
|
||||
switch v := value.(type) {
|
||||
case string:
|
||||
return v
|
||||
default:
|
||||
return fmt.Sprintf("%v", v)
|
||||
}
|
||||
}
|
952 vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/accounts.go (generated, vendored, new file)
|
@ -0,0 +1,952 @@
|
|||
package storage
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/autorest/validation"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// AccountsClient is the Azure Storage Management API.
|
||||
type AccountsClient struct {
|
||||
ManagementClient
|
||||
}
|
||||
|
||||
// NewAccountsClient creates an instance of the AccountsClient client.
|
||||
func NewAccountsClient(subscriptionID string) AccountsClient {
|
||||
return NewAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewAccountsClientWithBaseURI creates an instance of the AccountsClient client.
|
||||
func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient {
|
||||
return AccountsClient{NewWithBaseURI(baseURI, subscriptionID)}
|
||||
}
|
||||
|
||||
// CheckNameAvailability checks that the storage account name is valid and is not already in use.
|
||||
//
|
||||
// accountName is the name of the storage account within the specified resource group. Storage account names must be
|
||||
// between 3 and 24 characters in length and use numbers and lower-case letters only.
|
||||
func (client AccountsClient) CheckNameAvailability(accountName AccountCheckNameAvailabilityParameters) (result CheckNameAvailabilityResult, err error) {
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName.Name", Name: validation.Null, Rule: true, Chain: nil},
|
||||
{Target: "accountName.Type", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "CheckNameAvailability")
|
||||
}
|
||||
|
||||
req, err := client.CheckNameAvailabilityPreparer(accountName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.CheckNameAvailabilitySender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.CheckNameAvailabilityResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request.
|
||||
func (client AccountsClient) CheckNameAvailabilityPreparer(accountName AccountCheckNameAvailabilityParameters) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-10-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsJSON(),
|
||||
autorest.AsPost(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability", pathParameters),
|
||||
autorest.WithJSON(accountName),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare(&http.Request{})
|
||||
}
|
||||
|
||||
// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client AccountsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) {
|
||||
return autorest.SendWithSender(client,
|
||||
req,
|
||||
azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client AccountsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
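All of the generated methods follow this same Preparer/Sender/Responder pipeline; a sketch of driving the three stages by hand (useful when the raw *http.Request needs inspection), with the client value assumed:

```go
package main

import (
	storage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
)

// checkName replays what CheckNameAvailability does internally: prepare the
// request, send it with retry/registration handling, then unmarshal the body.
func checkName(client storage.AccountsClient, params storage.AccountCheckNameAvailabilityParameters) (storage.CheckNameAvailabilityResult, error) {
	req, err := client.CheckNameAvailabilityPreparer(params)
	if err != nil {
		return storage.CheckNameAvailabilityResult{}, err
	}
	resp, err := client.CheckNameAvailabilitySender(req)
	if err != nil {
		return storage.CheckNameAvailabilityResult{}, err
	}
	return client.CheckNameAvailabilityResponder(resp)
}
```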
||||
// Create asynchronously creates a new storage account with the specified parameters. If an account is already created
|
||||
// and a subsequent create request is issued with different properties, the account properties will be updated. If an
|
||||
// account is already created and a subsequent create or update request is issued with the exact same set of
|
||||
// properties, the request will succeed. This method may poll for completion. Polling can be canceled by passing the
|
||||
// cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive.
|
||||
// accountName is the name of the storage account within the specified resource group. Storage account names must be
|
||||
// between 3 and 24 characters in length and use numbers and lower-case letters only. parameters is the parameters to
|
||||
// provide for the created account.
|
||||
func (client AccountsClient) Create(resourceGroupName string, accountName string, parameters AccountCreateParameters, cancel <-chan struct{}) (<-chan Account, <-chan error) {
|
||||
resultChan := make(chan Account, 1)
|
||||
errChan := make(chan error, 1)
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: parameters,
|
||||
Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: true, Chain: nil},
|
||||
{Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil},
|
||||
{Target: "parameters.Identity", Name: validation.Null, Rule: false,
|
||||
Chain: []validation.Constraint{{Target: "parameters.Identity.Type", Name: validation.Null, Rule: true, Chain: nil}}},
|
||||
{Target: "parameters.AccountPropertiesCreateParameters", Name: validation.Null, Rule: false,
|
||||
Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain", Name: validation.Null, Rule: false,
|
||||
Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain.Name", Name: validation.Null, Rule: true, Chain: nil}}},
|
||||
}}}}}); err != nil {
|
||||
errChan <- validation.NewErrorWithValidationError(err, "storage.AccountsClient", "Create")
|
||||
close(errChan)
|
||||
close(resultChan)
|
||||
return resultChan, errChan
|
||||
}
|
||||
|
||||
go func() {
|
||||
var err error
|
||||
var result Account
|
||||
defer func() {
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
}
|
||||
resultChan <- result
|
||||
close(resultChan)
|
||||
close(errChan)
|
||||
}()
|
||||
req, err := client.CreatePreparer(resourceGroupName, accountName, parameters, cancel)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.CreateSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.CreateResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", resp, "Failure responding to request")
|
||||
}
|
||||
}()
|
||||
return resultChan, errChan
|
||||
}
|
||||
|
||||
// CreatePreparer prepares the Create request.
|
||||
func (client AccountsClient) CreatePreparer(resourceGroupName string, accountName string, parameters AccountCreateParameters, cancel <-chan struct{}) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-10-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsJSON(),
|
||||
autorest.AsPut(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters),
|
||||
autorest.WithJSON(parameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare(&http.Request{Cancel: cancel})
|
||||
}
|
||||
|
||||
// CreateSender sends the Create request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client AccountsClient) CreateSender(req *http.Request) (*http.Response, error) {
|
||||
return autorest.SendWithSender(client,
|
||||
req,
|
||||
azure.DoRetryWithRegistration(client.Client),
|
||||
azure.DoPollForAsynchronous(client.PollingDelay))
|
||||
}
|
||||
|
||||
// CreateResponder handles the response to the Create request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client AccountsClient) CreateResponder(resp *http.Response) (result Account, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
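Since Create returns buffered channels that are closed once polling finishes, a caller can read the error channel first (it yields nil when closed without an error). A sketch, with resource names assumed:

```go
package main

import (
	"fmt"

	storage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
)

func createAccount(client storage.AccountsClient, params storage.AccountCreateParameters) error {
	cancel := make(chan struct{}) // close(cancel) would abort polling
	accountCh, errCh := client.Create("my-resource-group", "myarkstorage", params, cancel)
	if err := <-errCh; err != nil { // nil if the channel was closed cleanly
		return err
	}
	if account := <-accountCh; account.Name != nil {
		fmt.Println("provisioned:", *account.Name)
	}
	return nil
}
```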
||||
// Delete deletes a storage account in Microsoft Azure.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive.
|
||||
// accountName is the name of the storage account within the specified resource group. Storage account names must be
|
||||
// between 3 and 24 characters in length and use numbers and lower-case letters only.
|
||||
func (client AccountsClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, err error) {
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "Delete")
|
||||
}
|
||||
|
||||
req, err := client.DeletePreparer(resourceGroupName, accountName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.DeleteSender(req)
|
||||
if err != nil {
|
||||
result.Response = resp
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.DeleteResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// DeletePreparer prepares the Delete request.
|
||||
func (client AccountsClient) DeletePreparer(resourceGroupName string, accountName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-10-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsDelete(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare(&http.Request{})
|
||||
}
|
||||
|
||||
// DeleteSender sends the Delete request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client AccountsClient) DeleteSender(req *http.Request) (*http.Response, error) {
|
||||
return autorest.SendWithSender(client,
|
||||
req,
|
||||
azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// DeleteResponder handles the response to the Delete request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client AccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
|
||||
autorest.ByClosing())
|
||||
result.Response = resp
|
||||
return
|
||||
}
|
||||
|
||||
// GetProperties returns the properties for the specified storage account including but not limited to name, SKU name,
|
||||
// location, and account status. The ListKeys operation should be used to retrieve storage keys.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive.
|
||||
// accountName is the name of the storage account within the specified resource group. Storage account names must be
|
||||
// between 3 and 24 characters in length and use numbers and lower-case letters only.
|
||||
func (client AccountsClient) GetProperties(resourceGroupName string, accountName string) (result Account, err error) {
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "GetProperties")
|
||||
}
|
||||
|
||||
req, err := client.GetPropertiesPreparer(resourceGroupName, accountName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.GetPropertiesSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.GetPropertiesResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GetPropertiesPreparer prepares the GetProperties request.
|
||||
func (client AccountsClient) GetPropertiesPreparer(resourceGroupName string, accountName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-10-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare(&http.Request{})
|
||||
}
|
||||
|
||||
// GetPropertiesSender sends the GetProperties request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client AccountsClient) GetPropertiesSender(req *http.Request) (*http.Response, error) {
|
||||
return autorest.SendWithSender(client,
|
||||
req,
|
||||
azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// GetPropertiesResponder handles the response to the GetProperties request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client AccountsClient) GetPropertiesResponder(resp *http.Response) (result Account, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// List lists all the storage accounts available under the subscription. Note that storage keys are not returned; use
|
||||
// the ListKeys operation for this.
|
||||
func (client AccountsClient) List() (result AccountListResult, err error) {
|
||||
req, err := client.ListPreparer()
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.ListResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListPreparer prepares the List request.
|
||||
func (client AccountsClient) ListPreparer() (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-10-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare(&http.Request{})
|
||||
}
|
||||
|
||||
// ListSender sends the List request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) {
|
||||
return autorest.SendWithSender(client,
|
||||
req,
|
||||
azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// ListResponder handles the response to the List request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client AccountsClient) ListResponder(resp *http.Response) (result AccountListResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// ListAccountSAS lists SAS credentials of a storage account.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive.
|
||||
// accountName is the name of the storage account within the specified resource group. Storage account names must be
|
||||
// between 3 and 24 characters in length and use numbers and lower-case letters only. parameters is the parameters to
|
||||
// provide to list SAS credentials for the storage account.
|
||||
func (client AccountsClient) ListAccountSAS(resourceGroupName string, accountName string, parameters AccountSasParameters) (result ListAccountSasResponse, err error) {
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: parameters,
|
||||
Constraints: []validation.Constraint{{Target: "parameters.SharedAccessExpiryTime", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "ListAccountSAS")
|
||||
}
|
||||
|
||||
req, err := client.ListAccountSASPreparer(resourceGroupName, accountName, parameters)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListAccountSASSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.ListAccountSASResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListAccountSAS", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListAccountSASPreparer prepares the ListAccountSAS request.
|
||||
func (client AccountsClient) ListAccountSASPreparer(resourceGroupName string, accountName string, parameters AccountSasParameters) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-10-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsJSON(),
|
||||
autorest.AsPost(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListAccountSas", pathParameters),
|
||||
autorest.WithJSON(parameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare(&http.Request{})
|
||||
}
|
||||
|
||||
// ListAccountSASSender sends the ListAccountSAS request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client AccountsClient) ListAccountSASSender(req *http.Request) (*http.Response, error) {
|
||||
return autorest.SendWithSender(client,
|
||||
req,
|
||||
azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// ListAccountSASResponder handles the response to the ListAccountSAS request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client AccountsClient) ListAccountSASResponder(resp *http.Response) (result ListAccountSasResponse, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// ListByResourceGroup lists all the storage accounts available under the given resource group. Note that storage keys
|
||||
// are not returned; use the ListKeys operation for this.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive.
|
||||
func (client AccountsClient) ListByResourceGroup(resourceGroupName string) (result AccountListResult, err error) {
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "ListByResourceGroup")
|
||||
}
|
||||
|
||||
req, err := client.ListByResourceGroupPreparer(resourceGroupName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListByResourceGroupSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.ListByResourceGroupResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
|
||||
func (client AccountsClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-10-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare(&http.Request{})
|
||||
}
|
||||
|
||||
// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
|
||||
return autorest.SendWithSender(client,
|
||||
req,
|
||||
azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client AccountsClient) ListByResourceGroupResponder(resp *http.Response) (result AccountListResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// ListKeys lists the access keys for the specified storage account.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive.
|
||||
// accountName is the name of the storage account within the specified resource group. Storage account names must be
|
||||
// between 3 and 24 characters in length and use numbers and lower-case letters only.
|
||||
func (client AccountsClient) ListKeys(resourceGroupName string, accountName string) (result AccountListKeysResult, err error) {
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "ListKeys")
|
||||
}
|
||||
|
||||
req, err := client.ListKeysPreparer(resourceGroupName, accountName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListKeysSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.ListKeysResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListKeysPreparer prepares the ListKeys request.
|
||||
func (client AccountsClient) ListKeysPreparer(resourceGroupName string, accountName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-10-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsPost(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare(&http.Request{})
|
||||
}
|
||||
|
||||
// ListKeysSender sends the ListKeys request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client AccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) {
|
||||
return autorest.SendWithSender(client,
|
||||
req,
|
||||
azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// ListKeysResponder handles the response to the ListKeys request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client AccountsClient) ListKeysResponder(resp *http.Response) (result AccountListKeysResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
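A sketch of pulling the first access key through this client; the helper name and error message are illustrative, and the `Keys *[]AccountKey` shape is assumed from the SDK's models for this API version:

```go
package main

import (
	"fmt"

	storage "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
)

// firstAccountKey returns the first access key for a storage account.
func firstAccountKey(client storage.AccountsClient, resourceGroup, accountName string) (string, error) {
	res, err := client.ListKeys(resourceGroup, accountName)
	if err != nil {
		return "", err
	}
	if res.Keys == nil || len(*res.Keys) == 0 {
		return "", fmt.Errorf("no keys returned for account %q", accountName)
	}
	return *(*res.Keys)[0].Value, nil
}
```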
||||
// ListServiceSAS lists service SAS credentials of a specific resource.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive.
|
||||
// accountName is the name of the storage account within the specified resource group. Storage account names must be
|
||||
// between 3 and 24 characters in length and use numbers and lower-case letters only. parameters is the parameters to
|
||||
// provide to list service SAS credentials.
|
||||
func (client AccountsClient) ListServiceSAS(resourceGroupName string, accountName string, parameters ServiceSasParameters) (result ListServiceSasResponse, err error) {
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: parameters,
|
||||
Constraints: []validation.Constraint{{Target: "parameters.CanonicalizedResource", Name: validation.Null, Rule: true, Chain: nil},
|
||||
{Target: "parameters.Identifier", Name: validation.Null, Rule: false,
|
||||
Chain: []validation.Constraint{{Target: "parameters.Identifier", Name: validation.MaxLength, Rule: 64, Chain: nil}}}}}}); err != nil {
|
||||
return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "ListServiceSAS")
|
||||
}
|
||||
|
||||
req, err := client.ListServiceSASPreparer(resourceGroupName, accountName, parameters)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListServiceSASSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.ListServiceSASResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListServiceSAS", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListServiceSASPreparer prepares the ListServiceSAS request.
|
||||
func (client AccountsClient) ListServiceSASPreparer(resourceGroupName string, accountName string, parameters ServiceSasParameters) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-10-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsJSON(),
|
||||
autorest.AsPost(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListServiceSas", pathParameters),
|
||||
autorest.WithJSON(parameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare(&http.Request{})
|
||||
}
|
||||
|
||||
// ListServiceSASSender sends the ListServiceSAS request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client AccountsClient) ListServiceSASSender(req *http.Request) (*http.Response, error) {
|
||||
return autorest.SendWithSender(client,
|
||||
req,
|
||||
azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// ListServiceSASResponder handles the response to the ListServiceSAS request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client AccountsClient) ListServiceSASResponder(resp *http.Response) (result ListServiceSasResponse, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// RegenerateKey regenerates one of the access keys for the specified storage account.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive.
|
||||
// accountName is the name of the storage account within the specified resource group. Storage account names must be
|
||||
// between 3 and 24 characters in length and use numbers and lower-case letters only. regenerateKey specifies the name
|
||||
// of the key which should be regenerated -- key1 or key2.
|
||||
func (client AccountsClient) RegenerateKey(resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (result AccountListKeysResult, err error) {
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}},
|
||||
{TargetValue: regenerateKey,
|
||||
Constraints: []validation.Constraint{{Target: "regenerateKey.KeyName", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "RegenerateKey")
|
||||
}
|
||||
|
||||
req, err := client.RegenerateKeyPreparer(resourceGroupName, accountName, regenerateKey)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.RegenerateKeySender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.RegenerateKeyResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// RegenerateKeyPreparer prepares the RegenerateKey request.
|
||||
func (client AccountsClient) RegenerateKeyPreparer(resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-10-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsJSON(),
|
||||
autorest.AsPost(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey", pathParameters),
|
||||
autorest.WithJSON(regenerateKey),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare(&http.Request{})
|
||||
}
|
||||
|
||||
// RegenerateKeySender sends the RegenerateKey request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client AccountsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) {
|
||||
return autorest.SendWithSender(client,
|
||||
req,
|
||||
azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client AccountsClient) RegenerateKeyResponder(resp *http.Response) (result AccountListKeysResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// Update the update operation can be used to update the SKU, encryption, access tier, or tags for a storage account.
|
||||
// It can also be used to map the account to a custom domain. Only one custom domain is supported per storage account;
|
||||
// the replacement/change of custom domain is not supported. In order to replace an old custom domain, the old value
|
||||
// must be cleared/unregistered before a new value can be set. The update of multiple properties is supported. This
|
||||
// call does not change the storage keys for the account. If you want to change the storage account keys, use the
|
||||
// regenerate keys operation. The location and name of the storage account cannot be changed after creation.
|
||||
//
|
||||
// resourceGroupName is the name of the resource group within the user's subscription. The name is case insensitive.
|
||||
// accountName is the name of the storage account within the specified resource group. Storage account names must be
|
||||
// between 3 and 24 characters in length and use numbers and lower-case letters only. parameters is the parameters to
|
||||
// provide for the updated account.
|
||||
func (client AccountsClient) Update(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (result Account, err error) {
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}},
|
||||
{TargetValue: accountName,
|
||||
Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil},
|
||||
{Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewErrorWithValidationError(err, "storage.AccountsClient", "Update")
|
||||
}
|
||||
|
||||
req, err := client.UpdatePreparer(resourceGroupName, accountName, parameters)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.UpdateSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.UpdateResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// UpdatePreparer prepares the Update request.
|
||||
func (client AccountsClient) UpdatePreparer(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"accountName": autorest.Encode("path", accountName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2017-10-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsJSON(),
|
||||
autorest.AsPatch(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters),
|
||||
autorest.WithJSON(parameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare(&http.Request{})
|
||||
}
|
||||
|
||||
// UpdateSender sends the Update request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client AccountsClient) UpdateSender(req *http.Request) (*http.Response, error) {
|
||||
return autorest.SendWithSender(client,
|
||||
req,
|
||||
azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// UpdateResponder handles the response to the Update request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client AccountsClient) UpdateResponder(resp *http.Response) (result Account, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
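
The generated accounts client exposes each operation as a Preparer/Sender/Responder triple plus a convenience wrapper, so callers normally touch only the top-level method. A minimal sketch of driving `RegenerateKey`, assuming an `AccountsClient` (from the accounts file earlier in this diff) whose `Authorizer` has already been configured; credential setup is elided and the resource names are placeholders:

```go
package example

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
)

// regenerateStorageKey rotates one access key and prints the resulting key set.
func regenerateStorageKey(subscriptionID, resourceGroup, accountName string) error {
	client := storage.NewAccountsClient(subscriptionID)
	// client.Authorizer must be populated with service-principal credentials
	// before any call; that setup is omitted here.

	keyName := "key1" // "key2" is the other valid value
	result, err := client.RegenerateKey(resourceGroup, accountName,
		storage.AccountRegenerateKeyParameters{KeyName: &keyName})
	if err != nil {
		return err
	}
	for _, k := range *result.Keys {
		fmt.Printf("%s rotated (permissions: %s)\n", *k.KeyName, k.Permissions)
	}
	return nil
}
```

Note that methods in this SDK generation are synchronous and take no context argument; cancellation support arrived in later major versions.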
51 vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/client.go (generated, vendored, new file)
@@ -0,0 +1,51 @@
// Package storage implements the Azure ARM Storage service API version 2017-10-01.
//
// The Azure Storage Management API.
package storage

// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"github.com/Azure/go-autorest/autorest"
)

const (
	// DefaultBaseURI is the default URI used for the service Storage
	DefaultBaseURI = "https://management.azure.com"
)

// ManagementClient is the base client for Storage.
type ManagementClient struct {
	autorest.Client
	BaseURI        string
	SubscriptionID string
}

// New creates an instance of the ManagementClient client.
func New(subscriptionID string) ManagementClient {
	return NewWithBaseURI(DefaultBaseURI, subscriptionID)
}

// NewWithBaseURI creates an instance of the ManagementClient client.
func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient {
	return ManagementClient{
		Client:         autorest.NewClientWithUserAgent(UserAgent()),
		BaseURI:        baseURI,
		SubscriptionID: subscriptionID,
	}
}
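
`New` and `NewWithBaseURI` differ only in the management endpoint, which is how the same generated client targets sovereign clouds. A short sketch (the subscription ID is a placeholder; the China URL is that cloud's standard ARM endpoint):

```go
package example

import "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"

func newClients(subscriptionID string) (storage.ManagementClient, storage.ManagementClient) {
	// Public cloud: https://management.azure.com is baked in as DefaultBaseURI.
	public := storage.New(subscriptionID)

	// Azure China (or any other sovereign cloud) only needs a different base URI.
	china := storage.NewWithBaseURI("https://management.chinacloudapi.cn", subscriptionID)

	return public, china
}
```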
605 vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/models.go (generated, vendored, new file)
@@ -0,0 +1,605 @@
package storage

// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/date"
)

// AccessTier enumerates the values for access tier.
type AccessTier string

const (
	// Cool specifies the cool state for access tier.
	Cool AccessTier = "Cool"
	// Hot specifies the hot state for access tier.
	Hot AccessTier = "Hot"
)

// AccountStatus enumerates the values for account status.
type AccountStatus string

const (
	// Available specifies the available state for account status.
	Available AccountStatus = "available"
	// Unavailable specifies the unavailable state for account status.
	Unavailable AccountStatus = "unavailable"
)

// Action enumerates the values for action.
type Action string

const (
	// Allow specifies the allow state for action.
	Allow Action = "Allow"
)

// Bypass enumerates the values for bypass.
type Bypass string

const (
	// AzureServices specifies the azure services state for bypass.
	AzureServices Bypass = "AzureServices"
	// Logging specifies the logging state for bypass.
	Logging Bypass = "Logging"
	// Metrics specifies the metrics state for bypass.
	Metrics Bypass = "Metrics"
	// None specifies the none state for bypass.
	None Bypass = "None"
)

// DefaultAction enumerates the values for default action.
type DefaultAction string

const (
	// DefaultActionAllow specifies the default action allow state for default action.
	DefaultActionAllow DefaultAction = "Allow"
	// DefaultActionDeny specifies the default action deny state for default action.
	DefaultActionDeny DefaultAction = "Deny"
)

// HTTPProtocol enumerates the values for http protocol.
type HTTPProtocol string

const (
	// HTTPS specifies the https state for http protocol.
	HTTPS HTTPProtocol = "https"
	// Httpshttp specifies the httpshttp state for http protocol.
	Httpshttp HTTPProtocol = "https,http"
)

// KeyPermission enumerates the values for key permission.
type KeyPermission string

const (
	// Full specifies the full state for key permission.
	Full KeyPermission = "Full"
	// Read specifies the read state for key permission.
	Read KeyPermission = "Read"
)

// KeySource enumerates the values for key source.
type KeySource string

const (
	// MicrosoftKeyvault specifies the microsoft keyvault state for key source.
	MicrosoftKeyvault KeySource = "Microsoft.Keyvault"
	// MicrosoftStorage specifies the microsoft storage state for key source.
	MicrosoftStorage KeySource = "Microsoft.Storage"
)

// Kind enumerates the values for kind.
type Kind string

const (
	// BlobStorage specifies the blob storage state for kind.
	BlobStorage Kind = "BlobStorage"
	// Storage specifies the storage state for kind.
	Storage Kind = "Storage"
	// StorageV2 specifies the storage v2 state for kind.
	StorageV2 Kind = "StorageV2"
)

// Permissions enumerates the values for permissions.
type Permissions string

const (
	// A specifies the a state for permissions.
	A Permissions = "a"
	// C specifies the c state for permissions.
	C Permissions = "c"
	// D specifies the d state for permissions.
	D Permissions = "d"
	// L specifies the l state for permissions.
	L Permissions = "l"
	// P specifies the p state for permissions.
	P Permissions = "p"
	// R specifies the r state for permissions.
	R Permissions = "r"
	// U specifies the u state for permissions.
	U Permissions = "u"
	// W specifies the w state for permissions.
	W Permissions = "w"
)

// ProvisioningState enumerates the values for provisioning state.
type ProvisioningState string

const (
	// Creating specifies the creating state for provisioning state.
	Creating ProvisioningState = "Creating"
	// ResolvingDNS specifies the resolving dns state for provisioning state.
	ResolvingDNS ProvisioningState = "ResolvingDNS"
	// Succeeded specifies the succeeded state for provisioning state.
	Succeeded ProvisioningState = "Succeeded"
)

// Reason enumerates the values for reason.
type Reason string

const (
	// AccountNameInvalid specifies the account name invalid state for reason.
	AccountNameInvalid Reason = "AccountNameInvalid"
	// AlreadyExists specifies the already exists state for reason.
	AlreadyExists Reason = "AlreadyExists"
)

// ReasonCode enumerates the values for reason code.
type ReasonCode string

const (
	// NotAvailableForSubscription specifies the not available for subscription state for reason code.
	NotAvailableForSubscription ReasonCode = "NotAvailableForSubscription"
	// QuotaID specifies the quota id state for reason code.
	QuotaID ReasonCode = "QuotaId"
)

// Services enumerates the values for services.
type Services string

const (
	// B specifies the b state for services.
	B Services = "b"
	// F specifies the f state for services.
	F Services = "f"
	// Q specifies the q state for services.
	Q Services = "q"
	// T specifies the t state for services.
	T Services = "t"
)

// SignedResource enumerates the values for signed resource.
type SignedResource string

const (
	// SignedResourceB specifies the signed resource b state for signed resource.
	SignedResourceB SignedResource = "b"
	// SignedResourceC specifies the signed resource c state for signed resource.
	SignedResourceC SignedResource = "c"
	// SignedResourceF specifies the signed resource f state for signed resource.
	SignedResourceF SignedResource = "f"
	// SignedResourceS specifies the signed resource s state for signed resource.
	SignedResourceS SignedResource = "s"
)

// SignedResourceTypes enumerates the values for signed resource types.
type SignedResourceTypes string

const (
	// SignedResourceTypesC specifies the signed resource types c state for signed resource types.
	SignedResourceTypesC SignedResourceTypes = "c"
	// SignedResourceTypesO specifies the signed resource types o state for signed resource types.
	SignedResourceTypesO SignedResourceTypes = "o"
	// SignedResourceTypesS specifies the signed resource types s state for signed resource types.
	SignedResourceTypesS SignedResourceTypes = "s"
)

// SkuName enumerates the values for sku name.
type SkuName string

const (
	// PremiumLRS specifies the premium lrs state for sku name.
	PremiumLRS SkuName = "Premium_LRS"
	// StandardGRS specifies the standard grs state for sku name.
	StandardGRS SkuName = "Standard_GRS"
	// StandardLRS specifies the standard lrs state for sku name.
	StandardLRS SkuName = "Standard_LRS"
	// StandardRAGRS specifies the standard ragrs state for sku name.
	StandardRAGRS SkuName = "Standard_RAGRS"
	// StandardZRS specifies the standard zrs state for sku name.
	StandardZRS SkuName = "Standard_ZRS"
)

// SkuTier enumerates the values for sku tier.
type SkuTier string

const (
	// Premium specifies the premium state for sku tier.
	Premium SkuTier = "Premium"
	// Standard specifies the standard state for sku tier.
	Standard SkuTier = "Standard"
)

// State enumerates the values for state.
type State string

const (
	// StateDeprovisioning specifies the state deprovisioning state for state.
	StateDeprovisioning State = "deprovisioning"
	// StateFailed specifies the state failed state for state.
	StateFailed State = "failed"
	// StateNetworkSourceDeleted specifies the state network source deleted state for state.
	StateNetworkSourceDeleted State = "networkSourceDeleted"
	// StateProvisioning specifies the state provisioning state for state.
	StateProvisioning State = "provisioning"
	// StateSucceeded specifies the state succeeded state for state.
	StateSucceeded State = "succeeded"
)

// UsageUnit enumerates the values for usage unit.
type UsageUnit string

const (
	// Bytes specifies the bytes state for usage unit.
	Bytes UsageUnit = "Bytes"
	// BytesPerSecond specifies the bytes per second state for usage unit.
	BytesPerSecond UsageUnit = "BytesPerSecond"
	// Count specifies the count state for usage unit.
	Count UsageUnit = "Count"
	// CountsPerSecond specifies the counts per second state for usage unit.
	CountsPerSecond UsageUnit = "CountsPerSecond"
	// Percent specifies the percent state for usage unit.
	Percent UsageUnit = "Percent"
	// Seconds specifies the seconds state for usage unit.
	Seconds UsageUnit = "Seconds"
)

// Account is the storage account.
type Account struct {
	autorest.Response `json:"-"`
	ID *string `json:"id,omitempty"`
	Name *string `json:"name,omitempty"`
	Type *string `json:"type,omitempty"`
	Location *string `json:"location,omitempty"`
	Tags *map[string]*string `json:"tags,omitempty"`
	Sku *Sku `json:"sku,omitempty"`
	Kind Kind `json:"kind,omitempty"`
	Identity *Identity `json:"identity,omitempty"`
	*AccountProperties `json:"properties,omitempty"`
}

// AccountCheckNameAvailabilityParameters is the parameters used to check the availability of the storage account name.
type AccountCheckNameAvailabilityParameters struct {
	Name *string `json:"name,omitempty"`
	Type *string `json:"type,omitempty"`
}

// AccountCreateParameters is the parameters used when creating a storage account.
type AccountCreateParameters struct {
	Sku *Sku `json:"sku,omitempty"`
	Kind Kind `json:"kind,omitempty"`
	Location *string `json:"location,omitempty"`
	Tags *map[string]*string `json:"tags,omitempty"`
	Identity *Identity `json:"identity,omitempty"`
	*AccountPropertiesCreateParameters `json:"properties,omitempty"`
}

// AccountKey is an access key for the storage account.
type AccountKey struct {
	KeyName *string `json:"keyName,omitempty"`
	Value *string `json:"value,omitempty"`
	Permissions KeyPermission `json:"permissions,omitempty"`
}

// AccountListKeysResult is the response from the ListKeys operation.
type AccountListKeysResult struct {
	autorest.Response `json:"-"`
	Keys *[]AccountKey `json:"keys,omitempty"`
}

// AccountListResult is the response from the List Storage Accounts operation.
type AccountListResult struct {
	autorest.Response `json:"-"`
	Value *[]Account `json:"value,omitempty"`
}

// AccountProperties is properties of the storage account.
type AccountProperties struct {
	ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
	PrimaryEndpoints *Endpoints `json:"primaryEndpoints,omitempty"`
	PrimaryLocation *string `json:"primaryLocation,omitempty"`
	StatusOfPrimary AccountStatus `json:"statusOfPrimary,omitempty"`
	LastGeoFailoverTime *date.Time `json:"lastGeoFailoverTime,omitempty"`
	SecondaryLocation *string `json:"secondaryLocation,omitempty"`
	StatusOfSecondary AccountStatus `json:"statusOfSecondary,omitempty"`
	CreationTime *date.Time `json:"creationTime,omitempty"`
	CustomDomain *CustomDomain `json:"customDomain,omitempty"`
	SecondaryEndpoints *Endpoints `json:"secondaryEndpoints,omitempty"`
	Encryption *Encryption `json:"encryption,omitempty"`
	AccessTier AccessTier `json:"accessTier,omitempty"`
	EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"`
	NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"`
}

// AccountPropertiesCreateParameters is the parameters used to create the storage account.
type AccountPropertiesCreateParameters struct {
	CustomDomain *CustomDomain `json:"customDomain,omitempty"`
	Encryption *Encryption `json:"encryption,omitempty"`
	NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"`
	AccessTier AccessTier `json:"accessTier,omitempty"`
	EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"`
}

// AccountPropertiesUpdateParameters is the parameters used when updating a storage account.
type AccountPropertiesUpdateParameters struct {
	CustomDomain *CustomDomain `json:"customDomain,omitempty"`
	Encryption *Encryption `json:"encryption,omitempty"`
	AccessTier AccessTier `json:"accessTier,omitempty"`
	EnableHTTPSTrafficOnly *bool `json:"supportsHttpsTrafficOnly,omitempty"`
	NetworkRuleSet *NetworkRuleSet `json:"networkAcls,omitempty"`
}

// AccountRegenerateKeyParameters is the parameters used to regenerate the storage account key.
type AccountRegenerateKeyParameters struct {
	KeyName *string `json:"keyName,omitempty"`
}

// AccountSasParameters is the parameters to list SAS credentials of a storage account.
type AccountSasParameters struct {
	Services Services `json:"signedServices,omitempty"`
	ResourceTypes SignedResourceTypes `json:"signedResourceTypes,omitempty"`
	Permissions Permissions `json:"signedPermission,omitempty"`
	IPAddressOrRange *string `json:"signedIp,omitempty"`
	Protocols HTTPProtocol `json:"signedProtocol,omitempty"`
	SharedAccessStartTime *date.Time `json:"signedStart,omitempty"`
	SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"`
	KeyToSign *string `json:"keyToSign,omitempty"`
}

// AccountUpdateParameters is the parameters that can be provided when updating the storage account properties.
type AccountUpdateParameters struct {
	Sku *Sku `json:"sku,omitempty"`
	Tags *map[string]*string `json:"tags,omitempty"`
	Identity *Identity `json:"identity,omitempty"`
	*AccountPropertiesUpdateParameters `json:"properties,omitempty"`
	Kind Kind `json:"kind,omitempty"`
}

// CheckNameAvailabilityResult is the CheckNameAvailability operation response.
type CheckNameAvailabilityResult struct {
	autorest.Response `json:"-"`
	NameAvailable *bool `json:"nameAvailable,omitempty"`
	Reason Reason `json:"reason,omitempty"`
	Message *string `json:"message,omitempty"`
}

// CustomDomain is the custom domain assigned to this storage account. This can be set via Update.
type CustomDomain struct {
	Name *string `json:"name,omitempty"`
	UseSubDomain *bool `json:"useSubDomain,omitempty"`
}

// Dimension is a dimension of blobs, possibly blob type or access tier.
type Dimension struct {
	Name *string `json:"name,omitempty"`
	DisplayName *string `json:"displayName,omitempty"`
}

// Encryption is the encryption settings on the storage account.
type Encryption struct {
	Services *EncryptionServices `json:"services,omitempty"`
	KeySource KeySource `json:"keySource,omitempty"`
	KeyVaultProperties *KeyVaultProperties `json:"keyvaultproperties,omitempty"`
}

// EncryptionService is a service that allows server-side encryption to be used.
type EncryptionService struct {
	Enabled *bool `json:"enabled,omitempty"`
	LastEnabledTime *date.Time `json:"lastEnabledTime,omitempty"`
}

// EncryptionServices is a list of services that support encryption.
type EncryptionServices struct {
	Blob *EncryptionService `json:"blob,omitempty"`
	File *EncryptionService `json:"file,omitempty"`
	Table *EncryptionService `json:"table,omitempty"`
	Queue *EncryptionService `json:"queue,omitempty"`
}

// Endpoints is the URIs that are used to perform a retrieval of a public blob, queue, or table object.
type Endpoints struct {
	Blob *string `json:"blob,omitempty"`
	Queue *string `json:"queue,omitempty"`
	Table *string `json:"table,omitempty"`
	File *string `json:"file,omitempty"`
}

// Identity is identity for the resource.
type Identity struct {
	PrincipalID *string `json:"principalId,omitempty"`
	TenantID *string `json:"tenantId,omitempty"`
	Type *string `json:"type,omitempty"`
}

// IPRule is IP rule with specific IP or IP range in CIDR format.
type IPRule struct {
	IPAddressOrRange *string `json:"value,omitempty"`
	Action Action `json:"action,omitempty"`
}

// KeyVaultProperties is properties of key vault.
type KeyVaultProperties struct {
	KeyName *string `json:"keyname,omitempty"`
	KeyVersion *string `json:"keyversion,omitempty"`
	KeyVaultURI *string `json:"keyvaulturi,omitempty"`
}

// ListAccountSasResponse is the List SAS credentials operation response.
type ListAccountSasResponse struct {
	autorest.Response `json:"-"`
	AccountSasToken *string `json:"accountSasToken,omitempty"`
}

// ListServiceSasResponse is the List service SAS credentials operation response.
type ListServiceSasResponse struct {
	autorest.Response `json:"-"`
	ServiceSasToken *string `json:"serviceSasToken,omitempty"`
}

// MetricSpecification is metric specification of operation.
type MetricSpecification struct {
	Name *string `json:"name,omitempty"`
	DisplayName *string `json:"displayName,omitempty"`
	DisplayDescription *string `json:"displayDescription,omitempty"`
	Unit *string `json:"unit,omitempty"`
	Dimensions *[]Dimension `json:"dimensions,omitempty"`
	AggregationType *string `json:"aggregationType,omitempty"`
	FillGapWithZero *bool `json:"fillGapWithZero,omitempty"`
	Category *string `json:"category,omitempty"`
	ResourceIDDimensionNameOverride *string `json:"resourceIdDimensionNameOverride,omitempty"`
}

// NetworkRuleSet is the network rule set.
type NetworkRuleSet struct {
	Bypass Bypass `json:"bypass,omitempty"`
	VirtualNetworkRules *[]VirtualNetworkRule `json:"virtualNetworkRules,omitempty"`
	IPRules *[]IPRule `json:"ipRules,omitempty"`
	DefaultAction DefaultAction `json:"defaultAction,omitempty"`
}

// Operation is storage REST API operation definition.
type Operation struct {
	Name *string `json:"name,omitempty"`
	Display *OperationDisplay `json:"display,omitempty"`
	Origin *string `json:"origin,omitempty"`
	*OperationProperties `json:"properties,omitempty"`
}

// OperationDisplay is display metadata associated with the operation.
type OperationDisplay struct {
	Provider *string `json:"provider,omitempty"`
	Resource *string `json:"resource,omitempty"`
	Operation *string `json:"operation,omitempty"`
}

// OperationListResult is result of the request to list Storage operations. It contains a list of operations and a URL
// link to get the next set of results.
type OperationListResult struct {
	autorest.Response `json:"-"`
	Value *[]Operation `json:"value,omitempty"`
}

// OperationProperties is properties of operation, including metric specifications.
type OperationProperties struct {
	ServiceSpecification *ServiceSpecification `json:"serviceSpecification,omitempty"`
}

// Resource describes a storage resource.
type Resource struct {
	ID *string `json:"id,omitempty"`
	Name *string `json:"name,omitempty"`
	Type *string `json:"type,omitempty"`
	Location *string `json:"location,omitempty"`
	Tags *map[string]*string `json:"tags,omitempty"`
}

// Restriction is the restriction because of which SKU cannot be used.
type Restriction struct {
	Type *string `json:"type,omitempty"`
	Values *[]string `json:"values,omitempty"`
	ReasonCode ReasonCode `json:"reasonCode,omitempty"`
}

// ServiceSasParameters is the parameters to list service SAS credentials of a specific resource.
type ServiceSasParameters struct {
	CanonicalizedResource *string `json:"canonicalizedResource,omitempty"`
	Resource SignedResource `json:"signedResource,omitempty"`
	Permissions Permissions `json:"signedPermission,omitempty"`
	IPAddressOrRange *string `json:"signedIp,omitempty"`
	Protocols HTTPProtocol `json:"signedProtocol,omitempty"`
	SharedAccessStartTime *date.Time `json:"signedStart,omitempty"`
	SharedAccessExpiryTime *date.Time `json:"signedExpiry,omitempty"`
	Identifier *string `json:"signedIdentifier,omitempty"`
	PartitionKeyStart *string `json:"startPk,omitempty"`
	PartitionKeyEnd *string `json:"endPk,omitempty"`
	RowKeyStart *string `json:"startRk,omitempty"`
	RowKeyEnd *string `json:"endRk,omitempty"`
	KeyToSign *string `json:"keyToSign,omitempty"`
	CacheControl *string `json:"rscc,omitempty"`
	ContentDisposition *string `json:"rscd,omitempty"`
	ContentEncoding *string `json:"rsce,omitempty"`
	ContentLanguage *string `json:"rscl,omitempty"`
	ContentType *string `json:"rsct,omitempty"`
}

// ServiceSpecification is one property of operation, including metric specifications.
type ServiceSpecification struct {
	MetricSpecifications *[]MetricSpecification `json:"metricSpecifications,omitempty"`
}

// Sku is the SKU of the storage account.
type Sku struct {
	Name SkuName `json:"name,omitempty"`
	Tier SkuTier `json:"tier,omitempty"`
	ResourceType *string `json:"resourceType,omitempty"`
	Kind Kind `json:"kind,omitempty"`
	Locations *[]string `json:"locations,omitempty"`
	Capabilities *[]SKUCapability `json:"capabilities,omitempty"`
	Restrictions *[]Restriction `json:"restrictions,omitempty"`
}

// SKUCapability is the capability information in the specified sku, including file encryption, network acls, change
// notification, etc.
type SKUCapability struct {
	Name *string `json:"name,omitempty"`
	Value *string `json:"value,omitempty"`
}

// SkuListResult is the response from the List Storage SKUs operation.
type SkuListResult struct {
	autorest.Response `json:"-"`
	Value *[]Sku `json:"value,omitempty"`
}

// Usage describes Storage Resource Usage.
type Usage struct {
	Unit UsageUnit `json:"unit,omitempty"`
	CurrentValue *int32 `json:"currentValue,omitempty"`
	Limit *int32 `json:"limit,omitempty"`
	Name *UsageName `json:"name,omitempty"`
}

// UsageListResult is the response from the List Usages operation.
type UsageListResult struct {
	autorest.Response `json:"-"`
	Value *[]Usage `json:"value,omitempty"`
}

// UsageName is the usage names that can be used; currently limited to StorageAccount.
type UsageName struct {
	Value *string `json:"value,omitempty"`
	LocalizedValue *string `json:"localizedValue,omitempty"`
}

// VirtualNetworkRule is a virtual network rule.
type VirtualNetworkRule struct {
	VirtualNetworkResourceID *string `json:"id,omitempty"`
	Action Action `json:"action,omitempty"`
	State State `json:"state,omitempty"`
}
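
The models are pointer-heavy so that omitted fields serialize as absent rather than zero-valued, and each properties struct is embedded by pointer under the `properties` JSON key. A sketch of composing an update that locks an account down to HTTPS-only traffic from a single address range (the CIDR is illustrative):

```go
package example

import "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"

// lockedDownUpdate builds parameters that deny all network access except
// one allowed range, with Azure services still bypassing the rules.
func lockedDownUpdate() storage.AccountUpdateParameters {
	httpsOnly := true
	allowed := "203.0.113.0/24" // illustrative range

	return storage.AccountUpdateParameters{
		AccountPropertiesUpdateParameters: &storage.AccountPropertiesUpdateParameters{
			AccessTier:             storage.Hot,
			EnableHTTPSTrafficOnly: &httpsOnly,
			NetworkRuleSet: &storage.NetworkRuleSet{
				Bypass:        storage.AzureServices,
				DefaultAction: storage.DefaultActionDeny,
				IPRules: &[]storage.IPRule{
					{IPAddressOrRange: &allowed, Action: storage.Allow},
				},
			},
		},
	}
}
```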
98 vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/operations.go (generated, vendored, new file)
@@ -0,0 +1,98 @@
package storage

// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
	"net/http"
)

// OperationsClient is the Azure Storage Management API.
type OperationsClient struct {
	ManagementClient
}

// NewOperationsClient creates an instance of the OperationsClient client.
func NewOperationsClient(subscriptionID string) OperationsClient {
	return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}

// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client.
func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
	return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
}

// List lists all of the available Storage REST API operations.
func (client OperationsClient) List() (result OperationListResult, err error) {
	req, err := client.ListPreparer()
	if err != nil {
		err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", nil, "Failure preparing request")
		return
	}

	resp, err := client.ListSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure sending request")
		return
	}

	result, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "storage.OperationsClient", "List", resp, "Failure responding to request")
	}

	return
}

// ListPreparer prepares the List request.
func (client OperationsClient) ListPreparer() (*http.Request, error) {
	const APIVersion = "2017-10-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPath("/providers/Microsoft.Storage/operations"),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare(&http.Request{})
}

// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client,
		req,
		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}

// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
102 vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/skus.go (generated, vendored, new file)
@@ -0,0 +1,102 @@
package storage

// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
	"net/http"
)

// SkusClient is the Azure Storage Management API.
type SkusClient struct {
	ManagementClient
}

// NewSkusClient creates an instance of the SkusClient client.
func NewSkusClient(subscriptionID string) SkusClient {
	return NewSkusClientWithBaseURI(DefaultBaseURI, subscriptionID)
}

// NewSkusClientWithBaseURI creates an instance of the SkusClient client.
func NewSkusClientWithBaseURI(baseURI string, subscriptionID string) SkusClient {
	return SkusClient{NewWithBaseURI(baseURI, subscriptionID)}
}

// List lists the available SKUs supported by Microsoft.Storage for the given subscription.
func (client SkusClient) List() (result SkuListResult, err error) {
	req, err := client.ListPreparer()
	if err != nil {
		err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", nil, "Failure preparing request")
		return
	}

	resp, err := client.ListSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure sending request")
		return
	}

	result, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "storage.SkusClient", "List", resp, "Failure responding to request")
	}

	return
}

// ListPreparer prepares the List request.
func (client SkusClient) ListPreparer() (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2017-10-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/skus", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare(&http.Request{})
}

// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client SkusClient) ListSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client,
		req,
		azure.DoRetryWithRegistration(client.Client))
}

// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client SkusClient) ListResponder(resp *http.Response) (result SkuListResult, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
102 vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/usage.go (generated, vendored, new file)
@@ -0,0 +1,102 @@
package storage

// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
	"net/http"
)

// UsageClient is the Azure Storage Management API.
type UsageClient struct {
	ManagementClient
}

// NewUsageClient creates an instance of the UsageClient client.
func NewUsageClient(subscriptionID string) UsageClient {
	return NewUsageClientWithBaseURI(DefaultBaseURI, subscriptionID)
}

// NewUsageClientWithBaseURI creates an instance of the UsageClient client.
func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClient {
	return UsageClient{NewWithBaseURI(baseURI, subscriptionID)}
}

// List gets the current usage count and the limit for the resources under the subscription.
func (client UsageClient) List() (result UsageListResult, err error) {
	req, err := client.ListPreparer()
	if err != nil {
		err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", nil, "Failure preparing request")
		return
	}

	resp, err := client.ListSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", resp, "Failure sending request")
		return
	}

	result, err = client.ListResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", resp, "Failure responding to request")
	}

	return
}

// ListPreparer prepares the List request.
func (client UsageClient) ListPreparer() (*http.Request, error) {
	pathParameters := map[string]interface{}{
		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
	}

	const APIVersion = "2017-10-01"
	queryParameters := map[string]interface{}{
		"api-version": APIVersion,
	}

	preparer := autorest.CreatePreparer(
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/usages", pathParameters),
		autorest.WithQueryParameters(queryParameters))
	return preparer.Prepare(&http.Request{})
}

// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client,
		req,
		azure.DoRetryWithRegistration(client.Client))
}

// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client UsageClient) ListResponder(resp *http.Response) (result UsageListResult, err error) {
	err = autorest.Respond(
		resp,
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	result.Response = autorest.Response{Response: resp}
	return
}
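
Usage entries pair a CurrentValue with a Limit, so remaining quota is a simple subtraction. A sketch, assuming a configured Authorizer and assuming the storage-account counter is reported under the name "StorageAccounts" (that exact string is an assumption, not confirmed by this diff):

```go
package example

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
)

// remainingAccounts computes how many more storage accounts the
// subscription may create in its current region quota.
func remainingAccounts(subscriptionID string) (int32, error) {
	client := storage.NewUsageClient(subscriptionID)

	result, err := client.List()
	if err != nil {
		return 0, err
	}
	for _, u := range *result.Value {
		if u.Name != nil && u.Name.Value != nil && *u.Name.Value == "StorageAccounts" {
			return *u.Limit - *u.CurrentValue, nil
		}
	}
	return 0, fmt.Errorf("storage account usage not reported")
}
```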
28 vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage/version.go (generated, vendored, new file)
@@ -0,0 +1,28 @@
package storage

// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

// UserAgent returns the UserAgent string to use when sending http.Requests.
func UserAgent() string {
	return "Azure-SDK-For-Go/v11.3.0-beta arm-storage/2017-10-01"
}

// Version returns the semantic version (see http://semver.org) of the client.
func Version() string {
	return "v11.3.0-beta"
}
@@ -1,7 +1,23 @@
 package storage
 
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
 	"bytes"
+	"crypto/md5"
+	"encoding/base64"
 	"fmt"
 	"net/http"
 	"net/url"
@@ -31,8 +47,7 @@ func (b *Blob) PutAppendBlob(options *PutBlobOptions) error {
 	if err != nil {
 		return err
 	}
-	readAndCloseBody(resp.body)
-	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+	return b.respondCreation(resp, BlobTypeAppend)
 }
 
 // AppendBlockOptions includes the options for an append block operation
@@ -46,6 +61,7 @@ type AppendBlockOptions struct {
 	IfMatch     string `header:"If-Match"`
 	IfNoneMatch string `header:"If-None-Match"`
 	RequestID   string `header:"x-ms-client-request-id"`
+	ContentMD5  bool
 }
 
 // AppendBlock appends a block to an append blob.
@@ -60,6 +76,10 @@ func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error {
 	if options != nil {
 		params = addTimeout(params, options.Timeout)
 		headers = mergeHeaders(headers, headersFromStruct(*options))
+		if options.ContentMD5 {
+			md5sum := md5.Sum(chunk)
+			headers[headerContentMD5] = base64.StdEncoding.EncodeToString(md5sum[:])
+		}
 	}
 	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
 
@@ -67,6 +87,5 @@ func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error {
 	if err != nil {
 		return err
 	}
-	readAndCloseBody(resp.body)
-	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+	return b.respondCreation(resp, BlobTypeAppend)
 }
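
The new `ContentMD5` flag on `AppendBlockOptions` makes the client hash each chunk itself and send it as a Content-MD5 header, letting the service reject corrupted uploads. A sketch against the blob API as changed above (container and blob names are illustrative):

```go
package example

import "github.com/Azure/azure-sdk-for-go/storage"

// appendWithMD5 creates an append blob and appends one integrity-checked chunk.
func appendWithMD5(accountName, accountKey string, chunk []byte) error {
	client, err := storage.NewBasicClient(accountName, accountKey)
	if err != nil {
		return err
	}

	blob := client.GetBlobService().
		GetContainerReference("ark").
		GetBlobReference("backup.log")

	// Create the (empty) append blob first.
	if err := blob.PutAppendBlob(nil); err != nil {
		return err
	}
	// ContentMD5 tells the SDK to compute and attach the Content-MD5
	// header for this chunk, as added in the hunk above.
	return blob.AppendBlock(chunk, &storage.AppendBlockOptions{ContentMD5: true})
}
```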
@@ -1,6 +1,20 @@
 // Package storage provides clients for Microsoft Azure Storage Services.
 package storage
 
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
 	"bytes"
 	"fmt"
@@ -41,16 +55,18 @@
 )
 
 func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) {
-	authHeader, err := c.getSharedKey(verb, url, headers, auth)
-	if err != nil {
-		return nil, err
+	if !c.sasClient {
+		authHeader, err := c.getSharedKey(verb, url, headers, auth)
+		if err != nil {
+			return nil, err
+		}
+		headers[headerAuthorization] = authHeader
 	}
-	headers[headerAuthorization] = authHeader
 	return headers, nil
 }
 
 func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) {
-	canRes, err := c.buildCanonicalizedResource(url, auth)
+	canRes, err := c.buildCanonicalizedResource(url, auth, false)
 	if err != nil {
 		return "", err
 	}
@@ -62,15 +78,18 @@ func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth
 	return c.createAuthorizationHeader(canString, auth), nil
 }
 
-func (c *Client) buildCanonicalizedResource(uri string, auth authentication) (string, error) {
+func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) {
 	errMsg := "buildCanonicalizedResource error: %s"
 	u, err := url.Parse(uri)
 	if err != nil {
 		return "", fmt.Errorf(errMsg, err.Error())
 	}
 
-	cr := bytes.NewBufferString("/")
-	cr.WriteString(c.getCanonicalizedAccountName())
+	cr := bytes.NewBufferString("")
+	if c.accountName != StorageEmulatorAccountName || !sas {
+		cr.WriteString("/")
+		cr.WriteString(c.getCanonicalizedAccountName())
+	}
 
 	if len(u.Path) > 0 {
 		// Any portion of the CanonicalizedResource string that is derived from
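
The signing change above is what makes SAS-based clients work: a client constructed from a SAS token must not attach a SharedKey Authorization header, because its credentials already travel in the URL query string. An illustrative reduction of that branch (not the vendored code itself):

```go
package example

// addAuth mirrors the vendored branch: SharedKey signing is skipped for a
// SAS client, whose sig/se/sp credentials already ride in the query string.
func addAuth(sasClient bool, sign func() (string, error), headers map[string]string) (map[string]string, error) {
	if !sasClient {
		authHeader, err := sign()
		if err != nil {
			return nil, err
		}
		headers["Authorization"] = authHeader
	}
	return headers, nil
}
```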
@ -1,5 +1,19 @@
 package storage

+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
     "encoding/xml"
     "errors"

@ -90,7 +104,7 @@ type BlobProperties struct {
     CacheControl       string   `xml:"Cache-Control" header:"x-ms-blob-cache-control"`
     ContentLanguage    string   `xml:"Cache-Language" header:"x-ms-blob-content-language"`
     ContentDisposition string   `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"`
-    BlobType           BlobType `xml:"x-ms-blob-blob-type"`
+    BlobType           BlobType `xml:"BlobType"`
     SequenceNumber     int64    `xml:"x-ms-blob-sequence-number"`
     CopyID             string   `xml:"CopyId"`
     CopyStatus         string   `xml:"CopyStatus"`

@ -135,8 +149,7 @@ func (b *Blob) Exists() (bool, error) {
 }

 // GetURL gets the canonical URL to the blob with the specified name in the
-// specified container. If name is not specified, the canonical URL for the entire
-// container is obtained.
+// specified container.
 // This method does not create a publicly accessible URL if the blob or container
 // is private and this method does not check if the blob exists.
 func (b *Blob) GetURL() string {

@ -437,8 +450,8 @@ func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error {
     uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)

     if b.Properties.BlobType == BlobTypePage {
-        headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("byte %v", b.Properties.ContentLength))
-        if options != nil || options.SequenceNumberAction != nil {
+        headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("%v", b.Properties.ContentLength))
+        if options != nil && options.SequenceNumberAction != nil {
             headers = addToHeaders(headers, "x-ms-sequence-number-action", string(*options.SequenceNumberAction))
             if *options.SequenceNumberAction != SequenceNumberActionIncrement {
                 headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber))

@ -536,27 +549,7 @@ func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error {
 }

 func (b *Blob) writeMetadata(h http.Header) {
-    metadata := make(map[string]string)
-    for k, v := range h {
-        // Can't trust CanonicalHeaderKey() to munge case
-        // reliably. "_" is allowed in identifiers:
-        // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
-        // https://msdn.microsoft.com/library/aa664670(VS.71).aspx
-        // http://tools.ietf.org/html/rfc7230#section-3.2
-        // ...but "_" is considered invalid by
-        // CanonicalMIMEHeaderKey in
-        // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
-        // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
-        k = strings.ToLower(k)
-        if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
-            continue
-        }
-        // metadata["lol"] = content of the last X-Ms-Meta-Lol header
-        k = k[len(userDefinedMetadataHeaderPrefix):]
-        metadata[k] = v[len(v)-1]
-    }
-
-    b.Metadata = BlobMetadata(metadata)
+    b.Metadata = BlobMetadata(writeMetadata(h))
 }

 // DeleteBlobOptions includes the options for a delete blob operation

@ -627,3 +620,13 @@ func pathForResource(container, name string) string {
     }
     return fmt.Sprintf("/%s", container)
 }
+
+func (b *Blob) respondCreation(resp *storageResponse, bt BlobType) error {
+    readAndCloseBody(resp.body)
+    err := checkRespCode(resp.statusCode, []int{http.StatusCreated})
+    if err != nil {
+        return err
+    }
+    b.Properties.BlobType = bt
+    return nil
+}
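Two real bug fixes hide in the `SetProperties` hunk above: the page-blob content-length header no longer carries a stray `byte ` prefix, and the nil check uses `&&` instead of `||`, which previously dereferenced `options.SequenceNumberAction` even when `options` was nil. A minimal demonstration of why the short-circuit matters, with the types reduced to the bare minimum:

```go
package main

import "fmt"

type SequenceNumberAction string

type SetBlobPropertiesOptions struct {
	SequenceNumberAction *SequenceNumberAction
}

func main() {
	var options *SetBlobPropertiesOptions // callers may legitimately pass nil

	// The old `options != nil || options.SequenceNumberAction != nil` still
	// evaluates the right-hand side when options is nil and panics; &&
	// short-circuits and never dereferences the nil pointer.
	if options != nil && options.SequenceNumberAction != nil {
		fmt.Println("set x-ms-sequence-number-action:", *options.SequenceNumberAction)
	} else {
		fmt.Println("nothing to set")
	}
}
```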
@ -1,5 +1,19 @@
 package storage

+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
     "errors"
     "fmt"

@ -8,68 +22,122 @@ import (
     "time"
 )

-// GetSASURIWithSignedIPAndProtocol creates an URL to the specified blob which contains the Shared
-// Access Signature with specified permissions and expiration time. Also includes signedIPRange and allowed protocols.
-// If old API version is used but no signedIP is passed (ie empty string) then this should still work.
-// We only populate the signedIP when it non-empty.
+// OverrideHeaders defines overridable response heaedrs in
+// a request using a SAS URI.
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+type OverrideHeaders struct {
+    CacheControl       string
+    ContentDisposition string
+    ContentEncoding    string
+    ContentLanguage    string
+    ContentType        string
+}
+
+// BlobSASOptions are options to construct a blob SAS
+// URI.
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+type BlobSASOptions struct {
+    BlobServiceSASPermissions
+    OverrideHeaders
+    SASOptions
+}
+
+// BlobServiceSASPermissions includes the available permissions for
+// blob service SAS URI.
+type BlobServiceSASPermissions struct {
+    Read   bool
+    Add    bool
+    Create bool
+    Write  bool
+    Delete bool
+}
+
+func (p BlobServiceSASPermissions) buildString() string {
+    permissions := ""
+    if p.Read {
+        permissions += "r"
+    }
+    if p.Add {
+        permissions += "a"
+    }
+    if p.Create {
+        permissions += "c"
+    }
+    if p.Write {
+        permissions += "w"
+    }
+    if p.Delete {
+        permissions += "d"
+    }
+    return permissions
+}
+
+// GetSASURI creates an URL to the blob which contains the Shared
+// Access Signature with the specified options.
 //
-// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx
-func (b *Blob) GetSASURIWithSignedIPAndProtocol(expiry time.Time, permissions string, signedIPRange string, HTTPSOnly bool) (string, error) {
-    var (
-        signedPermissions = permissions
-        blobURL           = b.GetURL()
-    )
-    canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(blobURL, b.Container.bsc.auth)
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+func (b *Blob) GetSASURI(options BlobSASOptions) (string, error) {
+    uri := b.GetURL()
+    signedResource := "b"
+    canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(uri, b.Container.bsc.auth, true)
     if err != nil {
         return "", err
     }

-    // "The canonicalizedresouce portion of the string is a canonical path to the signed resource.
-    // It must include the service name (blob, table, queue or file) for version 2015-02-21 or
-    // later, the storage account name, and the resource name, and must be URL-decoded.
-    // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
+    permissions := options.BlobServiceSASPermissions.buildString()
+    return b.Container.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders)
+}
+
+func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonicalizedResource, signedResource string, headers OverrideHeaders) (string, error) {
+    start := ""
+    if options.Start != (time.Time{}) {
+        start = options.Start.UTC().Format(time.RFC3339)
+    }
+
+    expiry := options.Expiry.UTC().Format(time.RFC3339)

     // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
     canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
-    canonicalizedResource, err = url.QueryUnescape(canonicalizedResource)
+    canonicalizedResource, err := url.QueryUnescape(canonicalizedResource)
     if err != nil {
         return "", err
     }

-    signedExpiry := expiry.UTC().Format(time.RFC3339)
-
-    //If blob name is missing, resource is a container
-    signedResource := "c"
-    if len(b.Name) > 0 {
-        signedResource = "b"
-    }
-
-    protocols := "https,http"
-    if HTTPSOnly {
+    protocols := ""
+    if options.UseHTTPS {
         protocols = "https"
     }
-    stringToSign, err := blobSASStringToSign(b.Container.bsc.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions, signedIPRange, protocols)
+    stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, headers)
     if err != nil {
         return "", err
     }

-    sig := b.Container.bsc.client.computeHmac256(stringToSign)
+    sig := c.computeHmac256(stringToSign)
     sasParams := url.Values{
-        "sv":  {b.Container.bsc.client.apiVersion},
-        "se":  {signedExpiry},
+        "sv":  {c.apiVersion},
+        "se":  {expiry},
         "sr":  {signedResource},
-        "sp":  {signedPermissions},
+        "sp":  {permissions},
         "sig": {sig},
     }

-    if b.Container.bsc.client.apiVersion >= "2015-04-05" {
-        sasParams.Add("spr", protocols)
-        if signedIPRange != "" {
-            sasParams.Add("sip", signedIPRange)
+    if c.apiVersion >= "2015-04-05" {
+        if protocols != "" {
+            sasParams.Add("spr", protocols)
+        }
+        if options.IP != "" {
+            sasParams.Add("sip", options.IP)
         }
     }

-    sasURL, err := url.Parse(blobURL)
+    // Add override response hedaers
+    addQueryParameter(sasParams, "rscc", headers.CacheControl)
+    addQueryParameter(sasParams, "rscd", headers.ContentDisposition)
+    addQueryParameter(sasParams, "rsce", headers.ContentEncoding)
+    addQueryParameter(sasParams, "rscl", headers.ContentLanguage)
+    addQueryParameter(sasParams, "rsct", headers.ContentType)
+
+    sasURL, err := url.Parse(uri)
     if err != nil {
         return "", err
     }

@ -77,16 +145,12 @@ func (b *Blob) GetSASURIWithSignedIPAndProtocol(expiry time.Time, permissions st
     return sasURL.String(), nil
 }

-// GetSASURI creates an URL to the specified blob which contains the Shared
-// Access Signature with specified permissions and expiration time.
-//
-// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx
-func (b *Blob) GetSASURI(expiry time.Time, permissions string) (string, error) {
-    return b.GetSASURIWithSignedIPAndProtocol(expiry, permissions, "", false)
-}
-
-func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string, signedIP string, protocols string) (string, error) {
-    var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string
+func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion string, headers OverrideHeaders) (string, error) {
+    rscc := headers.CacheControl
+    rscd := headers.ContentDisposition
+    rsce := headers.ContentEncoding
+    rscl := headers.ContentLanguage
+    rsct := headers.ContentType

     if signedVersion >= "2015-02-21" {
         canonicalizedResource = "/blob" + canonicalizedResource
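The blob SAS rewrite above replaces the positional-argument `GetSASURIWithSignedIPAndProtocol`/`GetSASURI` pair with a single options struct. A hedged usage sketch; it assumes `SASOptions` (defined elsewhere in the package, with at least `Start`, `Expiry`, `Identifier`, `IP`, and `UseHTTPS`, judging by the uses above) and a `GetBlobReference` helper on `Container`, and all account names, keys, and blob paths are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// Placeholder credentials; NewBasicClient expects a real base64 key.
	client, err := storage.NewBasicClient("mystorageacct", "<base64-account-key>")
	if err != nil {
		log.Fatal(err)
	}

	blob := client.GetBlobService().
		GetContainerReference("ark").
		GetBlobReference("backups/backup1/ark-backup.json")

	// Read-only, HTTPS-only SAS URI valid for 30 minutes.
	sasuri, err := blob.GetSASURI(storage.BlobSASOptions{
		BlobServiceSASPermissions: storage.BlobServiceSASPermissions{Read: true},
		SASOptions: storage.SASOptions{
			Expiry:   time.Now().Add(30 * time.Minute),
			UseHTTPS: true,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sasuri)
}
```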
@ -1,9 +1,26 @@
 package storage

+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
     "encoding/xml"
     "fmt"
     "net/http"
     "net/url"
     "strconv"
     "strings"
 )

 // BlobStorageClient contains operations for Microsoft Azure Blob Storage

@ -45,6 +62,21 @@ func (b *BlobStorageClient) GetContainerReference(name string) *Container {
     }
 }

+// GetContainerReferenceFromSASURI returns a Container object for the specified
+// container SASURI
+func GetContainerReferenceFromSASURI(sasuri url.URL) (*Container, error) {
+    path := strings.Split(sasuri.Path, "/")
+    if len(path) <= 1 {
+        return nil, fmt.Errorf("could not find a container in URI: %s", sasuri.String())
+    }
+    cli := newSASClient().GetBlobService()
+    return &Container{
+        bsc:    &cli,
+        Name:   path[1],
+        sasuri: sasuri,
+    }, nil
+}
+
 // ListContainers returns the list of containers in a storage account along with
 // pagination token and other response details.
 //

@ -54,21 +86,53 @@ func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*Con
     uri := b.client.getEndpoint(blobServiceName, "", q)
     headers := b.client.getStandardHeaders()

-    var out ContainerListResponse
+    type ContainerAlias struct {
+        bsc        *BlobStorageClient
+        Name       string              `xml:"Name"`
+        Properties ContainerProperties `xml:"Properties"`
+        Metadata   BlobMetadata
+        sasuri     url.URL
+    }
+    type ContainerListResponseAlias struct {
+        XMLName    xml.Name         `xml:"EnumerationResults"`
+        Xmlns      string           `xml:"xmlns,attr"`
+        Prefix     string           `xml:"Prefix"`
+        Marker     string           `xml:"Marker"`
+        NextMarker string           `xml:"NextMarker"`
+        MaxResults int64            `xml:"MaxResults"`
+        Containers []ContainerAlias `xml:"Containers>Container"`
+    }
+
+    var outAlias ContainerListResponseAlias
     resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth)
     if err != nil {
         return nil, err
     }
     defer resp.body.Close()
-    err = xmlUnmarshal(resp.body, &out)
+    err = xmlUnmarshal(resp.body, &outAlias)
     if err != nil {
         return nil, err
     }

-    // assign our client to the newly created Container objects
-    for i := range out.Containers {
-        out.Containers[i].bsc = &b
+    out := ContainerListResponse{
+        XMLName:    outAlias.XMLName,
+        Xmlns:      outAlias.Xmlns,
+        Prefix:     outAlias.Prefix,
+        Marker:     outAlias.Marker,
+        NextMarker: outAlias.NextMarker,
+        MaxResults: outAlias.MaxResults,
+        Containers: make([]Container, len(outAlias.Containers)),
+    }
+    for i, cnt := range outAlias.Containers {
+        out.Containers[i] = Container{
+            bsc:        &b,
+            Name:       cnt.Name,
+            Properties: cnt.Properties,
+            Metadata:   map[string]string(cnt.Metadata),
+            sasuri:     cnt.sasuri,
+        }
     }

     return &out, err
 }

@ -93,3 +157,26 @@ func (p ListContainersParameters) getParameters() url.Values {

     return out
 }
+
+func writeMetadata(h http.Header) map[string]string {
+    metadata := make(map[string]string)
+    for k, v := range h {
+        // Can't trust CanonicalHeaderKey() to munge case
+        // reliably. "_" is allowed in identifiers:
+        // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
+        // https://msdn.microsoft.com/library/aa664670(VS.71).aspx
+        // http://tools.ietf.org/html/rfc7230#section-3.2
+        // ...but "_" is considered invalid by
+        // CanonicalMIMEHeaderKey in
+        // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542
+        // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl".
+        k = strings.ToLower(k)
+        if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) {
+            continue
+        }
+        // metadata["lol"] = content of the last X-Ms-Meta-Lol header
+        k = k[len(userDefinedMetadataHeaderPrefix):]
+        metadata[k] = v[len(v)-1]
+    }
+    return metadata
+}
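`GetContainerReferenceFromSASURI` lets a caller operate on a container with nothing but its SAS URI, backed by the `newSASClient` constructor that appears in the client hunks further down. A usage sketch with a placeholder URI and token values:

```go
package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// A container SAS URI as handed out by GetSASURI or the Azure portal;
	// the host, container, and token values below are placeholders.
	raw := "https://mystorageacct.blob.core.windows.net/ark?sv=2016-05-31&sr=c&sig=<signature>"
	sasuri, err := url.Parse(raw)
	if err != nil {
		log.Fatal(err)
	}

	// path[1] in the implementation above, i.e. "ark", becomes the container name.
	container, err := storage.GetContainerReferenceFromSASURI(*sasuri)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(container.Name)
}
```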
@ -1,5 +1,19 @@
 package storage

+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
     "bytes"
     "encoding/xml"

@ -132,8 +146,7 @@ func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions
     if err != nil {
         return err
     }
-    readAndCloseBody(resp.body)
-    return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+    return b.respondCreation(resp, BlobTypeBlock)
 }

 // PutBlockOptions includes the options for a put block operation

@ -181,8 +194,7 @@ func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, o
     if err != nil {
         return err
     }
-    readAndCloseBody(resp.body)
-    return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+    return b.respondCreation(resp, BlobTypeBlock)
 }

 // PutBlockListOptions includes the options for a put block list operation
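Both block-blob creation paths now delegate to the `respondCreation` helper added in the blob hunk earlier, which drains the body, asserts 201 Created, and records the blob type only on success. The pattern in isolation; the names and signature here are illustrative, not the package's:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// respond drains and closes the body, asserts the expected status code, and
// applies a side effect only on success -- the same shape as respondCreation.
func respond(resp *http.Response, want int, onSuccess func()) error {
	io.Copy(ioutil.Discard, resp.Body) // always drain so the connection can be reused
	resp.Body.Close()
	if resp.StatusCode != want {
		return fmt.Errorf("unexpected status code %d, want %d", resp.StatusCode, want)
	}
	onSuccess()
	return nil
}

func main() {
	resp, err := http.Get("https://example.com/")
	if err != nil {
		panic(err)
	}
	blobType := ""
	if err := respond(resp, http.StatusOK, func() { blobType = "BlockBlob" }); err != nil {
		panic(err)
	}
	fmt.Println(blobType)
}
```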
@ -1,6 +1,20 @@
 // Package storage provides clients for Microsoft Azure Storage Services.
 package storage

+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 import (
     "bufio"
     "bytes"

@ -17,6 +31,7 @@ import (
     "net/url"
     "regexp"
     "runtime"
+    "strconv"
     "strings"
     "time"

@ -33,7 +48,9 @@ const (
     // basic client is created.
     DefaultAPIVersion = "2016-05-31"

-    defaultUseHTTPS = true
+    defaultUseHTTPS      = true
+    defaultRetryAttempts = 5
+    defaultRetryDuration = time.Second * 5

     // StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator
     StorageEmulatorAccountName = "devstoreaccount1"

@ -53,10 +70,22 @@ const (
     userAgentHeader = "User-Agent"

     userDefinedMetadataHeaderPrefix = "x-ms-meta-"
+
+    connectionStringAccountName      = "accountname"
+    connectionStringAccountKey       = "accountkey"
+    connectionStringEndpointSuffix   = "endpointsuffix"
+    connectionStringEndpointProtocol = "defaultendpointsprotocol"
 )

 var (
-    validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$")
+    validStorageAccount     = regexp.MustCompile("^[0-9a-z]{3,24}$")
+    defaultValidStatusCodes = []int{
+        http.StatusRequestTimeout,      // 408
+        http.StatusInternalServerError, // 500
+        http.StatusBadGateway,          // 502
+        http.StatusServiceUnavailable,  // 503
+        http.StatusGatewayTimeout,      // 504
+    }
 )

 // Sender sends a request

@ -112,6 +141,8 @@ type Client struct {
     baseURL         string
     apiVersion      string
     userAgent       string
+    sasClient       bool
+    accountSASToken url.Values
 }

 type storageResponse struct {
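The retry settings that `NewClient` previously spelled out inline are now package-level defaults (five attempts, five seconds apart, retrying 408/500/502/503/504) so the SAS constructors below can share them; they remain overridable per client. A sketch of overriding them, with placeholder credentials:

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// Placeholder credentials; NewBasicClient expects a real base64 key.
	client, err := storage.NewBasicClient("mystorageacct", "<base64-account-key>")
	if err != nil {
		log.Fatal(err)
	}

	// Replace the default retry policy (defaultRetryAttempts et al. above)
	// with a tighter one for this client only.
	client.Sender = &storage.DefaultSender{
		RetryAttempts:    2,
		RetryDuration:    time.Second,
		ValidStatusCodes: []int{http.StatusServiceUnavailable},
	}
}
```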
@ -179,6 +210,45 @@ func (e UnexpectedStatusCodeError) Got() int {
     return e.got
 }

+// NewClientFromConnectionString creates a Client from the connection string.
+func NewClientFromConnectionString(input string) (Client, error) {
+    var (
+        accountName, accountKey, endpointSuffix string
+        useHTTPS                                = defaultUseHTTPS
+    )
+
+    for _, pair := range strings.Split(input, ";") {
+        if pair == "" {
+            continue
+        }
+
+        equalDex := strings.IndexByte(pair, '=')
+        if equalDex <= 0 {
+            return Client{}, fmt.Errorf("Invalid connection segment %q", pair)
+        }
+
+        value := pair[equalDex+1:]
+        key := strings.ToLower(pair[:equalDex])
+        switch key {
+        case connectionStringAccountName:
+            accountName = value
+        case connectionStringAccountKey:
+            accountKey = value
+        case connectionStringEndpointSuffix:
+            endpointSuffix = value
+        case connectionStringEndpointProtocol:
+            useHTTPS = value == "https"
+        default:
+            // ignored
+        }
+    }
+
+    if accountName == StorageEmulatorAccountName {
+        return NewEmulatorClient()
+    }
+    return NewClient(accountName, accountKey, endpointSuffix, DefaultAPIVersion, useHTTPS)
+}
+
 // NewBasicClient constructs a Client with given storage service name and
 // key.
 func NewBasicClient(accountName, accountKey string) (Client, error) {
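`NewClientFromConnectionString` parses the same `;`-separated, case-insensitive key/value format the Azure portal hands out, falls back to the emulator client for `devstoreaccount1`, and defaults to HTTPS. A usage sketch with placeholder credentials:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// Keys are matched case-insensitively, so the portal's casing works as-is;
	// the account name and key are placeholders.
	cs := "DefaultEndpointsProtocol=https;AccountName=mystorageacct;" +
		"AccountKey=<base64-account-key>;EndpointSuffix=core.windows.net"

	client, err := storage.NewClientFromConnectionString(cs)
	if err != nil {
		log.Fatal(err)
	}
	bsc := client.GetBlobService()
	fmt.Println(bsc.GetContainerReference("ark").Name)
}
```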
@ -206,13 +276,13 @@ func NewEmulatorClient() (Client, error) {
 // NewClient constructs a Client. This should be used if the caller wants
 // to specify whether to use HTTPS, a specific REST API version or a custom
 // storage endpoint than Azure Public Cloud.
-func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
+func NewClient(accountName, accountKey, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
     var c Client
     if !IsValidStorageAccount(accountName) {
         return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and only may contain numbers and lowercase letters: %v", accountName)
     } else if accountKey == "" {
         return c, fmt.Errorf("azure: account key required")
-    } else if blobServiceBaseURL == "" {
+    } else if serviceBaseURL == "" {
         return c, fmt.Errorf("azure: base storage service url required")
     }

@ -226,19 +296,14 @@ func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, u
         accountName: accountName,
         accountKey:  key,
         useHTTPS:    useHTTPS,
-        baseURL:     blobServiceBaseURL,
+        baseURL:     serviceBaseURL,
         apiVersion:  apiVersion,
+        sasClient:   false,
         UseSharedKeyLite: false,
         Sender: &DefaultSender{
-            RetryAttempts: 5,
-            ValidStatusCodes: []int{
-                http.StatusRequestTimeout,      // 408
-                http.StatusInternalServerError, // 500
-                http.StatusBadGateway,          // 502
-                http.StatusServiceUnavailable,  // 503
-                http.StatusGatewayTimeout,      // 504
-            },
-            RetryDuration: time.Second * 5,
+            RetryAttempts:    defaultRetryAttempts,
+            ValidStatusCodes: defaultValidStatusCodes,
+            RetryDuration:    defaultRetryDuration,
         },
     }
     c.userAgent = c.getDefaultUserAgent()
@ -251,6 +316,43 @@ func IsValidStorageAccount(account string) bool {
     return validStorageAccount.MatchString(account)
 }

+// NewAccountSASClient contructs a client that uses accountSAS authorization
+// for its operations.
+func NewAccountSASClient(account string, token url.Values, env azure.Environment) Client {
+    c := newSASClient()
+    c.accountSASToken = token
+    c.accountName = account
+    c.baseURL = env.StorageEndpointSuffix
+
+    // Get API version and protocol from token
+    c.apiVersion = token.Get("sv")
+    c.useHTTPS = token.Get("spr") == "https"
+    return c
+}
+
+func newSASClient() Client {
+    c := Client{
+        HTTPClient: http.DefaultClient,
+        apiVersion: DefaultAPIVersion,
+        sasClient:  true,
+        Sender: &DefaultSender{
+            RetryAttempts:    defaultRetryAttempts,
+            ValidStatusCodes: defaultValidStatusCodes,
+            RetryDuration:    defaultRetryDuration,
+        },
+    }
+    c.userAgent = c.getDefaultUserAgent()
+    return c
+}
+
+func (c Client) isServiceSASClient() bool {
+    return c.sasClient && c.accountSASToken == nil
+}
+
+func (c Client) isAccountSASClient() bool {
+    return c.sasClient && c.accountSASToken != nil
+}
+
 func (c Client) getDefaultUserAgent() string {
     return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s",
         runtime.Version(),
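`NewAccountSASClient` builds a key-less client from an account SAS token, reading the API version (`sv`) and protocol (`spr`) back out of the token itself. A sketch, assuming the `azure.Environment` parameter comes from `github.com/Azure/go-autorest/autorest/azure` (the `env.StorageEndpointSuffix` use above matches that type) and using placeholder token values:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/Azure/azure-sdk-for-go/storage"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Normally produced by GetAccountSASToken (later in this diff) or issued
	// out-of-band; all values here are placeholders.
	token := url.Values{}
	token.Set("sv", "2016-05-31")
	token.Set("spr", "https")
	token.Set("sig", "<signature>")

	client := storage.NewAccountSASClient("mystorageacct", token, azure.PublicCloud)
	fmt.Println(client.GetBlobService().GetContainerReference("ark").Name)
}
```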
@ -323,6 +425,164 @@ func (c Client) getEndpoint(service, path string, params url.Values) string {
     return u.String()
 }

+// AccountSASTokenOptions includes options for constructing
+// an account SAS token.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
+type AccountSASTokenOptions struct {
+    APIVersion    string
+    Services      Services
+    ResourceTypes ResourceTypes
+    Permissions   Permissions
+    Start         time.Time
+    Expiry        time.Time
+    IP            string
+    UseHTTPS      bool
+}
+
+// Services specify services accessible with an account SAS.
+type Services struct {
+    Blob  bool
+    Queue bool
+    Table bool
+    File  bool
+}
+
+// ResourceTypes specify the resources accesible with an
+// account SAS.
+type ResourceTypes struct {
+    Service   bool
+    Container bool
+    Object    bool
+}
+
+// Permissions specifies permissions for an accountSAS.
+type Permissions struct {
+    Read    bool
+    Write   bool
+    Delete  bool
+    List    bool
+    Add     bool
+    Create  bool
+    Update  bool
+    Process bool
+}
+
+// GetAccountSASToken creates an account SAS token
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
+func (c Client) GetAccountSASToken(options AccountSASTokenOptions) (url.Values, error) {
+    if options.APIVersion == "" {
+        options.APIVersion = c.apiVersion
+    }
+
+    if options.APIVersion < "2015-04-05" {
+        return url.Values{}, fmt.Errorf("account SAS does not support API versions prior to 2015-04-05. API version : %s", options.APIVersion)
+    }
+
+    // build services string
+    services := ""
+    if options.Services.Blob {
+        services += "b"
+    }
+    if options.Services.Queue {
+        services += "q"
+    }
+    if options.Services.Table {
+        services += "t"
+    }
+    if options.Services.File {
+        services += "f"
+    }
+
+    // build resources string
+    resources := ""
+    if options.ResourceTypes.Service {
+        resources += "s"
+    }
+    if options.ResourceTypes.Container {
+        resources += "c"
+    }
+    if options.ResourceTypes.Object {
+        resources += "o"
+    }
+
+    // build permissions string
+    permissions := ""
+    if options.Permissions.Read {
+        permissions += "r"
+    }
+    if options.Permissions.Write {
+        permissions += "w"
+    }
+    if options.Permissions.Delete {
+        permissions += "d"
+    }
+    if options.Permissions.List {
+        permissions += "l"
+    }
+    if options.Permissions.Add {
+        permissions += "a"
+    }
+    if options.Permissions.Create {
+        permissions += "c"
+    }
+    if options.Permissions.Update {
+        permissions += "u"
+    }
+    if options.Permissions.Process {
+        permissions += "p"
+    }
+
+    // build start time, if exists
+    start := ""
+    if options.Start != (time.Time{}) {
+        start = options.Start.Format(time.RFC3339)
+        // For some reason I don't understand, it fails when the rest of the string is included
+        start = start[:10]
+    }
+
+    // build expiry time
+    expiry := options.Expiry.Format(time.RFC3339)
+    // For some reason I don't understand, it fails when the rest of the string is included
+    expiry = expiry[:10]
+
+    protocol := "https,http"
+    if options.UseHTTPS {
+        protocol = "https"
+    }
+
+    stringToSign := strings.Join([]string{
+        c.accountName,
+        permissions,
+        services,
+        resources,
+        start,
+        expiry,
+        options.IP,
+        protocol,
+        options.APIVersion,
+        "",
+    }, "\n")
+    signature := c.computeHmac256(stringToSign)
+
+    sasParams := url.Values{
+        "sv":  {options.APIVersion},
+        "ss":  {services},
+        "srt": {resources},
+        "sp":  {permissions},
+        "se":  {expiry},
+        "spr": {protocol},
+        "sig": {signature},
+    }
+    if start != "" {
+        sasParams.Add("st", start)
+    }
+    if options.IP != "" {
+        sasParams.Add("sip", options.IP)
+    }
+
+    return sasParams, nil
+}
+
 // GetBlobService returns a BlobStorageClient which can operate on the blob
 // service of the storage account.
 func (c Client) GetBlobService() BlobStorageClient {
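`GetAccountSASToken` assembles the `ss`/`srt`/`sp` strings from the option structs above, signs them, and returns ready-to-use query parameters for `NewAccountSASClient`. A usage sketch with placeholder credentials:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// Placeholder credentials; NewBasicClient expects a real base64 key.
	client, err := storage.NewBasicClient("mystorageacct", "<base64-account-key>")
	if err != nil {
		log.Fatal(err)
	}

	// Read/list access to blob containers and objects for 24 hours.
	token, err := client.GetAccountSASToken(storage.AccountSASTokenOptions{
		Services:      storage.Services{Blob: true},
		ResourceTypes: storage.ResourceTypes{Container: true, Object: true},
		Permissions:   storage.Permissions{Read: true, List: true},
		Expiry:        time.Now().Add(24 * time.Hour),
		UseHTTPS:      true,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(token.Encode()) // e.g. se=...&sig=...&sp=rl&spr=https&srt=co&ss=b&sv=...
}
```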
@ -398,16 +658,12 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader
         return nil, errors.New("azure/storage: error creating request: " + err.Error())
     }

-    // if a body was provided ensure that the content length was set.
-    // http.NewRequest() will automatically do this for a handful of types
-    // and for those that it doesn't we will handle here.
-    if body != nil && req.ContentLength < 1 {
-        if lr, ok := body.(*io.LimitedReader); ok {
-            req.ContentLength = lr.N
-            snapshot := *lr
-            req.GetBody = func() (io.ReadCloser, error) {
-                r := snapshot
-                return ioutil.NopCloser(&r), nil
+    // http.NewRequest() will automatically set req.ContentLength for a handful of types
+    // otherwise we will handle here.
+    if req.ContentLength < 1 {
+        if clstr, ok := headers["Content-Length"]; ok {
+            if cl, err := strconv.ParseInt(clstr, 10, 64); err == nil {
+                req.ContentLength = cl
             }
         }
     }
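The `exec` change drops the old `io.LimitedReader` special case: instead of inspecting the body type, the client now falls back to an explicit `Content-Length` header when `http.NewRequest` could not infer a length. The fallback in isolation, with a hypothetical headers map standing in for the client's request headers:

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"strings"
)

func main() {
	body := strings.NewReader("payload")
	// Hypothetical request headers, as the storage client tracks them.
	headers := map[string]string{"Content-Length": "7"}

	req, err := http.NewRequest("PUT", "https://example.invalid/container/blob", body)
	if err != nil {
		panic(err)
	}
	// strings.Reader is one of the types http.NewRequest understands, so this
	// branch is skipped here; for opaque readers it kicks in.
	if req.ContentLength < 1 {
		if clstr, ok := headers["Content-Length"]; ok {
			if cl, err := strconv.ParseInt(clstr, 10, 64); err == nil {
				req.ContentLength = cl
			}
		}
	}
	fmt.Println(req.ContentLength) // 7
}
```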