Merge pull request #173 from ncdc/move-restore-warnings-errors-to-files
Move restore status warnings/errors to object storage; add restore describe command
commit 8b25114047
@@ -30,6 +30,7 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.
### SEE ALSO
* [ark backup](ark_backup.md) - Work with backups
* [ark create](ark_create.md) - Create ark resources
* [ark describe](ark_describe.md) - Describe ark resources
* [ark get](ark_get.md) - Get ark resources
* [ark restore](ark_restore.md) - Work with restores
* [ark schedule](ark_schedule.md) - Work with schedules
@@ -0,0 +1,32 @@
## ark describe

Describe ark resources

### Synopsis


Describe ark resources

### Options

```
-h, --help help for describe
```

### Options inherited from parent commands

```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```

### SEE ALSO
* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
* [ark describe restores](ark_describe_restores.md) - Describe restores
@@ -0,0 +1,39 @@
## ark describe restores

Describe restores

### Synopsis


Describe restores

```
ark describe restores [flags]
```

### Options

```
-h, --help help for restores
--label-columns stringArray a comma-separated list of labels to be displayed as columns
-o, --output string Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'. (default "table")
-l, --selector string only show items matching this label selector
--show-labels show labels in the last column
```

### Options inherited from parent commands

```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```

### SEE ALSO
* [ark describe](ark_describe.md) - Describe ark resources
@@ -30,6 +30,7 @@ Work with restores
* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
* [ark restore create](ark_restore_create.md) - Create a restore
* [ark restore delete](ark_restore_delete.md) - Delete a restore
* [ark restore describe](ark_restore_describe.md) - Describe restores
* [ark restore get](ark_restore_get.md) - Get restores
* [ark restore logs](ark_restore_logs.md) - Get restore logs
@@ -0,0 +1,39 @@
## ark restore describe

Describe restores

### Synopsis


Describe restores

```
ark restore describe [flags]
```

### Options

```
-h, --help help for describe
--label-columns stringArray a comma-separated list of labels to be displayed as columns
-o, --output string Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'. (default "table")
-l, --selector string only show items matching this label selector
--show-labels show labels in the last column
```

### Options inherited from parent commands

```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```

### SEE ALSO
* [ark restore](ark_restore.md) - Work with restores
@@ -31,6 +31,7 @@ const (
	DownloadTargetKindBackupLog      DownloadTargetKind = "BackupLog"
	DownloadTargetKindBackupContents DownloadTargetKind = "BackupContents"
	DownloadTargetKindRestoreLog     DownloadTargetKind = "RestoreLog"
	DownloadTargetKindRestoreResults DownloadTargetKind = "RestoreResults"
)

// DownloadTarget is the specification for what kind of file to download, and the name of the
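The new `DownloadTargetKindRestoreResults` kind lets a client ask for a signed URL to a restore's uploaded results file, the same way it already can for backup and restore logs. A minimal sketch of such a target, assuming the surrounding `api` package; the restore name here is made up for illustration:

```go
package main

import (
	"fmt"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
)

func main() {
	// Sketch only: the download target a client would use to fetch a
	// restore's results file.
	target := api.DownloadTarget{
		Kind: api.DownloadTargetKindRestoreResults,
		Name: "b-cool-20170913154901",
	}
	fmt.Printf("requesting %s for %s\n", target.Kind, target.Name)
}
```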
@@ -91,13 +91,13 @@ type RestoreStatus struct {
	// applicable)
	ValidationErrors []string `json:"validationErrors"`

	// Warnings is a collection of all warning messages that were
	// generated during execution of the restore
	Warnings RestoreResult `json:"warnings"`
	// Warnings is a count of all warning messages that were generated during
	// execution of the restore. The actual warnings are stored in object storage.
	Warnings int `json:"warnings"`

	// Errors is a collection of all error messages that were
	// generated during execution of the restore
	Errors RestoreResult `json:"errors"`
	// Errors is a count of all error messages that were generated during
	// execution of the restore. The actual errors are stored in object storage.
	Errors int `json:"errors"`
}

// RestoreResult is a collection of messages that were generated
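With `Warnings` and `Errors` now plain counts, each `RestoreResult` gets collapsed into an integer before status is written (see the restore controller hunk further down). A minimal sketch of that counting, assuming the `api` package above:

```go
package sketch

import api "github.com/heptio/ark/pkg/apis/ark/v1"

// countResultMessages mirrors the counting done in the restore controller:
// top-level Ark and Cluster messages plus every per-namespace message.
func countResultMessages(result api.RestoreResult) int {
	n := len(result.Ark) + len(result.Cluster)
	for _, msgs := range result.Namespaces {
		n += len(msgs)
	}
	return n
}
```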
@@ -919,8 +919,6 @@ func (in *RestoreStatus) DeepCopyInto(out *RestoreStatus) {
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	in.Warnings.DeepCopyInto(&out.Warnings)
	in.Errors.DeepCopyInto(&out.Errors)
	return
}
@@ -59,6 +59,9 @@ type BackupService interface {

	// UploadRestoreLog uploads the restore's log file to object storage.
	UploadRestoreLog(bucket, backup, restore string, log io.Reader) error

	// UploadRestoreResults uploads the restore's results file to object storage.
	UploadRestoreResults(bucket, backup, restore string, results io.Reader) error
}

// BackupGetter knows how to list backups in object storage.
@@ -68,10 +71,11 @@ type BackupGetter interface {
}

const (
	metadataFileFormatString   = "%s/ark-backup.json"
	backupFileFormatString     = "%s/%s.tar.gz"
	backupLogFileFormatString  = "%s/%s-logs.gz"
	restoreLogFileFormatString = "%s/restore-%s-logs.gz"
	metadataFileFormatString       = "%s/ark-backup.json"
	backupFileFormatString         = "%s/%s.tar.gz"
	backupLogFileFormatString      = "%s/%s-logs.gz"
	restoreLogFileFormatString     = "%s/restore-%s-logs.gz"
	restoreResultsFileFormatString = "%s/restore-%s-results.gz"
)

func getMetadataKey(backup string) string {

@@ -90,6 +94,10 @@ func getRestoreLogKey(backup, restore string) string {
	return fmt.Sprintf(restoreLogFileFormatString, backup, restore)
}

func getRestoreResultsKey(backup, restore string) string {
	return fmt.Sprintf(restoreResultsFileFormatString, backup, restore)
}

type backupService struct {
	objectStorage ObjectStorageAdapter
	decoder       runtime.Decoder
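The results key follows the same layout as the restore log key, so all of a restore's artifacts live under its backup's prefix. A standalone sketch of the layout, with the format strings copied from the constants above and illustrative names:

```go
package main

import "fmt"

func main() {
	// Copied from the constants above; backup and restore names are examples.
	const (
		restoreLogFileFormatString     = "%s/restore-%s-logs.gz"
		restoreResultsFileFormatString = "%s/restore-%s-results.gz"
	)
	backup, restore := "b-cool", "b-cool-20170913154901"

	fmt.Println(fmt.Sprintf(restoreLogFileFormatString, backup, restore))
	// b-cool/restore-b-cool-20170913154901-logs.gz
	fmt.Println(fmt.Sprintf(restoreResultsFileFormatString, backup, restore))
	// b-cool/restore-b-cool-20170913154901-results.gz
}
```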
@@ -219,23 +227,35 @@ func (br *backupService) CreateSignedURL(target api.DownloadTarget, bucket strin
	case api.DownloadTargetKindBackupLog:
		return br.objectStorage.CreateSignedURL(bucket, getBackupLogKey(target.Name), ttl)
	case api.DownloadTargetKindRestoreLog:
		// restore name is formatted as <backup name>-<timestamp>
		i := strings.LastIndex(target.Name, "-")
		if i < 0 {
			i = len(target.Name)
		}
		backup := target.Name[0:i]
		backup := extractBackupName(target.Name)
		return br.objectStorage.CreateSignedURL(bucket, getRestoreLogKey(backup, target.Name), ttl)
	case api.DownloadTargetKindRestoreResults:
		backup := extractBackupName(target.Name)
		return br.objectStorage.CreateSignedURL(bucket, getRestoreResultsKey(backup, target.Name), ttl)
	default:
		return "", errors.Errorf("unsupported download target kind %q", target.Kind)
	}
}

func extractBackupName(s string) string {
	// restore name is formatted as <backup name>-<timestamp>
	i := strings.LastIndex(s, "-")
	if i < 0 {
		i = len(s)
	}
	return s[0:i]
}

func (br *backupService) UploadRestoreLog(bucket, backup, restore string, log io.Reader) error {
	key := getRestoreLogKey(backup, restore)
	return br.objectStorage.PutObject(bucket, key, log)
}

func (br *backupService) UploadRestoreResults(bucket, backup, restore string, results io.Reader) error {
	key := getRestoreResultsKey(backup, restore)
	return br.objectStorage.PutObject(bucket, key, results)
}

// cachedBackupService wraps a real backup service with a cache for getting cloud backups.
type cachedBackupService struct {
	BackupService

@@ -304,6 +304,24 @@ func TestCreateSignedURL(t *testing.T) {
			targetName:  "b-cool-20170913154901-20170913154902",
			expectedKey: "b-cool-20170913154901/restore-b-cool-20170913154901-20170913154902-logs.gz",
		},
		{
			name:        "restore results - backup has no dash",
			targetKind:  api.DownloadTargetKindRestoreResults,
			targetName:  "b-20170913154901",
			expectedKey: "b/restore-b-20170913154901-results.gz",
		},
		{
			name:        "restore results - backup has 1 dash",
			targetKind:  api.DownloadTargetKindRestoreResults,
			targetName:  "b-cool-20170913154901",
			expectedKey: "b-cool/restore-b-cool-20170913154901-results.gz",
		},
		{
			name:        "restore results - backup has multiple dashes (e.g. restore of scheduled backup)",
			targetKind:  api.DownloadTargetKindRestoreResults,
			targetName:  "b-cool-20170913154901-20170913154902",
			expectedKey: "b-cool-20170913154901/restore-b-cool-20170913154901-20170913154902-results.gz",
		},
	}

	for _, test := range tests {
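The test cases above exercise the `extractBackupName` heuristic: a restore is named `<backup name>-<timestamp>`, so everything before the last dash is treated as the backup name. A standalone sketch of that behavior, with the logic copied from the helper above and inputs taken from the test table:

```go
package main

import (
	"fmt"
	"strings"
)

// extractBackupName is copied from the helper above: a restore name is
// "<backup name>-<timestamp>", so strip everything after the last dash.
func extractBackupName(s string) string {
	i := strings.LastIndex(s, "-")
	if i < 0 {
		i = len(s)
	}
	return s[0:i]
}

func main() {
	for _, name := range []string{
		"b-20170913154901",                     // backup has no dash
		"b-cool-20170913154901",                // backup has 1 dash
		"b-cool-20170913154901-20170913154902", // restore of a scheduled backup
	} {
		fmt.Printf("%s -> %s\n", name, extractBackupName(name))
	}
}
```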
@@ -24,6 +24,7 @@ import (
	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/cmd/cli/backup"
	"github.com/heptio/ark/pkg/cmd/cli/create"
	"github.com/heptio/ark/pkg/cmd/cli/describe"
	"github.com/heptio/ark/pkg/cmd/cli/get"
	"github.com/heptio/ark/pkg/cmd/cli/restore"
	"github.com/heptio/ark/pkg/cmd/cli/schedule"
@@ -54,6 +55,7 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.`
		server.NewCommand(),
		version.NewCommand(),
		get.NewCommand(f),
		describe.NewCommand(f),
		create.NewCommand(f),
	)
@@ -0,0 +1,49 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package describe

import (
	"github.com/spf13/cobra"

	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/cmd/cli/restore"
)

func NewCommand(f client.Factory) *cobra.Command {
	c := &cobra.Command{
		Use:   "describe",
		Short: "Describe ark resources",
		Long:  "Describe ark resources",
	}

	//backupCommand := backup.NewGetCommand(f, "backups")
	//backupCommand.Aliases = []string{"backup"}

	//scheduleCommand := schedule.NewGetCommand(f, "schedules")
	//scheduleCommand.Aliases = []string{"schedule"}

	restoreCommand := restore.NewDescribeCommand(f, "restores")
	restoreCommand.Aliases = []string{"restore"}

	c.AddCommand(
		//backupCommand,
		//scheduleCommand,
		restoreCommand,
	)

	return c
}
@@ -17,18 +17,173 @@ limitations under the License.
package restore

import (
	"github.com/spf13/cobra"
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"strings"
	"time"

	clientset "github.com/heptio/ark/pkg/generated/clientset/versioned"
	"github.com/spf13/cobra"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/cmd"
	"github.com/heptio/ark/pkg/cmd/util/downloadrequest"
	"github.com/heptio/ark/pkg/cmd/util/output"
)

func NewDescribeCommand(f client.Factory) *cobra.Command {
func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
	var listOptions metav1.ListOptions

	c := &cobra.Command{
		Use:   "describe",
		Short: "Describe a backup",
		Use:   use,
		Short: "Describe restores",
		Run: func(c *cobra.Command, args []string) {
			arkClient, err := f.Client()
			cmd.CheckError(err)

			var restores *api.RestoreList
			if len(args) > 0 {
				restores = new(api.RestoreList)
				for _, name := range args {
					restore, err := arkClient.Ark().Restores(api.DefaultNamespace).Get(name, metav1.GetOptions{})
					cmd.CheckError(err)
					restores.Items = append(restores.Items, *restore)
				}
			} else {
				restores, err = arkClient.ArkV1().Restores(api.DefaultNamespace).List(listOptions)
				cmd.CheckError(err)
			}

			first := true
			for _, restore := range restores.Items {
				s := output.Describe(func(out io.Writer) {
					describeRestore(out, &restore, arkClient)
				})
				if first {
					first = false
					fmt.Print(s)
				} else {
					fmt.Printf("\n\n%s", s)
				}
			}
			cmd.CheckError(err)
		},
	}

	c.Flags().StringVarP(&listOptions.LabelSelector, "selector", "l", listOptions.LabelSelector, "only show items matching this label selector")

	output.BindFlags(c.Flags())

	return c
}

func describeRestore(out io.Writer, restore *api.Restore, arkClient clientset.Interface) {
	output.DescribeMetadata(out, restore.ObjectMeta)

	fmt.Fprintln(out)
	fmt.Fprintf(out, "Backup:\t%s\n", restore.Spec.BackupName)

	fmt.Fprintln(out)
	fmt.Fprintf(out, "Namespaces:\n")
	var s string
	if len(restore.Spec.IncludedNamespaces) == 0 {
		s = "*"
	} else {
		s = strings.Join(restore.Spec.IncludedNamespaces, ", ")
	}
	fmt.Fprintf(out, "\tIncluded:\t%s\n", s)
	if len(restore.Spec.ExcludedNamespaces) == 0 {
		s = "<none>"
	} else {
		s = strings.Join(restore.Spec.ExcludedNamespaces, ", ")
	}
	fmt.Fprintf(out, "\tExcluded:\t%s\n", s)

	fmt.Fprintln(out)
	fmt.Fprintf(out, "Resources:\n")
	if len(restore.Spec.IncludedResources) == 0 {
		s = "*"
	} else {
		s = strings.Join(restore.Spec.IncludedResources, ", ")
	}
	fmt.Fprintf(out, "\tIncluded:\t%s\n", s)
	if len(restore.Spec.ExcludedResources) == 0 {
		s = "<none>"
	} else {
		s = strings.Join(restore.Spec.ExcludedResources, ", ")
	}
	fmt.Fprintf(out, "\tExcluded:\t%s\n", s)

	fmt.Fprintf(out, "\tCluster-scoped:\t%s\n", output.BoolPointerString(restore.Spec.IncludeClusterResources, "excluded", "included", "auto"))

	fmt.Fprintln(out)
	output.DescribeMap(out, "Namespace mappings", restore.Spec.NamespaceMapping)

	fmt.Fprintln(out)
	s = "<none>"
	if restore.Spec.LabelSelector != nil {
		s = metav1.FormatLabelSelector(restore.Spec.LabelSelector)
	}
	fmt.Fprintf(out, "Label selector:\t%s\n", s)

	fmt.Fprintln(out)
	fmt.Fprintf(out, "Restore PVs:\t%s\n", output.BoolPointerString(restore.Spec.RestorePVs, "false", "true", "auto"))

	fmt.Fprintln(out)
	fmt.Fprintf(out, "Phase:\t%s\n", restore.Status.Phase)

	fmt.Fprintln(out)
	fmt.Fprint(out, "Validation errors:")
	if len(restore.Status.ValidationErrors) == 0 {
		fmt.Fprintf(out, "\t<none>\n")
	} else {
		for _, ve := range restore.Status.ValidationErrors {
			fmt.Fprintf(out, "\t%s\n", ve)
		}
	}

	fmt.Fprintln(out)
	describeRestoreResults(out, arkClient, restore)
}

func describeRestoreResults(out io.Writer, arkClient clientset.Interface, restore *api.Restore) {
	if restore.Status.Warnings == 0 && restore.Status.Errors == 0 {
		fmt.Fprintf(out, "Warnings:\t<none>\nErrors:\t<none>\n")
		return
	}

	var buf bytes.Buffer
	var resultMap map[string]api.RestoreResult

	if err := downloadrequest.Stream(arkClient.ArkV1(), restore.Name, api.DownloadTargetKindRestoreResults, &buf, 30*time.Second); err != nil {
		fmt.Fprintf(out, "Warnings:\t<error getting warnings: %v>\n\nErrors:\t<error getting errors: %v>\n", err, err)
		return
	}

	if err := json.NewDecoder(&buf).Decode(&resultMap); err != nil {
		fmt.Fprintf(out, "Warnings:\t<error decoding warnings: %v>\n\nErrors:\t<error decoding errors: %v>\n", err, err)
		return
	}

	describeRestoreResult(out, "Warnings", resultMap["warnings"])
	fmt.Fprintln(out)
	describeRestoreResult(out, "Errors", resultMap["errors"])
}

func describeRestoreResult(out io.Writer, name string, result api.RestoreResult) {
	fmt.Fprintf(out, "%s:\n", name)
	output.DescribeSlice(out, 1, "Ark", result.Ark)
	output.DescribeSlice(out, 1, "Cluster", result.Cluster)
	if len(result.Namespaces) == 0 {
		fmt.Fprintf(out, "\tNamespaces: <none>\n")
	} else {
		fmt.Fprintf(out, "\tNamespaces:\n")
		for ns, warnings := range result.Namespaces {
			output.DescribeSlice(out, 2, ns, warnings)
		}
	}
}
@@ -33,8 +33,7 @@ func NewCommand(f client.Factory) *cobra.Command {
		NewCreateCommand(f, "create"),
		NewGetCommand(f, "get"),
		NewLogsCommand(f),
		// Will implement later
		// NewDescribeCommand(f),
		NewDescribeCommand(f, "describe"),
		NewDeleteCommand(f),
	)
@@ -0,0 +1,105 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package output

import (
	"bytes"
	"fmt"
	"io"
	"sort"
	"strings"
	"text/tabwriter"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Describe configures a tab writer, passing it to fn. The tab writer's output is returned to the
// caller.
func Describe(fn func(out io.Writer)) string {
	out := new(tabwriter.Writer)
	buf := &bytes.Buffer{}
	out.Init(buf, 0, 8, 2, ' ', 0)

	fn(out)

	out.Flush()
	return buf.String()
}

// DescribeMetadata describes standard object metadata in a consistent manner.
func DescribeMetadata(out io.Writer, metadata metav1.ObjectMeta) {
	fmt.Fprintf(out, "Name:\t%s\n", metadata.Name)
	fmt.Fprintf(out, "Namespace:\t%s\n", metadata.Namespace)
	DescribeMap(out, "Labels", metadata.Labels)
	DescribeMap(out, "Annotations", metadata.Annotations)
}

// DescribeMap describes a map of key-value pairs using name as the heading.
func DescribeMap(out io.Writer, name string, m map[string]string) {
	fmt.Fprintf(out, "%s:\t", name)

	first := true
	prefix := ""
	if len(m) > 0 {
		keys := make([]string, 0, len(m))
		for key := range m {
			keys = append(keys, key)
		}
		sort.Strings(keys)
		for _, key := range keys {
			fmt.Fprintf(out, "%s%s=%s\n", prefix, key, m[key])
			if first {
				first = false
				prefix = "\t"
			}
		}
	} else {
		fmt.Fprint(out, "<none>\n")
	}
}

// DescribeSlice describes a slice of strings using name as the heading. The output is prefixed by
// "preindent" number of tabs.
func DescribeSlice(out io.Writer, preindent int, name string, s []string) {
	pretab := strings.Repeat("\t", preindent)
	fmt.Fprintf(out, "%s%s:\t", pretab, name)

	first := true
	prefix := ""
	if len(s) > 0 {
		for _, x := range s {
			fmt.Fprintf(out, "%s%s\n", prefix, x)
			if first {
				first = false
				prefix = pretab + "\t"
			}
		}
	} else {
		fmt.Fprintf(out, "%s<none>\n", pretab)
	}
}

// BoolPointerString returns the appropriate string based on the bool pointer's value.
func BoolPointerString(b *bool, falseString, trueString, nilString string) string {
	if b == nil {
		return nilString
	}
	if *b {
		return trueString
	}
	return falseString
}
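A small usage sketch of the `Describe` helper above, assuming the repository's import path for this package; the field values are arbitrary:

```go
package main

import (
	"fmt"
	"io"

	"github.com/heptio/ark/pkg/cmd/util/output"
)

func main() {
	// Describe hands the callback a tabwriter-backed writer, so
	// "\t"-separated columns come back aligned in the returned string.
	s := output.Describe(func(out io.Writer) {
		fmt.Fprintf(out, "Name:\t%s\n", "example-restore")
		fmt.Fprintf(out, "Phase:\t%s\n", "Completed")
		output.DescribeMap(out, "Labels", map[string]string{"app": "demo"})
	})
	fmt.Print(s)
}
```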
@@ -53,15 +53,17 @@ func printRestore(restore *v1.Restore, w io.Writer, options printers.PrintOption
		status = v1.RestorePhaseNew
	}

	warnings := len(restore.Status.Warnings.Ark) + len(restore.Status.Warnings.Cluster)
	for _, w := range restore.Status.Warnings.Namespaces {
		warnings += len(w)
	}
	errors := len(restore.Status.Errors.Ark) + len(restore.Status.Errors.Cluster)
	for _, e := range restore.Status.Errors.Namespaces {
		errors += len(e)
	}
	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%d\t%d\t%s\t%s", name, restore.Spec.BackupName, status, warnings, errors, restore.CreationTimestamp.Time, metav1.FormatLabelSelector(restore.Spec.LabelSelector)); err != nil {
	if _, err := fmt.Fprintf(
		w,
		"%s\t%s\t%s\t%d\t%d\t%s\t%s",
		name,
		restore.Spec.BackupName,
		status,
		restore.Status.Warnings,
		restore.Status.Errors,
		restore.CreationTimestamp.Time,
		metav1.FormatLabelSelector(restore.Spec.LabelSelector),
	); err != nil {
		return err
	}
@@ -17,7 +17,9 @@ limitations under the License.
package controller

import (
	"compress/gzip"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
@@ -254,7 +256,17 @@ func (controller *restoreController) processRestore(key string) error {

	logContext.Debug("Running restore")
	// execution & upload of restore
	restore.Status.Warnings, restore.Status.Errors = controller.runRestore(restore, controller.bucket)
	restoreWarnings, restoreErrors := controller.runRestore(restore, controller.bucket)

	restore.Status.Warnings = len(restoreWarnings.Ark) + len(restoreWarnings.Cluster)
	for _, w := range restoreWarnings.Namespaces {
		restore.Status.Warnings += len(w)
	}

	restore.Status.Errors = len(restoreErrors.Ark) + len(restoreErrors.Cluster)
	for _, e := range restoreErrors.Namespaces {
		restore.Status.Errors += len(e)
	}

	logContext.Debug("restore completed")
	restore.Status.Phase = api.RestorePhaseCompleted
@@ -327,22 +339,29 @@ func (controller *restoreController) fetchBackup(bucket, name string) (*api.Back
	return backup, nil
}

func (controller *restoreController) runRestore(restore *api.Restore, bucket string) (warnings, restoreErrors api.RestoreResult) {
	logContext := controller.logger.WithField("restore", kubeutil.NamespaceAndName(restore))
func (controller *restoreController) runRestore(restore *api.Restore, bucket string) (restoreWarnings, restoreErrors api.RestoreResult) {
	logContext := controller.logger.WithFields(
		logrus.Fields{
			"restore": kubeutil.NamespaceAndName(restore),
			"backup":  restore.Spec.BackupName,
		})

	backup, err := controller.fetchBackup(bucket, restore.Spec.BackupName)
	if err != nil {
		logContext.WithError(err).WithField("backup", restore.Spec.BackupName).Error("Error getting backup")
		logContext.WithError(err).Error("Error getting backup")
		restoreErrors.Ark = append(restoreErrors.Ark, err.Error())
		return
	}

	tmpFile, err := downloadToTempFile(restore.Spec.BackupName, controller.backupService, bucket, controller.logger)
	var tempFiles []*os.File

	backupFile, err := downloadToTempFile(restore.Spec.BackupName, controller.backupService, bucket, controller.logger)
	if err != nil {
		logContext.WithError(err).WithField("backup", restore.Spec.BackupName).Error("Error downloading backup")
		logContext.WithError(err).Error("Error downloading backup")
		restoreErrors.Ark = append(restoreErrors.Ark, err.Error())
		return
	}
	tempFiles = append(tempFiles, backupFile)

	logFile, err := ioutil.TempFile("", "")
	if err != nil {
@@ -350,26 +369,29 @@ func (controller *restoreController) runRestore(restore *api.Restore, bucket str
		restoreErrors.Ark = append(restoreErrors.Ark, err.Error())
		return
	}
	tempFiles = append(tempFiles, logFile)

	resultsFile, err := ioutil.TempFile("", "")
	if err != nil {
		logContext.WithError(errors.WithStack(err)).Error("Error creating results temp file")
		restoreErrors.Ark = append(restoreErrors.Ark, err.Error())
		return
	}
	tempFiles = append(tempFiles, resultsFile)

	defer func() {
		if err := tmpFile.Close(); err != nil {
			logContext.WithError(errors.WithStack(err)).WithField("file", tmpFile.Name()).Error("Error closing file")
		}
		for _, file := range tempFiles {
			if err := file.Close(); err != nil {
				logContext.WithError(errors.WithStack(err)).WithField("file", file.Name()).Error("Error closing file")
			}

			if err := os.Remove(tmpFile.Name()); err != nil {
				logContext.WithError(errors.WithStack(err)).WithField("file", tmpFile.Name()).Error("Error removing file")
			}

			if err := logFile.Close(); err != nil {
				logContext.WithError(errors.WithStack(err)).WithField("file", logFile.Name()).Error("Error closing file")
			}

			if err := os.Remove(logFile.Name()); err != nil {
				logContext.WithError(errors.WithStack(err)).WithField("file", logFile.Name()).Error("Error removing file")
			if err := os.Remove(file.Name()); err != nil {
				logContext.WithError(errors.WithStack(err)).WithField("file", file.Name()).Error("Error removing file")
			}
		}
	}()

	warnings, restoreErrors = controller.restorer.Restore(restore, backup, tmpFile, logFile)
	restoreWarnings, restoreErrors = controller.restorer.Restore(restore, backup, backupFile, logFile)

	// Try to upload the log file. This is best-effort. If we fail, we'll add to the ark errors.
@@ -383,6 +405,23 @@ func (controller *restoreController) runRestore(restore *api.Restore, bucket str
		restoreErrors.Ark = append(restoreErrors.Ark, fmt.Sprintf("error uploading log file to object storage: %v", err))
	}

	m := map[string]api.RestoreResult{
		"warnings": restoreWarnings,
		"errors":   restoreErrors,
	}

	gzippedResultsFile := gzip.NewWriter(resultsFile)

	if err := json.NewEncoder(gzippedResultsFile).Encode(m); err != nil {
		logContext.WithError(errors.WithStack(err)).Error("Error encoding restore results")
		return
	}
	gzippedResultsFile.Close()

	if err := controller.backupService.UploadRestoreResults(bucket, restore.Spec.BackupName, restore.Name, resultsFile); err != nil {
		logContext.WithError(errors.WithStack(err)).Error("Error uploading results files to object storage")
	}

	return
}
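The uploaded results object is a gzipped JSON map keyed by "warnings" and "errors", each holding a `RestoreResult`; the describe command later decodes the same map shape. A self-contained round-trip sketch using a local buffer in place of object storage (the struct below is only a stand-in for `api.RestoreResult`, and the explicit gzip reader keeps the sketch standalone; in the real flow the bytes travel via object storage and `downloadrequest.Stream`):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"encoding/json"
	"fmt"
)

// restoreResult stands in for api.RestoreResult in this sketch.
type restoreResult struct {
	Ark        []string            `json:"ark,omitempty"`
	Cluster    []string            `json:"cluster,omitempty"`
	Namespaces map[string][]string `json:"namespaces,omitempty"`
}

func main() {
	// Producer side (controller): gzip + JSON-encode the warnings/errors map.
	results := map[string]restoreResult{
		"warnings": {Namespaces: map[string][]string{"ns-1": {"something to look at"}}},
		"errors":   {Ark: []string{"no backup here"}},
	}

	var stored bytes.Buffer
	gz := gzip.NewWriter(&stored)
	if err := json.NewEncoder(gz).Encode(results); err != nil {
		panic(err)
	}
	gz.Close()

	// Consumer side (describe command): gunzip, then decode the same map.
	gr, err := gzip.NewReader(&stored)
	if err != nil {
		panic(err)
	}
	var decoded map[string]restoreResult
	if err := json.NewDecoder(gr).Decode(&decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded["errors"].Ark) // [no backup here]
}
```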
@@ -189,9 +189,7 @@ func TestProcessRestore(t *testing.T) {
			expectedRestoreUpdates: []*api.Restore{
				NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore,
				NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseCompleted).
					WithErrors(api.RestoreResult{
						Ark: []string{"no backup here"},
					}).
					WithErrors(1).
					Restore,
			},
		},
@@ -204,11 +202,7 @@ func TestProcessRestore(t *testing.T) {
			expectedRestoreUpdates: []*api.Restore{
				NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore,
				NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseCompleted).
					WithErrors(api.RestoreResult{
						Namespaces: map[string][]string{
							"ns-1": {"blarg"},
						},
					}).
					WithErrors(1).
					Restore,
			},
			expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore,
@@ -319,6 +313,7 @@ func TestProcessRestore(t *testing.T) {
			backupSvc.On("DownloadBackup", mock.Anything, mock.Anything).Return(downloadedBackup, nil)
			restorer.On("Restore", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(warnings, errors)
			backupSvc.On("UploadRestoreLog", "bucket", test.restore.Spec.BackupName, test.restore.Name, mock.Anything).Return(test.uploadLogError)
			backupSvc.On("UploadRestoreResults", "bucket", test.restore.Spec.BackupName, test.restore.Name, mock.Anything).Return(nil)
		}

		var (
@@ -1,5 +1,5 @@
/*
Copyright 2017 Heptio Inc.
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -132,7 +132,7 @@ func (_m *BackupService) GetBackup(bucket string, name string) (*v1.Backup, erro
}

// UploadBackup provides a mock function with given fields: bucket, name, metadata, backup, log
func (_m *BackupService) UploadBackup(bucket string, name string, metadata, backup, log io.Reader) error {
func (_m *BackupService) UploadBackup(bucket string, name string, metadata io.Reader, backup io.Reader, log io.Reader) error {
	ret := _m.Called(bucket, name, metadata, backup, log)

	var r0 error
@@ -158,3 +158,17 @@ func (_m *BackupService) UploadRestoreLog(bucket string, backup string, restore

	return r0
}

// UploadRestoreResults provides a mock function with given fields: bucket, backup, restore, results
func (_m *BackupService) UploadRestoreResults(bucket string, backup string, restore string, results io.Reader) error {
	ret := _m.Called(bucket, backup, restore, results)

	var r0 error
	if rf, ok := ret.Get(0).(func(string, string, string, io.Reader) error); ok {
		r0 = rf(bucket, backup, restore, results)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}
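For tests that drive the controller through a restore, the generated mock only needs the extra expectation registered, as the controller test above does. A sketch using testify's standard mock API; the import path of the generated mocks package is assumed:

```go
package sketch

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	// Assumed location of the generated BackupService mock shown above.
	"github.com/heptio/ark/pkg/cloudprovider/mocks"
)

func TestUploadRestoreResultsExpectation(t *testing.T) {
	backupSvc := &mocks.BackupService{}
	backupSvc.On("UploadRestoreResults", "bucket", "backup-1", "restore-1", mock.Anything).Return(nil)

	err := backupSvc.UploadRestoreResults("bucket", "backup-1", "restore-1", strings.NewReader("{}"))
	require.NoError(t, err)
	backupSvc.AssertExpectations(t)
}
```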
@@ -65,8 +65,8 @@ func (r *TestRestore) WithBackup(name string) *TestRestore {
	return r
}

func (r *TestRestore) WithErrors(e api.RestoreResult) *TestRestore {
	r.Status.Errors = e
func (r *TestRestore) WithErrors(i int) *TestRestore {
	r.Status.Errors = i
	return r
}