chore: enable early-return and superfluous-else from revive
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
parent 0dbff6d239
commit 199870ccac
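The change is mechanical throughout: wherever an `if` branch ends in a terminating statement (`return`, `continue`, `break`), revive's `superfluous-else` rule flags the trailing `else` block and `early-return` flags the inverted nesting, and the fix flattens the `else` body to follow the `if`. A minimal before/after sketch of the pattern (modeled on the `GetUploaderType` hunk further down; the function names here are illustrative only, not part of the commit):

```go
package main

import "fmt"

// Before: flagged by revive's superfluous-else, because the if branch
// always returns and the else only adds an indent level.
func uploaderTypeBefore(dataMover string) string {
	if dataMover == "" || dataMover == "velero" {
		return "kopia"
	} else {
		return dataMover
	}
}

// After: the early-return form every hunk in this commit converts to.
func uploaderTypeAfter(dataMover string) string {
	if dataMover == "" || dataMover == "velero" {
		return "kopia"
	}
	return dataMover
}

func main() {
	fmt.Println(uploaderTypeBefore("restic"), uploaderTypeAfter(""))
}
```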
@@ -231,7 +231,6 @@ linters-settings:
      - name: dot-imports
        disabled: true
      - name: early-return
        disabled: true
        arguments:
          - "preserveScope"
      - name: empty-block
@@ -246,14 +245,14 @@ linters-settings:
        disabled: true
      - name: increment-decrement
      - name: indent-error-flow
        disabled: true
        arguments:
          - "preserveScope"
      - name: range
      - name: receiver-naming
        disabled: true
      - name: redefines-builtin-id
        disabled: true
      - name: superfluous-else
        disabled: true
        arguments:
          - "preserveScope"
      - name: time-naming
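The `"preserveScope"` argument kept on `early-return`, `indent-error-flow`, and `superfluous-else` is, per the upstream revive documentation, the switch that suppresses suggestions whose fix would enlarge a variable's scope; with it set, the rules only report rewrites that leave every declaration's scope unchanged. (Description of the revive option, not text from this commit.)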
@@ -338,24 +338,24 @@ func (v *BackupVolumesInformation) generateVolumeInfoForSkippedPV() {
tmpVolumeInfos := make([]*BackupVolumeInfo, 0)

for pvName, skippedReason := range v.SkippedPVs {
if pvcPVInfo := v.pvMap.retrieve(pvName, "", ""); pvcPVInfo != nil {
volumeInfo := &BackupVolumeInfo{
PVCName: pvcPVInfo.PVCName,
PVCNamespace: pvcPVInfo.PVCNamespace,
PVName: pvName,
SnapshotDataMoved: false,
Skipped: true,
SkippedReason: skippedReason,
PVInfo: &PVInfo{
ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
Labels: pvcPVInfo.PV.Labels,
},
}
tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo)
} else {
pvcPVInfo := v.pvMap.retrieve(pvName, "", "")
if pvcPVInfo == nil {
v.logger.Warnf("Cannot find info for PV %s", pvName)
continue
}
volumeInfo := &BackupVolumeInfo{
PVCName: pvcPVInfo.PVCName,
PVCNamespace: pvcPVInfo.PVCNamespace,
PVName: pvName,
SnapshotDataMoved: false,
Skipped: true,
SkippedReason: skippedReason,
PVInfo: &PVInfo{
ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
Labels: pvcPVInfo.PV.Labels,
},
}
tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo)
}

v.volumeInfos = append(v.volumeInfos, tmpVolumeInfos...)
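Most hunks in this file repeat one shape: an `if v := f(); v != nil { … } else { warn; continue }` block becomes a hoisted call plus a guard clause, so the happy path loses an indent level. A self-contained sketch of the transformation with hypothetical names (the real code uses `v.pvMap.retrieve` and a structured logger):

```go
package main

import "log"

type info struct{ name string }

// lookup is a stand-in for pvMap.retrieve: nil means "not found".
func lookup(name string) *info {
	if name == "" {
		return nil
	}
	return &info{name: name}
}

func collect(names []string) []*info {
	out := make([]*info, 0)
	for _, n := range names {
		// Before: if i := lookup(n); i != nil { … } else { warn; continue }
		// After: hoist the call, guard on nil first, keep the loop body flat.
		i := lookup(n)
		if i == nil {
			log.Printf("Cannot find info for %s", n)
			continue
		}
		out = append(out, i)
	}
	return out
}

func main() {
	log.Println(len(collect([]string{"pv-1", "", "pv-2"})))
}
```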
@@ -366,32 +366,32 @@ func (v *BackupVolumesInformation) generateVolumeInfoForVeleroNativeSnapshot() {
tmpVolumeInfos := make([]*BackupVolumeInfo, 0)

for _, nativeSnapshot := range v.NativeSnapshots {
if pvcPVInfo := v.pvMap.retrieve(nativeSnapshot.Spec.PersistentVolumeName, "", ""); pvcPVInfo != nil {
volumeResult := VolumeResultFailed
if nativeSnapshot.Status.Phase == SnapshotPhaseCompleted {
volumeResult = VolumeResultSucceeded
}
volumeInfo := &BackupVolumeInfo{
BackupMethod: NativeSnapshot,
PVCName: pvcPVInfo.PVCName,
PVCNamespace: pvcPVInfo.PVCNamespace,
PVName: pvcPVInfo.PV.Name,
SnapshotDataMoved: false,
Skipped: false,
// Only set Succeeded to true when the NativeSnapshot's phase is Completed,
// although NativeSnapshot doesn't check whether the snapshot creation result.
Result: volumeResult,
NativeSnapshotInfo: newNativeSnapshotInfo(nativeSnapshot),
PVInfo: &PVInfo{
ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
Labels: pvcPVInfo.PV.Labels,
},
}
tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo)
} else {
pvcPVInfo := v.pvMap.retrieve(nativeSnapshot.Spec.PersistentVolumeName, "", "")
if pvcPVInfo == nil {
v.logger.Warnf("cannot find info for PV %s", nativeSnapshot.Spec.PersistentVolumeName)
continue
}
volumeResult := VolumeResultFailed
if nativeSnapshot.Status.Phase == SnapshotPhaseCompleted {
volumeResult = VolumeResultSucceeded
}
volumeInfo := &BackupVolumeInfo{
BackupMethod: NativeSnapshot,
PVCName: pvcPVInfo.PVCName,
PVCNamespace: pvcPVInfo.PVCNamespace,
PVName: pvcPVInfo.PV.Name,
SnapshotDataMoved: false,
Skipped: false,
// Only set Succeeded to true when the NativeSnapshot's phase is Completed,
// although NativeSnapshot doesn't check whether the snapshot creation result.
Result: volumeResult,
NativeSnapshotInfo: newNativeSnapshotInfo(nativeSnapshot),
PVInfo: &PVInfo{
ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
Labels: pvcPVInfo.PV.Labels,
},
}
tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo)
}

v.volumeInfos = append(v.volumeInfos, tmpVolumeInfos...)
@@ -461,38 +461,38 @@ func (v *BackupVolumesInformation) generateVolumeInfoForCSIVolumeSnapshot() {
if volumeSnapshotContent.Status.SnapshotHandle != nil {
snapshotHandle = *volumeSnapshotContent.Status.SnapshotHandle
}
if pvcPVInfo := v.pvMap.retrieve("", *volumeSnapshot.Spec.Source.PersistentVolumeClaimName, volumeSnapshot.Namespace); pvcPVInfo != nil {
volumeInfo := &BackupVolumeInfo{
BackupMethod: CSISnapshot,
PVCName: pvcPVInfo.PVCName,
PVCNamespace: pvcPVInfo.PVCNamespace,
PVName: pvcPVInfo.PV.Name,
Skipped: false,
SnapshotDataMoved: false,
PreserveLocalSnapshot: true,
CSISnapshotInfo: &CSISnapshotInfo{
VSCName: *volumeSnapshot.Status.BoundVolumeSnapshotContentName,
Size: size,
Driver: volumeSnapshotClass.Driver,
SnapshotHandle: snapshotHandle,
OperationID: operation.Spec.OperationID,
ReadyToUse: volumeSnapshot.Status.ReadyToUse,
},
PVInfo: &PVInfo{
ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
Labels: pvcPVInfo.PV.Labels,
},
}

if volumeSnapshot.Status.CreationTime != nil {
volumeInfo.StartTimestamp = volumeSnapshot.Status.CreationTime
}

tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo)
} else {
pvcPVInfo := v.pvMap.retrieve("", *volumeSnapshot.Spec.Source.PersistentVolumeClaimName, volumeSnapshot.Namespace)
if pvcPVInfo == nil {
v.logger.Warnf("cannot find info for PVC %s/%s", volumeSnapshot.Namespace, volumeSnapshot.Spec.Source.PersistentVolumeClaimName)
continue
}
volumeInfo := &BackupVolumeInfo{
BackupMethod: CSISnapshot,
PVCName: pvcPVInfo.PVCName,
PVCNamespace: pvcPVInfo.PVCNamespace,
PVName: pvcPVInfo.PV.Name,
Skipped: false,
SnapshotDataMoved: false,
PreserveLocalSnapshot: true,
CSISnapshotInfo: &CSISnapshotInfo{
VSCName: *volumeSnapshot.Status.BoundVolumeSnapshotContentName,
Size: size,
Driver: volumeSnapshotClass.Driver,
SnapshotHandle: snapshotHandle,
OperationID: operation.Spec.OperationID,
ReadyToUse: volumeSnapshot.Status.ReadyToUse,
},
PVInfo: &PVInfo{
ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
Labels: pvcPVInfo.PV.Labels,
},
}

if volumeSnapshot.Status.CreationTime != nil {
volumeInfo.StartTimestamp = volumeSnapshot.Status.CreationTime
}

tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo)
}

v.volumeInfos = append(v.volumeInfos, tmpVolumeInfos...)
@@ -524,18 +524,18 @@ func (v *BackupVolumesInformation) generateVolumeInfoFromPVB() {
continue
}
if pvcName != "" {
if pvcPVInfo := v.pvMap.retrieve("", pvcName, pvb.Spec.Pod.Namespace); pvcPVInfo != nil {
volumeInfo.PVCName = pvcPVInfo.PVCName
volumeInfo.PVCNamespace = pvcPVInfo.PVCNamespace
volumeInfo.PVName = pvcPVInfo.PV.Name
volumeInfo.PVInfo = &PVInfo{
ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
Labels: pvcPVInfo.PV.Labels,
}
} else {
pvcPVInfo := v.pvMap.retrieve("", pvcName, pvb.Spec.Pod.Namespace)
if pvcPVInfo == nil {
v.logger.Warnf("Cannot find info for PVC %s/%s", pvb.Spec.Pod.Namespace, pvcName)
continue
}
volumeInfo.PVCName = pvcPVInfo.PVCName
volumeInfo.PVCNamespace = pvcPVInfo.PVCNamespace
volumeInfo.PVName = pvcPVInfo.PV.Name
volumeInfo.PVInfo = &PVInfo{
ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
Labels: pvcPVInfo.PV.Labels,
}
} else {
v.logger.Debug("The PVB %s doesn't have a corresponding PVC", pvb.Name)
}
@@ -615,51 +615,50 @@ func (v *BackupVolumesInformation) generateVolumeInfoFromDataUpload() {
driverUsedByVSClass = vsClassList[index].Driver
}
}

if pvcPVInfo := v.pvMap.retrieve(
pvcPVInfo := v.pvMap.retrieve(
"",
operation.Spec.ResourceIdentifier.Name,
operation.Spec.ResourceIdentifier.Namespace,
); pvcPVInfo != nil {
dataMover := veleroDatamover
if dataUpload.Spec.DataMover != "" {
dataMover = dataUpload.Spec.DataMover
}

volumeInfo := &BackupVolumeInfo{
BackupMethod: CSISnapshot,
PVCName: pvcPVInfo.PVCName,
PVCNamespace: pvcPVInfo.PVCNamespace,
PVName: pvcPVInfo.PV.Name,
SnapshotDataMoved: true,
Skipped: false,
CSISnapshotInfo: &CSISnapshotInfo{
SnapshotHandle: FieldValueIsUnknown,
VSCName: FieldValueIsUnknown,
OperationID: FieldValueIsUnknown,
Driver: driverUsedByVSClass,
},
SnapshotDataMovementInfo: &SnapshotDataMovementInfo{
DataMover: dataMover,
UploaderType: velerov1api.BackupRepositoryTypeKopia,
OperationID: operation.Spec.OperationID,
Phase: dataUpload.Status.Phase,
},
PVInfo: &PVInfo{
ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
Labels: pvcPVInfo.PV.Labels,
},
}

if dataUpload.Status.StartTimestamp != nil {
volumeInfo.StartTimestamp = dataUpload.Status.StartTimestamp
}

tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo)
} else {
)
if pvcPVInfo == nil {
v.logger.Warnf("Cannot find info for PVC %s/%s", operation.Spec.ResourceIdentifier.Namespace, operation.Spec.ResourceIdentifier.Name)
continue
}
dataMover := veleroDatamover
if dataUpload.Spec.DataMover != "" {
dataMover = dataUpload.Spec.DataMover
}

volumeInfo := &BackupVolumeInfo{
BackupMethod: CSISnapshot,
PVCName: pvcPVInfo.PVCName,
PVCNamespace: pvcPVInfo.PVCNamespace,
PVName: pvcPVInfo.PV.Name,
SnapshotDataMoved: true,
Skipped: false,
CSISnapshotInfo: &CSISnapshotInfo{
SnapshotHandle: FieldValueIsUnknown,
VSCName: FieldValueIsUnknown,
OperationID: FieldValueIsUnknown,
Driver: driverUsedByVSClass,
},
SnapshotDataMovementInfo: &SnapshotDataMovementInfo{
DataMover: dataMover,
UploaderType: velerov1api.BackupRepositoryTypeKopia,
OperationID: operation.Spec.OperationID,
Phase: dataUpload.Status.Phase,
},
PVInfo: &PVInfo{
ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
Labels: pvcPVInfo.PV.Labels,
},
}

if dataUpload.Status.StartTimestamp != nil {
volumeInfo.StartTimestamp = dataUpload.Status.StartTimestamp
}

tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo)
}

v.volumeInfos = append(v.volumeInfos, tmpVolumeInfos...)
@@ -754,17 +753,16 @@ func (t *RestoreVolumeInfoTracker) Populate(ctx context.Context, restoredResourc
t.pvcCSISnapshotMap[pvc.Namespace+"/"+pvcName] = *vs
}
}
if pvc.Status.Phase == corev1api.ClaimBound && pvc.Spec.VolumeName != "" {
pv := &corev1api.PersistentVolume{}
if err := t.client.Get(ctx, kbclient.ObjectKey{Name: pvc.Spec.VolumeName}, pv); err != nil {
log.WithError(err).Error("Failed to get PV")
} else {
t.pvPvc.insert(*pv, pvcName, pvcNS)
}
} else {
if pvc.Status.Phase != corev1api.ClaimBound || pvc.Spec.VolumeName == "" {
log.Warn("PVC is not bound or has no volume name")
continue
}
pv := &corev1api.PersistentVolume{}
if err := t.client.Get(ctx, kbclient.ObjectKey{Name: pvc.Spec.VolumeName}, pv); err != nil {
log.WithError(err).Error("Failed to get PV")
} else {
t.pvPvc.insert(*pv, pvcName, pvcNS)
}
}
if err := t.client.List(ctx, t.datadownloadList, &kbclient.ListOptions{
Namespace: t.restore.Namespace,
@@ -791,19 +789,18 @@ func (t *RestoreVolumeInfoTracker) Result() []*RestoreVolumeInfo {
t.log.WithError(err).Warn("Fail to get PVC from PodVolumeRestore: ", pvr.Name)
continue
}
if pvcName != "" {
volumeInfo.PVCName = pvcName
volumeInfo.PVCNamespace = pvr.Spec.Pod.Namespace
if pvcPVInfo := t.pvPvc.retrieve("", pvcName, pvr.Spec.Pod.Namespace); pvcPVInfo != nil {
volumeInfo.PVName = pvcPVInfo.PV.Name
}
} else {
if pvcName == "" {
// In this case, the volume is not bound to a PVC and
// the PVR will not be able to populate into the volume, so we'll skip it
t.log.Warnf("unable to get PVC for PodVolumeRestore %s/%s, pod: %s/%s, volume: %s",
pvr.Namespace, pvr.Name, pvr.Spec.Pod.Namespace, pvr.Spec.Pod.Name, pvr.Spec.Volume)
continue
}
volumeInfo.PVCName = pvcName
volumeInfo.PVCNamespace = pvr.Spec.Pod.Namespace
if pvcPVInfo := t.pvPvc.retrieve("", pvcName, pvr.Spec.Pod.Namespace); pvcPVInfo != nil {
volumeInfo.PVName = pvcPVInfo.PV.Name
}
volumeInfos = append(volumeInfos, volumeInfo)
}

@@ -95,10 +95,9 @@ func (v *volumeHelperImpl) ShouldPerformSnapshot(obj runtime.Unstructured, group
if action.Type == resourcepolicies.Snapshot {
v.logger.Infof(fmt.Sprintf("performing snapshot action for pv %s", pv.Name))
return true, nil
} else {
v.logger.Infof("Skip snapshot action for pv %s as the action type is %s", pv.Name, action.Type)
return false, nil
}
v.logger.Infof("Skip snapshot action for pv %s as the action type is %s", pv.Name, action.Type)
return false, nil
}
}

@@ -178,11 +177,10 @@ func (v volumeHelperImpl) ShouldPerformFSBackup(volume corev1api.Volume, pod cor
v.logger.Infof("Perform fs-backup action for volume %s of pod %s due to volume policy match",
volume.Name, pod.Namespace+"/"+pod.Name)
return true, nil
} else {
v.logger.Infof("Skip fs-backup action for volume %s for pod %s because the action type is %s",
volume.Name, pod.Namespace+"/"+pod.Name, action.Type)
return false, nil
}
v.logger.Infof("Skip fs-backup action for volume %s for pod %s because the action type is %s",
volume.Name, pod.Namespace+"/"+pod.Name, action.Type)
return false, nil
}
}

@@ -190,11 +188,10 @@ func (v volumeHelperImpl) ShouldPerformFSBackup(volume corev1api.Volume, pod cor
v.logger.Infof("Perform fs-backup action for volume %s of pod %s due to opt-in/out way",
volume.Name, pod.Namespace+"/"+pod.Name)
return true, nil
} else {
v.logger.Infof("Skip fs-backup action for volume %s of pod %s due to opt-in/out way",
volume.Name, pod.Namespace+"/"+pod.Name)
return false, nil
}
v.logger.Infof("Skip fs-backup action for volume %s of pod %s due to opt-in/out way",
volume.Name, pod.Namespace+"/"+pod.Name)
return false, nil
}

func (v volumeHelperImpl) shouldPerformFSBackupLegacy(
@@ -211,17 +208,16 @@ func (v volumeHelperImpl) shouldPerformFSBackupLegacy(
}

return false
} else {
// Check volume in opt-out way
optOutVolumeNames := podvolumeutil.GetVolumesToExclude(&pod)
for _, volumeName := range optOutVolumeNames {
if volume.Name == volumeName {
return false
}
}

return true
}
// Check volume in opt-out way
optOutVolumeNames := podvolumeutil.GetVolumesToExclude(&pod)
for _, volumeName := range optOutVolumeNames {
if volume.Name == volumeName {
return false
}
}

return true
}

func (v *volumeHelperImpl) shouldIncludeVolumeInBackup(vol corev1api.Volume) bool {
@@ -321,24 +321,23 @@ func (p *pvcBackupItemAction) Execute(
// Return without modification to not fail the backup,
// and the above error log makes the backup partially fail.
return item, nil, "", nil, nil
} else {
itemToUpdate = []velero.ResourceIdentifier{
{
GroupResource: schema.GroupResource{
Group: "velero.io",
Resource: "datauploads",
},
Namespace: dataUpload.Namespace,
Name: dataUpload.Name,
},
}
// Set the DataUploadNameLabel, which is used for restore to
// let CSI plugin check whether it should handle the volume.
// If volume is CSI migration, PVC doesn't have the annotation.
annotations[velerov1api.DataUploadNameAnnotation] = dataUpload.Namespace + "/" + dataUpload.Name

dataUploadLog.Info("DataUpload is submitted successfully.")
}
itemToUpdate = []velero.ResourceIdentifier{
{
GroupResource: schema.GroupResource{
Group: "velero.io",
Resource: "datauploads",
},
Namespace: dataUpload.Namespace,
Name: dataUpload.Name,
},
}
// Set the DataUploadNameLabel, which is used for restore to
// let CSI plugin check whether it should handle the volume.
// If volume is CSI migration, PVC doesn't have the annotation.
annotations[velerov1api.DataUploadNameAnnotation] = dataUpload.Namespace + "/" + dataUpload.Name

dataUploadLog.Info("DataUpload is submitted successfully.")
} else {
additionalItems = []velero.ResourceIdentifier{
{
@@ -838,12 +838,11 @@ func zoneFromPVNodeAffinity(res *corev1api.PersistentVolume, topologyKeys ...str
}
for _, exp := range term.MatchExpressions {
if keySet.Has(exp.Key) && exp.Operator == "In" && len(exp.Values) > 0 {
if exp.Key == gkeCsiZoneKey {
providerGke = true
zones = append(zones, exp.Values[0])
} else {
if exp.Key != gkeCsiZoneKey {
return exp.Key, exp.Values[0]
}
providerGke = true
zones = append(zones, exp.Values[0])
}
}
}
@@ -35,9 +35,8 @@ func CreateRetryGenerateName(client kbclient.Client, ctx context.Context, obj kb
}
if obj.GetGenerateName() != "" && obj.GetName() == "" {
return retry.OnError(retry.DefaultRetry, apierrors.IsAlreadyExists, retryCreateFn)
} else {
return client.Create(ctx, obj, &kbclient.CreateOptions{})
}
return client.Create(ctx, obj, &kbclient.CreateOptions{})
}

// CapBackoff provides a backoff with a set backoff cap
@@ -55,9 +55,8 @@ func (fr *fakeRunHelper) Init() error {
func (fr *fakeRunHelper) RunCancelableDataPath(_ context.Context) (string, error) {
if fr.runCancelableDataPathErr != nil {
return "", fr.runCancelableDataPathErr
} else {
return fr.runCancelableDataPathResult, nil
}
return fr.runCancelableDataPathResult, nil
}

func (fr *fakeRunHelper) Shutdown() {
@@ -46,9 +46,8 @@ func (em *exitWithMessageMock) CreateFile(name string) (*os.File, error) {

if em.writeFail {
return os.OpenFile(em.filePath, os.O_CREATE|os.O_RDONLY, 0500)
} else {
return os.Create(em.filePath)
}
return os.Create(em.filePath)
}

func TestExitWithMessage(t *testing.T) {
@@ -384,9 +384,8 @@ func setDefaultBackupLocation(ctx context.Context, client ctrlclient.Client, nam
if apierrors.IsNotFound(err) {
logger.WithField("backupStorageLocation", defaultBackupLocation).WithError(err).Warn("Failed to set default backup storage location at server start")
return nil
} else {
return errors.WithStack(err)
}
return errors.WithStack(err)
}

if !backupLocation.Spec.Default {
@@ -901,13 +900,12 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
// wasn't found and it returns an error.
func removeControllers(disabledControllers []string, enabledRuntimeControllers map[string]struct{}, logger logrus.FieldLogger) error {
for _, controllerName := range disabledControllers {
if _, ok := enabledRuntimeControllers[controllerName]; ok {
logger.Infof("Disabling controller: %s", controllerName)
delete(enabledRuntimeControllers, controllerName)
} else {
if _, ok := enabledRuntimeControllers[controllerName]; !ok {
msg := fmt.Sprintf("Invalid value for --disable-controllers flag provided: %s. Valid values are: %s", controllerName, strings.Join(config.DisableableControllers, ","))
return errors.New(msg)
}
logger.Infof("Disabling controller: %s", controllerName)
delete(enabledRuntimeControllers, controllerName)
}
return nil
}
@@ -766,12 +766,11 @@ func describePodVolumeBackups(d *Describer, details bool, podVolumeBackups []vel
// Get the type of pod volume uploader. Since the uploader only comes from a single source, we can
// take the uploader type from the first element of the array.
var uploaderType string
if len(podVolumeBackups) > 0 {
uploaderType = podVolumeBackups[0].Spec.UploaderType
} else {
if len(podVolumeBackups) == 0 {
d.Printf("\tPod Volume Backups: <none included>\n")
return
}
uploaderType = podVolumeBackups[0].Spec.UploaderType

if details {
d.Printf("\tPod Volume Backups - %s:\n", uploaderType)
@@ -482,12 +482,11 @@ func describePodVolumeBackupsInSF(backups []velerov1api.PodVolumeBackup, details
// Get the type of pod volume uploader. Since the uploader only comes from a single source, we can
// take the uploader type from the first element of the array.
var uploaderType string
if len(backups) > 0 {
uploaderType = backups[0].Spec.UploaderType
} else {
if len(backups) == 0 {
backupVolumes["podVolumeBackups"] = "<none included>"
return
}
uploaderType = backups[0].Spec.UploaderType
// type display the type of pod volume backups
podVolumeBackupsInfo["uploderType"] = uploaderType

@@ -344,11 +344,10 @@ func describePodVolumeRestores(d *Describer, restores []velerov1api.PodVolumeRes
// Get the type of pod volume uploader. Since the uploader only comes from a single source, we can
// take the uploader type from the first element of the array.
var uploaderType string
if len(restores) > 0 {
uploaderType = restores[0].Spec.UploaderType
} else {
if len(restores) == 0 {
return
}
uploaderType = restores[0].Spec.UploaderType

if details {
d.Printf("%s Restores:\n", uploaderType)
@@ -396,9 +396,8 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque

if cnt > 0 {
return false, nil
} else {
return true, nil
}
return true, nil
})
if err != nil {
log.WithError(err).Error("Error polling for deletion of restores")
@@ -202,9 +202,8 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request
log.Warnf("expose snapshot with err %v but it may caused by clean up resources in cancel action", err)
r.restoreExposer.CleanUp(ctx, getDataDownloadOwnerObject(dd))
return ctrl.Result{}, nil
} else {
return r.errorOut(ctx, dd, err, "error to expose snapshot", log)
}
return r.errorOut(ctx, dd, err, "error to expose snapshot", log)
}
log.Info("Restore is exposed")
@@ -279,9 +278,8 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request
if err == datapath.ConcurrentLimitExceed {
log.Info("Data path instance is concurrent limited requeue later")
return ctrl.Result{Requeue: true, RequeueAfter: time.Second * 5}, nil
} else {
return r.errorOut(ctx, dd, err, "error to create data path", log)
}
return r.errorOut(ctx, dd, err, "error to create data path", log)
}

if err := r.initCancelableDataPath(ctx, asyncBR, result, log); err != nil {
@@ -337,21 +335,20 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request
return ctrl.Result{}, nil
}

return ctrl.Result{}, nil
} else {
// put the finalizer remove action here for all cr will goes to the final status, we could check finalizer and do remove action in final status
// instead of intermediate state
// remove finalizer no matter whether the cr is being deleted or not for it is no longer needed when internal resources are all cleaned up
// also in final status cr won't block the direct delete of the velero namespace
if isDataDownloadInFinalState(dd) && controllerutil.ContainsFinalizer(dd, DataUploadDownloadFinalizer) {
original := dd.DeepCopy()
controllerutil.RemoveFinalizer(dd, DataUploadDownloadFinalizer)
if err := r.client.Patch(ctx, dd, client.MergeFrom(original)); err != nil {
log.WithError(err).Error("error to remove finalizer")
}
}
return ctrl.Result{}, nil
}
// put the finalizer remove action here for all cr will goes to the final status, we could check finalizer and do remove action in final status
// instead of intermediate state
// remove finalizer no matter whether the cr is being deleted or not for it is no longer needed when internal resources are all cleaned up
// also in final status cr won't block the direct delete of the velero namespace
if isDataDownloadInFinalState(dd) && controllerutil.ContainsFinalizer(dd, DataUploadDownloadFinalizer) {
original := dd.DeepCopy()
controllerutil.RemoveFinalizer(dd, DataUploadDownloadFinalizer)
if err := r.client.Patch(ctx, dd, client.MergeFrom(original)); err != nil {
log.WithError(err).Error("error to remove finalizer")
}
}
return ctrl.Result{}, nil
}

func (r *DataDownloadReconciler) initCancelableDataPath(ctx context.Context, asyncBR datapath.AsyncBR, res *exposer.ExposeResult, log logrus.FieldLogger) error {
@@ -706,9 +703,8 @@ func (r *DataDownloadReconciler) exclusiveUpdateDataDownload(ctx context.Context
// it won't rollback dd in memory when error
if apierrors.IsConflict(err) {
return false, nil
} else {
return false, err
}
return false, err
}

func (r *DataDownloadReconciler) getTargetPVC(ctx context.Context, dd *velerov2alpha1api.DataDownload) (*v1.PersistentVolumeClaim, error) {
@@ -818,9 +814,8 @@ func UpdateDataDownloadWithRetry(ctx context.Context, client client.Client, name
if apierrors.IsConflict(err) {
log.Warnf("failed to update datadownload for %s/%s and will retry it", dd.Namespace, dd.Name)
return false, nil
} else {
return false, errors.Wrapf(err, "error updating datadownload %s/%s", dd.Namespace, dd.Name)
}
return false, errors.Wrapf(err, "error updating datadownload %s/%s", dd.Namespace, dd.Name)
}
}

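The reconciler hunks on either side of this point share another recurring shape: inside a retry/poll callback, a conflict error means "retry" (`return false, nil`) and anything else aborts, and the rewrite drops the `else` around the abort path. A dependency-free sketch of the callback shape (hypothetical names; Velero's real code uses `apierrors.IsConflict` and a poll helper):

```go
package main

import (
	"errors"
	"fmt"
)

var errConflict = errors.New("conflict") // stand-in for an API-server conflict

// tryUpdate follows the (done, err) condition-function convention used by the
// reconcilers: a conflict requests another attempt, other errors stop the poll.
func tryUpdate(update func() error) (bool, error) {
	if err := update(); err != nil {
		if errors.Is(err, errConflict) {
			return false, nil // retry
		}
		return false, err // early return replaces the old else branch
	}
	return true, nil
}

func main() {
	done, err := tryUpdate(func() error { return errConflict })
	fmt.Println(done, err)
}
```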
@@ -222,9 +222,8 @@ func (r *DataUploadReconciler) Reconcile(ctx context.Context, req ctrl.Request)
log.Warnf("expose snapshot with err %v but it may caused by clean up resources in cancel action", err)
r.cleanUp(ctx, du, log)
return ctrl.Result{}, nil
} else {
return r.errorOut(ctx, du, err, "error to expose snapshot", log)
}
return r.errorOut(ctx, du, err, "error to expose snapshot", log)
}

log.Info("Snapshot is exposed")
@@ -305,9 +304,8 @@ func (r *DataUploadReconciler) Reconcile(ctx context.Context, req ctrl.Request)
if err == datapath.ConcurrentLimitExceed {
log.Info("Data path instance is concurrent limited requeue later")
return ctrl.Result{Requeue: true, RequeueAfter: time.Second * 5}, nil
} else {
return r.errorOut(ctx, du, err, "error to create data path", log)
}
return r.errorOut(ctx, du, err, "error to create data path", log)
}

if err := r.initCancelableDataPath(ctx, asyncBR, res, log); err != nil {
@@ -365,20 +363,19 @@ func (r *DataUploadReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return ctrl.Result{}, nil
}
return ctrl.Result{}, nil
} else {
// put the finalizer remove action here for all cr will goes to the final status, we could check finalizer and do remove action in final status
// instead of intermediate state.
// remove finalizer no matter whether the cr is being deleted or not for it is no longer needed when internal resources are all cleaned up
// also in final status cr won't block the direct delete of the velero namespace
if isDataUploadInFinalState(du) && controllerutil.ContainsFinalizer(du, DataUploadDownloadFinalizer) {
original := du.DeepCopy()
controllerutil.RemoveFinalizer(du, DataUploadDownloadFinalizer)
if err := r.client.Patch(ctx, du, client.MergeFrom(original)); err != nil {
log.WithError(err).Error("error to remove finalizer")
}
}
return ctrl.Result{}, nil
}
// put the finalizer remove action here for all cr will goes to the final status, we could check finalizer and do remove action in final status
// instead of intermediate state.
// remove finalizer no matter whether the cr is being deleted or not for it is no longer needed when internal resources are all cleaned up
// also in final status cr won't block the direct delete of the velero namespace
if isDataUploadInFinalState(du) && controllerutil.ContainsFinalizer(du, DataUploadDownloadFinalizer) {
original := du.DeepCopy()
controllerutil.RemoveFinalizer(du, DataUploadDownloadFinalizer)
if err := r.client.Patch(ctx, du, client.MergeFrom(original)); err != nil {
log.WithError(err).Error("error to remove finalizer")
}
}
return ctrl.Result{}, nil
}

func (r *DataUploadReconciler) initCancelableDataPath(ctx context.Context, asyncBR datapath.AsyncBR, res *exposer.ExposeResult, log logrus.FieldLogger) error {
@@ -782,9 +779,8 @@ func (r *DataUploadReconciler) exclusiveUpdateDataUpload(ctx context.Context, du
// warn we won't rollback du values in memory when error
if apierrors.IsConflict(err) {
return false, nil
} else {
return false, err
}
return false, err
}

func (r *DataUploadReconciler) closeDataPath(ctx context.Context, duName string) {
@@ -921,9 +917,8 @@ func UpdateDataUploadWithRetry(ctx context.Context, client client.Client, namesp
if apierrors.IsConflict(err) {
log.Warnf("failed to update dataupload for %s/%s and will retry it", du.Namespace, du.Name)
return false, nil
} else {
return false, errors.Wrapf(err, "error updating dataupload with error %s/%s", du.Namespace, du.Name)
}
return false, errors.Wrapf(err, "error updating dataupload with error %s/%s", du.Namespace, du.Name)
}
}

@@ -129,9 +129,8 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ
if err != nil {
if err == datapath.ConcurrentLimitExceed {
return ctrl.Result{Requeue: true, RequeueAfter: time.Second * 5}, nil
} else {
return r.errorOut(ctx, &pvb, err, "error to create data path", log)
}
return r.errorOut(ctx, &pvb, err, "error to create data path", log)
}

r.metrics.RegisterPodVolumeBackupEnqueue(r.nodeName)
@@ -122,9 +122,8 @@ func (c *PodVolumeRestoreReconciler) Reconcile(ctx context.Context, req ctrl.Req
if err != nil {
if err == datapath.ConcurrentLimitExceed {
return ctrl.Result{Requeue: true, RequeueAfter: time.Second * 5}, nil
} else {
return c.errorOut(ctx, pvr, err, "error to create data path", log)
}
return c.errorOut(ctx, pvr, err, "error to create data path", log)
}

original := pvr.DeepCopy()
@@ -200,10 +200,9 @@ func (r *restoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
} else {
log.Error("DeletionTimestamp is marked but can't find the expected finalizer")
return ctrl.Result{}, nil
}
log.Error("DeletionTimestamp is marked but can't find the expected finalizer")
return ctrl.Result{}, nil
}

// add finalizer
@@ -379,12 +378,12 @@ func (r *restoreReconciler) validateAndComplete(restore *api.Restore) (backupInf
restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, "No backups found for schedule")
}

if backup := mostRecentCompletedBackup(backupList.Items); backup.Name != "" {
restore.Spec.BackupName = backup.Name
} else {
backup := mostRecentCompletedBackup(backupList.Items)
if backup.Name == "" {
restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, "No completed backups found for schedule")
return backupInfo{}, nil
}
restore.Spec.BackupName = backup.Name
}

info, err := r.fetchBackupInfo(restore.Spec.BackupName)
@@ -547,10 +546,9 @@ func (r *restoreReconciler) runValidatedRestore(restore *api.Restore, info backu
if err != nil {
restoreLog.WithError(err).Errorf("fail to get VolumeInfos metadata file for backup %s", restore.Spec.BackupName)
return errors.WithStack(err)
} else {
for _, volumeInfo := range volumeInfos {
backupVolumeInfoMap[volumeInfo.PVName] = *volumeInfo
}
}
for _, volumeInfo := range volumeInfos {
backupVolumeInfoMap[volumeInfo.PVName] = *volumeInfo
}

restoreLog.Info("starting restore")
@@ -77,12 +77,11 @@ var _ = Describe("Server Status Request Reconciler", func() {
})

Expect(actualResult).To(BeEquivalentTo(test.expectedRequeue))
if test.expectedErrMsg == "" {
Expect(err).ToNot(HaveOccurred())
} else {
if test.expectedErrMsg != "" {
Expect(err.Error()).To(BeEquivalentTo(test.expectedErrMsg))
return
}
Expect(err).ToNot(HaveOccurred())

instance := &velerov1api.ServerStatusRequest{}
err = r.client.Get(ctx, kbclient.ObjectKey{Name: test.req.Name, Namespace: test.req.Namespace}, instance)
@@ -149,9 +149,8 @@ func (r *BackupMicroService) RunCancelableDataPath(ctx context.Context) (string,

if du.Status.Phase == velerov2alpha1api.DataUploadPhaseInProgress {
return true, nil
} else {
return false, nil
}
return false, nil
})

if err != nil {
@@ -137,9 +137,8 @@ func (r *RestoreMicroService) RunCancelableDataPath(ctx context.Context) (string

if dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseInProgress {
return true, nil
} else {
return false, nil
}
return false, nil
})
if err != nil {
log.WithError(err).Error("Failed to wait dd")
@@ -21,9 +21,8 @@ import "fmt"
func GetUploaderType(dataMover string) string {
if dataMover == "" || dataMover == "velero" {
return "kopia"
} else {
return dataMover
}
return dataMover
}

func IsBuiltInUploader(dataMover string) bool {
@@ -91,7 +91,6 @@ func (m *Manager) GetAsyncBR(jobName string) AsyncBR {

if async, exist := m.tracker[jobName]; exist {
return async
} else {
return nil
}
return nil
}
@@ -258,9 +258,8 @@ func (e *csiSnapshotExposer) GetExposed(ctx context.Context, ownerObject corev1.
if apierrors.IsNotFound(err) {
curLog.WithField("backup pod", backupPodName).Debugf("Backup pod is not running in the current node %s", exposeWaitParam.NodeName)
return nil, nil
} else {
return nil, errors.Wrapf(err, "error to get backup pod %s", backupPodName)
}
return nil, errors.Wrapf(err, "error to get backup pod %s", backupPodName)
}

curLog.WithField("pod", pod.Name).Infof("Backup pod is in running state in node %s", pod.Spec.NodeName)
@@ -172,9 +172,8 @@ func (e *genericRestoreExposer) GetExposed(ctx context.Context, ownerObject core
if apierrors.IsNotFound(err) {
curLog.WithField("restore pod", restorePodName).Debug("Restore pod is not running in the current node")
return nil, nil
} else {
return nil, errors.Wrapf(err, "error to get restore pod %s", restorePodName)
}
return nil, errors.Wrapf(err, "error to get restore pod %s", restorePodName)
}

curLog.WithField("pod", pod.Name).Infof("Restore pod is in running state in node %s", pod.Spec.NodeName)
@@ -370,9 +370,8 @@ func TestRebindVolume(t *testing.T) {
if hookCount == 0 {
hookCount++
return false, nil, nil
} else {
return true, nil, errors.New("fake-patch-error")
}
return true, nil, errors.New("fake-patch-error")
},
},
},

@@ -117,9 +117,8 @@ func isRunning(ctx context.Context, kubeClient kubernetes.Interface, namespace s
return ErrDaemonSetNotFound
} else if err != nil {
return err
} else {
return nil
}
return nil
}

// KbClientIsRunningInNode checks if the node agent pod is running properly in a specified node through kube client. If not, return the error found
@@ -103,9 +103,8 @@ func GetBackendType(provider string, config map[string]string) BackendType {
return bt
} else if config != nil && config["s3Url"] != "" {
return AWSBackend
} else {
return bt
}
return bt
}

func IsBackendTypeValid(backendType BackendType) bool {
@@ -89,9 +89,8 @@ func (r *Ensurer) EnsureRepo(ctx context.Context, namespace, volumeNamespace, ba

// no repo found: create one and wait for it to be ready
return r.createBackupRepositoryAndWait(ctx, namespace, backupRepoKey)
} else {
return nil, errors.WithStack(err)
}
return nil, errors.WithStack(err)
}

func (r *Ensurer) repoLock(key BackupRepositoryKey) *sync.Mutex {
@@ -126,9 +125,8 @@ func (r *Ensurer) waitBackupRepository(ctx context.Context, namespace string, ba
} else if isBackupRepositoryNotFoundError(err) || isBackupRepositoryNotProvisionedError(err) {
checkErr = err
return false, nil
} else {
return false, err
}
return false, err
}

err := wait.PollUntilContextTimeout(ctx, time.Millisecond*500, r.resourceTimeout, true, checkFunc)
@@ -213,11 +213,10 @@ func getJobConfig(
); err != nil {
if apierrors.IsNotFound(err) {
return nil, nil
} else {
return nil, errors.Wrapf(
err,
"fail to get repo maintenance job configs %s", repoMaintenanceJobConfig)
}
return nil, errors.Wrapf(
err,
"fail to get repo maintenance job configs %s", repoMaintenanceJobConfig)
}

if cm.Data == nil {
@@ -100,13 +100,12 @@ func (p *volumeSnapshotContentRestoreItemAction) Execute(
// Set the DeletionPolicy to Retain to avoid VS deletion will not trigger snapshot deletion
vsc.Spec.DeletionPolicy = snapshotv1api.VolumeSnapshotContentRetain

if vscFromBackup.Status != nil && vscFromBackup.Status.SnapshotHandle != nil {
vsc.Spec.Source.VolumeHandle = nil
vsc.Spec.Source.SnapshotHandle = vscFromBackup.Status.SnapshotHandle
} else {
if vscFromBackup.Status == nil || vscFromBackup.Status.SnapshotHandle == nil {
p.log.Errorf("fail to get snapshot handle from VSC %s status", vsc.Name)
return nil, errors.Errorf("fail to get snapshot handle from VSC %s status", vsc.Name)
}
vsc.Spec.Source.VolumeHandle = nil
vsc.Spec.Source.SnapshotHandle = vscFromBackup.Status.SnapshotHandle

additionalItems := []velero.ResourceIdentifier{}
if csi.IsVolumeSnapshotContentHasDeleteSecret(&vsc) {
@@ -65,9 +65,8 @@ func (a *ServiceAccountAction) Execute(input *velero.RestoreItemActionExecuteInp
log.Debug("Match found - excluding this secret")
serviceAccount.Secrets = append(serviceAccount.Secrets[:i], serviceAccount.Secrets[i+1:]...)
break
} else {
log.Debug("No match found - including this secret")
}
log.Debug("No match found - including this secret")
}

res, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&serviceAccount)
@@ -1244,12 +1244,11 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
// Return early because we don't want to restore the PV itself, we
// want to dynamically re-provision it.
return warnings, errs, itemExists
} else {
obj, err = ctx.handleSkippedPVHasRetainPolicy(obj, restoreLogger)
if err != nil {
errs.Add(namespace, err)
return warnings, errs, itemExists
}
}
obj, err = ctx.handleSkippedPVHasRetainPolicy(obj, restoreLogger)
if err != nil {
errs.Add(namespace, err)
return warnings, errs, itemExists
}
}
} else {
@@ -69,13 +69,13 @@ func (o *BlockOutput) WriteFile(ctx context.Context, relativePath string, remote
if bytesToWrite > 0 {
offset := 0
for bytesToWrite > 0 {
if bytesWritten, err := targetFile.Write(buffer[offset:bytesToWrite]); err == nil {
progressCb(int64(bytesWritten))
bytesToWrite -= bytesWritten
offset += bytesWritten
} else {
bytesWritten, err := targetFile.Write(buffer[offset:bytesToWrite])
if err != nil {
return errors.Wrapf(err, "failed to write data to file %s", o.targetFileName)
}
progressCb(int64(bytesWritten))
bytesToWrite -= bytesWritten
offset += bytesWritten
}
}
}
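The `BlockOutput.WriteFile` hunk above inverts an `if err == nil { progress } else { return }` inside a partial-write loop into a guard clause. A self-contained sketch of the same full-write loop shape (hypothetical writer and names, not Velero's kopia-facing types):

```go
package main

import (
	"bytes"
	"fmt"
)

// writeAll keeps writing until the buffer is drained, reporting progress per
// chunk; the error check comes first, as in the rewritten BlockOutput loop.
func writeAll(w *bytes.Buffer, buf []byte, progress func(int64)) error {
	offset, toWrite := 0, len(buf)
	for toWrite > 0 {
		n, err := w.Write(buf[offset : offset+toWrite])
		if err != nil {
			return fmt.Errorf("failed to write data: %w", err)
		}
		progress(int64(n))
		toWrite -= n
		offset += n
	}
	return nil
}

func main() {
	var out bytes.Buffer
	_ = writeAll(&out, []byte("hello"), func(n int64) { fmt.Println("wrote", n) })
	fmt.Println(out.String())
}
```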
@@ -86,7 +86,6 @@ func NewUploaderProvider(
}
if uploaderType == uploader.KopiaType {
return NewKopiaUploaderProvider(requesterType, ctx, credGetter, backupRepo, log)
} else {
return NewResticUploaderProvider(repoIdentifier, bsl, credGetter, repoKeySelector, log)
}
return NewResticUploaderProvider(repoIdentifier, bsl, credGetter, repoKeySelector, log)
}

@@ -185,9 +185,8 @@ func EnsureDeleteVS(ctx context.Context, snapshotClient snapshotter.SnapshotV1In
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
return errors.Errorf("timeout to assure VolumeSnapshot %s is deleted, finalizers in VS %v", vsName, updated.Finalizers)
} else {
return errors.Wrapf(err, "error to assure VolumeSnapshot is deleted, %s", vsName)
}
return errors.Wrapf(err, "error to assure VolumeSnapshot is deleted, %s", vsName)
}

return nil
@@ -244,9 +243,8 @@ func EnsureDeleteVSC(ctx context.Context, snapshotClient snapshotter.SnapshotV1I
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
return errors.Errorf("timeout to assure VolumeSnapshotContent %s is deleted, finalizers in VSC %v", vscName, updated.Finalizers)
} else {
return errors.Wrapf(err, "error to assure VolumeSnapshotContent is deleted, %s", vscName)
}
return errors.Wrapf(err, "error to assure VolumeSnapshotContent is deleted, %s", vscName)
}

return nil
@@ -693,11 +691,10 @@ func WaitUntilVSCHandleIsReady(
return nil,
errors.Errorf("CSI got timed out with error: %v",
*vsc.Status.Error.Message)
} else {
log.Errorf(
"Timed out awaiting reconciliation of volumesnapshot %s/%s",
volSnap.Namespace, volSnap.Name)
}
log.Errorf(
"Timed out awaiting reconciliation of volumesnapshot %s/%s",
volSnap.Namespace, volSnap.Name)
}
return nil, err
}
@@ -123,9 +123,8 @@ func EnsureDeletePod(ctx context.Context, podGetter corev1client.CoreV1Interface
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
return errors.Errorf("timeout to assure pod %s is deleted, finalizers in pod %v", pod, updated.Finalizers)
} else {
return errors.Wrapf(err, "error to assure pod is deleted, %s", pod)
}
return errors.Wrapf(err, "error to assure pod is deleted, %s", pod)
}

return nil
@@ -51,10 +51,9 @@ func DeletePVAndPVCIfAny(ctx context.Context, client corev1client.CoreV1Interfac
if apierrors.IsNotFound(err) {
log.WithError(err).Debugf("Abort deleting PV and PVC, for related PVC doesn't exist, %s/%s", pvcNamespace, pvcName)
return
} else {
log.Warnf("failed to get pvc %s/%s with err %v", pvcNamespace, pvcName, err)
return
}
log.Warnf("failed to get pvc %s/%s with err %v", pvcNamespace, pvcName, err)
return
}

if pvcObj.Spec.VolumeName == "" {
@@ -153,9 +152,8 @@ func EnsureDeletePVC(ctx context.Context, pvcGetter corev1client.CoreV1Interface
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
return errors.Errorf("timeout to assure pvc %s is deleted, finalizers in pvc %v", pvcName, updated.Finalizers)
} else {
return errors.Wrapf(err, "error to ensure pvc deleted for %s", pvcName)
}
return errors.Wrapf(err, "error to ensure pvc deleted for %s", pvcName)
}

return nil
@@ -184,9 +182,8 @@ func EnsurePVDeleted(ctx context.Context, pvGetter corev1client.CoreV1Interface,
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
return errors.Errorf("timeout to assure pv %s is deleted", pvName)
} else {
return errors.Wrapf(err, "error to ensure pv is deleted for %s", pvName)
}
return errors.Wrapf(err, "error to ensure pv is deleted for %s", pvName)
}

return nil
@@ -379,9 +376,8 @@ func WaitPVBound(ctx context.Context, pvGetter corev1client.CoreV1Interface, pvN

if err != nil {
return nil, errors.Wrap(err, "error to wait for bound of PV")
} else {
return updated, nil
}
return updated, nil
}

// IsPVCBound returns true if the specified PVC has been bound
@@ -140,10 +140,9 @@ func getInnermostTrace(err error) stackTracer {
}

c, isCauser := err.(causer)
if isCauser {
err = c.Cause()
} else {
if !isCauser {
return tracer
}
err = c.Cause()
}
}
@@ -21,7 +21,6 @@ const NilString = "<nil>"
func GetString(str *string) string {
if str == nil {
return NilString
} else {
return *str
}
return *str
}

@@ -283,9 +283,8 @@ func runBackupDeletionTests(client TestClient, veleroCfg VeleroConfig, backupLoc
err = DeleteBackup(context.Background(), backupName, &veleroCfg)
if err != nil {
return errors.Wrapf(err, "|| UNEXPECTED || - Failed to delete backup %q", backupName)
} else {
fmt.Printf("|| EXPECTED || - Success to delete backup %s locally\n", backupName)
}
fmt.Printf("|| EXPECTED || - Success to delete backup %s locally\n", backupName)
fmt.Printf("|| EXPECTED || - Backup deletion test completed successfully\n")
return nil
}

@@ -389,9 +389,8 @@ func rerenderTestYaml(index int, group, path string) (string, error) {
return fmt.Sprintf("RockBand%dList", index)
} else if s == "rockbands" {
return fmt.Sprintf("rockband%ds", index)
} else {
return fmt.Sprintf("rockband%d", index)
}
return fmt.Sprintf("rockband%d", index)
})

// replace group name to new value
@@ -116,9 +116,7 @@ func (n *NamespaceMapping) Verify() error {
}

func (n *NamespaceMapping) Clean() error {
if CurrentSpecReport().Failed() && n.VeleroCfg.FailFast {
fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.")
} else {
if !(CurrentSpecReport().Failed() && n.VeleroCfg.FailFast) {
if err := DeleteStorageClass(context.Background(), n.Client, KibishiiStorageClassName); err != nil {
return err
}
@@ -130,6 +128,7 @@ func (n *NamespaceMapping) Clean() error {

return n.GetTestCase().Clean()
}
fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.")

return nil
}
@@ -177,11 +177,10 @@ func (r *RBACCase) Destroy() error {
}

func (r *RBACCase) Clean() error {
if CurrentSpecReport().Failed() && r.VeleroCfg.FailFast {
fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.")
} else {
if !(CurrentSpecReport().Failed() && r.VeleroCfg.FailFast) {
return r.Destroy()
}
fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.")

return nil
}
@@ -651,15 +651,14 @@ func GetKubeConfigContext() error {
if err != nil {
return err
}
if test.VeleroCfg.StandbyClusterContext != "" {
tcStandby, err = k8s.NewTestClient(test.VeleroCfg.StandbyClusterContext)
test.VeleroCfg.StandbyClient = &tcStandby
if err != nil {
return err
}
} else {
if test.VeleroCfg.StandbyClusterContext == "" {
return errors.New("migration test needs 2 clusters to run")
}
tcStandby, err = k8s.NewTestClient(test.VeleroCfg.StandbyClusterContext)
test.VeleroCfg.StandbyClient = &tcStandby
if err != nil {
return err
}
}

return nil
@@ -177,17 +177,15 @@ func fileExist(ctx context.Context, namespace, podName, volume string) error {
origin_content := strings.Replace(CreateFileContent(namespace, podName, volume), "\n", "", -1)
if c == origin_content {
return nil
} else {
return errors.New(fmt.Sprintf("UNEXPECTED: File %s does not exist in volume %s of pod %s in namespace %s.",
FILE_NAME, volume, podName, namespace))
}
return errors.New(fmt.Sprintf("UNEXPECTED: File %s does not exist in volume %s of pod %s in namespace %s.",
FILE_NAME, volume, podName, namespace))
}
func fileNotExist(ctx context.Context, namespace, podName, volume string) error {
_, _, err := ReadFileFromPodVolume(ctx, namespace, podName, podName, volume, FILE_NAME)
if err != nil {
return nil
} else {
return errors.New(fmt.Sprintf("UNEXPECTED: File %s exist in volume %s of pod %s in namespace %s.",
FILE_NAME, volume, podName, namespace))
}
return errors.New(fmt.Sprintf("UNEXPECTED: File %s exist in volume %s of pod %s in namespace %s.",
FILE_NAME, volume, podName, namespace))
}
@@ -118,19 +118,18 @@ func (l *LabelSelector) Verify() error {
fmt.Printf("Checking resources in namespaces ...%s\n", namespace)
//Check deployment
_, err := GetDeployment(l.Client.ClientGo, namespace, l.CaseBaseName)
if nsNum%2 == 1 { //include
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to list deployment in namespace: %q", namespace))
}
} else { //exclude
if nsNum%2 != 1 { //exclude
if err == nil {
return fmt.Errorf("failed to exclude deployment in namespaces %q", namespace)
} else {
if apierrors.IsNotFound(err) { //resource should be excluded
return nil
}
return errors.Wrap(err, fmt.Sprintf("failed to list deployment in namespace: %q", namespace))
}
if apierrors.IsNotFound(err) { //resource should be excluded
return nil
}
return errors.Wrap(err, fmt.Sprintf("failed to list deployment in namespace: %q", namespace))
}
//include
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to list deployment in namespace: %q", namespace))
}

//Check secrets
@@ -138,21 +137,20 @@ func (l *LabelSelector) Verify() error {
LabelSelector: l.labelSelector,
})

if nsNum%2 == 0 { //include
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to list secrets in namespace: %q", namespace))
} else if len(secretsList.Items) == 0 {
return errors.Errorf("no secrets found in namespace: %q", namespace)
}
} else { //exclude
if nsNum%2 != 0 { //exclude
if err == nil {
return fmt.Errorf("failed to exclude secrets in namespaces %q", namespace)
} else {
if apierrors.IsNotFound(err) { //resource should be excluded
return nil
}
return errors.Wrap(err, fmt.Sprintf("failed to list secrets in namespace: %q", namespace))
}
if apierrors.IsNotFound(err) { //resource should be excluded
return nil
}
return errors.Wrap(err, fmt.Sprintf("failed to list secrets in namespace: %q", namespace))
}
//include
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to list secrets in namespace: %q", namespace))
} else if len(secretsList.Items) == 0 {
return errors.Errorf("no secrets found in namespace: %q", namespace)
}
}
return nil
@@ -131,15 +131,14 @@ func (r *ResourceModifiersCase) Verify() error {

func (r *ResourceModifiersCase) Clean() error {
// If created some resources which is not in current test namespace, we NEED to override the base Clean function
if CurrentSpecReport().Failed() && r.VeleroCfg.FailFast {
fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.")
} else {
if !(CurrentSpecReport().Failed() && r.VeleroCfg.FailFast) {
if err := DeleteConfigMap(r.Client.ClientGo, r.VeleroCfg.VeleroNamespace, r.cmName); err != nil {
return err
}

return r.GetTestCase().Clean() // only clean up resources in test namespace
}
fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.")

return nil
}

@@ -173,15 +173,14 @@ func (r *ResourcePoliciesCase) Verify() error {
 
 func (r *ResourcePoliciesCase) Clean() error {
 	// If created some resources which is not in current test namespace, we NEED to override the base Clean function
-	if CurrentSpecReport().Failed() && r.VeleroCfg.FailFast {
-		fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.")
-	} else {
+	if !(CurrentSpecReport().Failed() && r.VeleroCfg.FailFast) {
 		if err := DeleteConfigMap(r.Client.ClientGo, r.VeleroCfg.VeleroNamespace, r.cmName); err != nil {
 			return err
 		}
 
 		return r.GetTestCase().Clean() // only clean up resources in test namespace
 	}
+	fmt.Println("Test case failed and fail fast is enabled. Skip resource clean up.")
 
 	return nil
 }
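
Both Clean implementations get the identical rewrite, and the shape of the result is worth a note: because the rule runs with the "preserveScope" argument, revive keeps the former else body inside a block (`if !(...) { ... }`) instead of unindenting it into the function, so every variable the body declares keeps its original scope. A rough sketch of the distinction, with hypothetical names:

	package sketch

	import "fmt"

	// Without preserveScope the suggestion would be to unindent the body,
	// widening cfg's scope to the whole function. With preserveScope the
	// body stays inside a block, as in the two hunks above.
	func clean(failed bool) error {
		if !failed {
			cfg := "loaded" // cfg keeps its original, narrow scope
			fmt.Println(cfg)
			return nil
		}
		fmt.Println("test failed and fail fast is enabled; skipping clean-up")
		return nil
	}
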
@@ -149,13 +149,12 @@ func CheckVolumeSnapshotCR(client TestClient, index map[string]string, expectedC
 		return nil, errors.New("Fail to get APIVersion")
 	}
 
-	if apiVersion[0] == "v1" {
-		if snapshotContentNameList, err = GetCsiSnapshotHandle(client, apiVersion[0], index); err != nil {
-			return nil, errors.Wrap(err, "Fail to get CSI snapshot content")
-		}
-	} else {
+	if apiVersion[0] != "v1" {
 		return nil, errors.New("API version is invalid")
 	}
+	if snapshotContentNameList, err = GetCsiSnapshotHandle(client, apiVersion[0], index); err != nil {
+		return nil, errors.Wrap(err, "Fail to get CSI snapshot content")
+	}
 	if expectedCount >= 0 {
 		if len(snapshotContentNameList) != expectedCount {
 			return nil, errors.New(fmt.Sprintf("Snapshot content count %d is not as expect %d", len(snapshotContentNameList), expectedCount))
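
This hunk pairs early-return with the indent-error-flow idea enabled in the same config: the invalid case (`apiVersion[0] != "v1"`) is rejected up front, and the snapshot-handle lookup proceeds unindented. The inversion in isolation, with a hypothetical fetch helper:

	package sketch

	import "errors"

	func fetch(version string) (string, error) {
		// Reject the unsupported case first instead of nesting the happy
		// path inside if version == "v1" { ... } else { ... }.
		if version != "v1" {
			return "", errors.New("API version is invalid")
		}
		return "snapshot-handle", nil // happy path at top indentation
	}
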
@@ -336,14 +336,12 @@ func FileExistInPV(ctx context.Context, namespace, podName, containerName, volum
 	output := fmt.Sprintf("%s:%s", stdout, stderr)
 	if strings.Contains(output, fmt.Sprintf("/%s/%s: No such file or directory", volume, filename)) {
 		return false, nil
-	} else {
-		if err == nil {
-			return true, nil
-		} else {
-			return false, errors.Wrap(err, fmt.Sprintf("Fail to read file %s from volume %s of pod %s in %s",
-				filename, volume, podName, namespace))
-		}
 	}
+	if err == nil {
+		return true, nil
+	}
+	return false, errors.Wrap(err, fmt.Sprintf("Fail to read file %s from volume %s of pod %s in %s",
+		filename, volume, podName, namespace))
 }
 
 func ReadFileFromPodVolume(ctx context.Context, namespace, podName, containerName, volume, filename string) (string, string, error) {
 	arg := []string{"exec", "-n", namespace, "-c", containerName, podName,
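
Here it is the superfluous-else rule: the first branch ends in a return, so the else wrapper adds nothing and both outcomes can sit at the same level. Sketched in isolation (hypothetical exists helper, loosely mirroring FileExistInPV):

	package sketch

	import "github.com/pkg/errors"

	func exists(notFound bool, err error) (bool, error) {
		if notFound {
			return false, nil
		}
		// No else needed: the branch above always returns.
		if err == nil {
			return true, nil
		}
		return false, errors.Wrap(err, "fail to read file")
	}
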
@@ -115,10 +115,9 @@ func DeleteNamespace(ctx context.Context, client TestClient, namespace string, w
 		if err != nil {
 			fmt.Printf("Get namespace %s err: %v", namespace, err)
 			return false, err
-		} else {
-			if !slices.Contains(nsList, namespace) {
-				return true, nil
-			}
 		}
+		if !slices.Contains(nsList, namespace) {
+			return true, nil
+		}
 		fmt.Printf("namespace %q is still being deleted...\n", namespace)
 		logrus.Debugf("namespace %q is still being deleted...", namespace)
@@ -190,16 +189,16 @@ func CleanupNamespaces(ctx context.Context, client TestClient, CaseBaseName stri
 func WaitAllSelectedNSDeleted(ctx context.Context, client TestClient, label string) error {
 	return waitutil.PollImmediateInfinite(5*time.Second,
 		func() (bool, error) {
-			if ns, err := client.ClientGo.CoreV1().Namespaces().List(ctx, metav1.ListOptions{LabelSelector: label}); err != nil {
+			ns, err := client.ClientGo.CoreV1().Namespaces().List(ctx, metav1.ListOptions{LabelSelector: label})
+			if err != nil {
 				return false, err
 			} else if ns == nil {
 				return true, nil
 			} else if len(ns.Items) == 0 {
 				return true, nil
-			} else {
-				logrus.Debugf("%d namespaces is still being deleted...\n", len(ns.Items))
-				return false, nil
 			}
+			logrus.Debugf("%d namespaces is still being deleted...\n", len(ns.Items))
+			return false, nil
 		})
 }
 
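
WaitAllSelectedNSDeleted needs one extra move before the else arms can go: `ns` was declared in the if statement's init clause, so the declaration is split onto its own line, keeping `ns` visible to the `len(ns.Items)` check after the error guard. The same move in miniature, with a hypothetical list function:

	package sketch

	func list() ([]string, error) { return nil, nil }

	func allDeleted() (bool, error) {
		// Split the declaration out of the if so the result outlives the guard.
		items, err := list()
		if err != nil {
			return false, err
		}
		if len(items) == 0 {
			return true, nil
		}
		return false, nil // still waiting
	}
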
@@ -27,7 +27,6 @@ func GetMinioDiskUsage(cloudCredentialsFile string, bslBucket string, bslPrefix
 	toatalSize, err := aws.GetMinioBucketSize(cloudCredentialsFile, bslBucket, bslPrefix, bslConfig)
 	if err != nil {
 		return 0, errors.Errorf("a Failed to get minio bucket size with err %v", err)
-	} else {
-		return toatalSize, nil
 	}
+	return toatalSize, nil
 }
@@ -362,10 +362,9 @@ func (s AWSStorage) IsSnapshotExisted(cloudCredentialsFile, bslConfig, backupObj
 	}
 	if actualCount != snapshotCheck.ExpectCount {
 		return errors.New(fmt.Sprintf("Snapshot count %d is not as expected %d", actualCount, snapshotCheck.ExpectCount))
-	} else {
-		fmt.Printf("Snapshot count %d is as expected %d\n", actualCount, snapshotCheck.ExpectCount)
-		return nil
 	}
+	fmt.Printf("Snapshot count %d is as expected %d\n", actualCount, snapshotCheck.ExpectCount)
+	return nil
 }
 
 func (s AWSStorage) GetMinioBucketSize(cloudCredentialsFile, bslBucket, bslPrefix, bslConfig string) (int64, error) {
@@ -126,9 +126,8 @@ func getStorageAccountKey(credentialsFile, accountName, subscriptionID, resource
 	if os.Getenv(resourceGroupEnvVar) == "" {
 		if resourceGroupCfg == "" {
 			return "", errors.New("Credential file should contain AZURE_RESOURCE_GROUP or AZURE_STORAGE_ACCOUNT_ACCESS_KEY")
-		} else {
-			resourceGroup = resourceGroupCfg
 		}
+		resourceGroup = resourceGroupCfg
 	} else {
 		resourceGroup = os.Getenv(resourceGroupEnvVar)
 	}
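
Note that only the inner else disappears in this hunk. The inner if ends in a return, so its else is superfluous; the outer `if os.Getenv(resourceGroupEnvVar) == ""` branch falls through, so its else arm legitimately stays. The distinction in a compact sketch (hypothetical pick function):

	package sketch

	import "errors"

	func pick(envVal, cfgVal string) (string, error) {
		var group string
		if envVal == "" {
			if cfgVal == "" {
				return "", errors.New("no resource group configured")
			}
			group = cfgVal // former else body, unindented after the return
		} else {
			group = envVal // outer else stays: its if branch falls through
		}
		return group, nil
	}
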
@@ -375,10 +374,9 @@ func (s AzureStorage) IsSnapshotExisted(cloudCredentialsFile, bslConfig, backupN
 	}
 	if snapshotCountFound != snapshotCheck.ExpectCount {
 		return errors.New(fmt.Sprintf("Snapshot count %d is not as expected %d\n", snapshotCountFound, snapshotCheck.ExpectCount))
-	} else {
-		fmt.Printf("Snapshot count %d is as expected %d\n", snapshotCountFound, snapshotCheck.ExpectCount)
-		return nil
 	}
+	fmt.Printf("Snapshot count %d is as expected %d\n", snapshotCountFound, snapshotCheck.ExpectCount)
+	return nil
 }
 
 func (s AzureStorage) GetObject(cloudCredentialsFile, bslBucket, bslPrefix, bslConfig, objectKey string) (io.ReadCloser, error) {
@@ -138,10 +138,9 @@ func (s GCSStorage) IsSnapshotExisted(cloudCredentialsFile, bslConfig, backupObj
 
 	if snapshotCountFound != snapshotCheck.ExpectCount {
 		return errors.New(fmt.Sprintf("Snapshot count %d is not as expected %d\n", snapshotCountFound, len(snapshotCheck.SnapshotIDList)))
-	} else {
-		fmt.Printf("Snapshot count %d is as expected %d\n", snapshotCountFound, len(snapshotCheck.SnapshotIDList))
-		return nil
 	}
+	fmt.Printf("Snapshot count %d is as expected %d\n", snapshotCountFound, len(snapshotCheck.SnapshotIDList))
+	return nil
 }
 
 func (s GCSStorage) GetObject(cloudCredentialsFile, bslBucket, bslPrefix, bslConfig, objectKey string) (io.ReadCloser, error) {
@@ -192,15 +192,14 @@ func generateVSpherePlugin(veleroCfg *test.VeleroConfig) error {
 
 	if err := createVCCredentialSecret(cli.ClientGo, veleroCfg.VeleroNamespace); err != nil {
 		// For TKGs/uTKG the VC secret is not supposed to exist.
-		if apierrors.IsNotFound(err) {
-			clusterFlavor = "GUEST"
-		} else {
+		if !apierrors.IsNotFound(err) {
 			return errors.WithMessagef(
 				err,
 				"Failed to create virtual center credential secret in %s namespace",
 				veleroCfg.VeleroNamespace,
 			)
 		}
+		clusterFlavor = "GUEST"
 	}
 
 	_, err := k8s.CreateConfigMap(
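
Same inversion as the CSI hunk, with the error construction as the guard body: anything other than the tolerated not-found error fails fast, and the expected case falls through to set the flavor. Reduced to a sketch (hypothetical ensure function; errors.Is stands in for apierrors.IsNotFound):

	package sketch

	import "errors"

	var errNotFound = errors.New("not found")

	func ensure(err error) (string, error) {
		flavor := ""
		if err != nil {
			// Fail fast on anything but the tolerated not-found case.
			if !errors.Is(err, errNotFound) {
				return "", err
			}
			flavor = "GUEST"
		}
		return flavor, nil
	}
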
@@ -617,14 +616,13 @@ func IsVeleroReady(ctx context.Context, veleroCfg *test.VeleroConfig) (bool, err
 		"-o", "json", "-n", namespace))
 	if err != nil {
 		return false, errors.Wrapf(err, "failed to get the node-agent daemonset, stdout=%s, stderr=%s", stdout, stderr)
-	} else {
-		daemonset := &apps.DaemonSet{}
-		if err = json.Unmarshal([]byte(stdout), daemonset); err != nil {
-			return false, errors.Wrapf(err, "failed to unmarshal the node-agent daemonset")
-		}
-		if daemonset.Status.DesiredNumberScheduled != daemonset.Status.NumberAvailable {
-			return false, fmt.Errorf("the available number pod %d in node-agent daemonset not equal to scheduled number %d", daemonset.Status.NumberAvailable, daemonset.Status.DesiredNumberScheduled)
-		}
 	}
+	daemonset := &apps.DaemonSet{}
+	if err = json.Unmarshal([]byte(stdout), daemonset); err != nil {
+		return false, errors.Wrapf(err, "failed to unmarshal the node-agent daemonset")
+	}
+	if daemonset.Status.DesiredNumberScheduled != daemonset.Status.NumberAvailable {
+		return false, fmt.Errorf("the available number pod %d in node-agent daemonset not equal to scheduled number %d", daemonset.Status.NumberAvailable, daemonset.Status.DesiredNumberScheduled)
+	}
 }
 
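
The payoff is most visible in IsVeleroReady: the else arm held an unmarshal plus two nested checks, and after the guard returns on the kubectl error those read as a straight sequence. In outline, with hypothetical types standing in for the Kubernetes API objects:

	package sketch

	import (
		"encoding/json"
		"fmt"
	)

	type status struct {
		Desired   int32 `json:"desired"`
		Available int32 `json:"available"`
	}

	func ready(raw []byte, err error) (bool, error) {
		if err != nil {
			return false, fmt.Errorf("failed to get the daemonset: %w", err)
		}
		// Former else body, now straight-line code.
		var st status
		if err := json.Unmarshal(raw, &st); err != nil {
			return false, fmt.Errorf("failed to unmarshal the daemonset: %w", err)
		}
		if st.Desired != st.Available {
			return false, fmt.Errorf("available %d != desired %d", st.Available, st.Desired)
		}
		return true, nil
	}
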
@@ -396,9 +396,8 @@ func CheckScheduleWithResourceOrder(ctx context.Context, veleroCLI, veleroNamesp
 	}
 	if reflect.DeepEqual(schedule.Spec.Template.OrderedResources, order) {
 		return nil
-	} else {
-		return fmt.Errorf("resource order %v set in schedule command is not equal with order %v stored in schedule cr", order, schedule.Spec.Template.OrderedResources)
 	}
+	return fmt.Errorf("resource order %v set in schedule command is not equal with order %v stored in schedule cr", order, schedule.Spec.Template.OrderedResources)
 }
 
 func CheckBackupWithResourceOrder(ctx context.Context, veleroCLI, veleroNamespace, backupName string, orderResources map[string]string) error {
@@ -417,9 +416,8 @@ func CheckBackupWithResourceOrder(ctx context.Context, veleroCLI, veleroNamespac
 	}
 	if reflect.DeepEqual(backup.Spec.OrderedResources, orderResources) {
 		return nil
-	} else {
-		return fmt.Errorf("resource order %v set in backup command is not equal with order %v stored in backup cr", orderResources, backup.Spec.OrderedResources)
 	}
+	return fmt.Errorf("resource order %v set in backup command is not equal with order %v stored in backup cr", orderResources, backup.Spec.OrderedResources)
 }
 
 // VeleroBackupNamespace uses the veleroCLI to backup a namespace.
@@ -1065,37 +1063,35 @@ func IsBackupExist(ctx context.Context, backupName string, veleroCfg *VeleroConf
 
 func WaitBackupDeleted(ctx context.Context, backupName string, timeout time.Duration, veleroCfg *VeleroConfig) error {
 	return wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
-		if exist, err := IsBackupExist(ctx, backupName, veleroCfg); err != nil {
+		exist, err := IsBackupExist(ctx, backupName, veleroCfg)
+		if err != nil {
 			return false, err
-		} else {
-			if exist {
-				return false, nil
-			} else {
-				fmt.Printf("Backup %s does not exist\n", backupName)
-				return true, nil
-			}
 		}
+		if exist {
+			return false, nil
+		}
+		fmt.Printf("Backup %s does not exist\n", backupName)
+		return true, nil
 	})
 }
 
 func WaitForExpectedStateOfBackup(ctx context.Context, backupName string,
 	timeout time.Duration, existing bool, veleroCfg *VeleroConfig) error {
 	return wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
-		if exist, err := IsBackupExist(ctx, backupName, veleroCfg); err != nil {
+		exist, err := IsBackupExist(ctx, backupName, veleroCfg)
+		if err != nil {
 			return false, err
-		} else {
-			msg := "does not exist as expect"
-			if exist {
-				msg = "was found as expect"
-			}
-			if exist == existing {
-				fmt.Println("Backup <" + backupName + "> " + msg)
-				return true, nil
-			} else {
-				fmt.Println("Backup <" + backupName + "> " + msg)
-				return false, nil
-			}
 		}
+		msg := "does not exist as expect"
+		if exist {
+			msg = "was found as expect"
+		}
+		if exist == existing {
+			fmt.Println("Backup <" + backupName + "> " + msg)
+			return true, nil
+		}
+		fmt.Println("Backup <" + backupName + "> " + msg)
+		return false, nil
 	})
 }
 
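
One thing the mechanical rewrite leaves on the table: in WaitForExpectedStateOfBackup both outcomes print the identical line, so with the else gone the print could be hoisted above the comparison and the branch collapsed. A possible further simplification, offered as a suggestion rather than part of this commit:

	package sketch

	import "fmt"

	func report(backupName, msg string, exist, existing bool) (bool, error) {
		// The message is the same on both paths, so print once up front.
		fmt.Println("Backup <" + backupName + "> " + msg)
		return exist == existing, nil
	}
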
@@ -1256,9 +1252,8 @@ func SnapshotCRsCountShouldBe(ctx context.Context, namespace, backupName string,
 	}
 	if count == expectedCount {
 		return nil
-	} else {
-		return errors.New(fmt.Sprintf("SnapshotCR count %d of backup %s in namespace %s is not as expected %d", count, backupName, namespace, expectedCount))
 	}
+	return errors.New(fmt.Sprintf("SnapshotCR count %d of backup %s in namespace %s is not as expected %d", count, backupName, namespace, expectedCount))
 }
 
 func BackupRepositoriesCountShouldBe(ctx context.Context, veleroNamespace, targetNamespace string, expectedCount int) error {
@@ -1268,9 +1263,8 @@ func BackupRepositoriesCountShouldBe(ctx context.Context, veleroNamespace, targe
 	}
 	if len(resticArr) == expectedCount {
 		return nil
-	} else {
-		return errors.New(fmt.Sprintf("BackupRepositories count %d in namespace %s is not as expected %d", len(resticArr), targetNamespace, expectedCount))
 	}
+	return errors.New(fmt.Sprintf("BackupRepositories count %d in namespace %s is not as expected %d", len(resticArr), targetNamespace, expectedCount))
 }
 
 func GetRepositories(ctx context.Context, veleroNamespace, targetNamespace string) ([]string, error) {
@@ -1588,9 +1582,8 @@ func IsSupportUploaderType(version string) (bool, error) {
 	}
 	if v.AtLeast(verSupportUploaderType) {
 		return true, nil
-	} else {
-		return false, nil
 	}
+	return false, nil
 }
 
 func GetVeleroPodName(ctx context.Context) ([]string, error) {
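
And the smallest case: with the else dropped, IsSupportUploaderType ends in `if v.AtLeast(...) { return true, nil }` followed by `return false, nil`, which could collapse further to returning the comparison itself. Again a possible follow-up, not something this commit does (hypothetical version type below):

	package sketch

	type version struct{ major int }

	func (v version) AtLeast(o version) bool { return v.major >= o.major }

	var verSupportUploaderType = version{major: 1}

	func isSupportUploaderType(v version) (bool, error) {
		return v.AtLeast(verSupportUploaderType), nil
	}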