chore: Revert "feat: add an option to merge full and pitr restore job (#9404)" by gnolong · Pull Request #9476 · apecloud/kubeblocks

Merged · 1 commit · Jun 23, 2025
7 changes: 0 additions & 7 deletions pkg/constant/annotations.go
@@ -62,13 +62,6 @@ const (
HorizontalScaleBackupPolicyTemplateKey = "apps.kubeblocks.io/horizontal-scale-backup-policy-template"
)

- const (
- // SkipBaseBackupRestoreInPitrAnnotationKey is an experimental api to unify full and continuous restore job.
- // It is set on the actionset CR.
- // If this annotaion is set to "true", then only one job will be created during restore.
- SkipBaseBackupRestoreInPitrAnnotationKey = "dataprotection.kubeblocks.io/skip-base-backup-restore-in-pitr"
- )

// annotations for multi-cluster
const (
KBAppMultiClusterPlacementKey = "apps.kubeblocks.io/multi-cluster-placement"
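The constant removed above was the opt-in switch for the experimental single-job restore: setting it to "true" on an ActionSet made the restore controller skip the separate base-backup job during PITR. Below is a minimal, standalone sketch of such an annotation gate; only the annotation key comes from the diff, while the helper name and the plain-map parameter are illustrative and not part of the KubeBlocks code.

package main

import "fmt"

// skipBaseBackupRestoreInPitrKey mirrors the constant deleted by this revert.
const skipBaseBackupRestoreInPitrKey = "dataprotection.kubeblocks.io/skip-base-backup-restore-in-pitr"

// skipBaseBackupRestore reports whether the experimental single-job restore
// was requested. In KubeBlocks the annotations were read from the ActionSet
// CR's metadata; here a plain map stands in for that.
func skipBaseBackupRestore(annotations map[string]string) bool {
	if annotations == nil {
		return false
	}
	return annotations[skipBaseBackupRestoreInPitrKey] == "true"
}

func main() {
	actionSetAnnotations := map[string]string{
		skipBaseBackupRestoreInPitrKey: "true",
	}
	fmt.Println(skipBaseBackupRestore(actionSetAnnotations)) // true
	fmt.Println(skipBaseBackupRestore(nil))                  // false
}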
32 changes: 8 additions & 24 deletions pkg/dataprotection/restore/manager.go
@@ -166,41 +166,25 @@ func (r *RestoreManager) BuildContinuousRestoreManager(reqCtx intctrlutil.RequestCtx
}
}

- baseBackupSet, err := r.getBaseBackupActionSetForContinuous(reqCtx, cli, continuousBackup, metav1.NewTime(restoreTime))
- if err != nil || baseBackupSet == nil {
+ backupSet, err := r.getBackupActionSetForContinuous(reqCtx, cli, continuousBackup, metav1.NewTime(restoreTime))
+ if err != nil || backupSet == nil {
return err
}

- skipBaseBackupRestoreInPitr := false
- if continuousBackupSet.ActionSet.Annotations != nil {
- if continuousBackupSet.ActionSet.Annotations[constant.SkipBaseBackupRestoreInPitrAnnotationKey] == "true" {
- skipBaseBackupRestoreInPitr = true
- }
- }

// set base backup
- continuousBackupSet.BaseBackup = baseBackupSet.Backup
- if baseBackupSet.ActionSet != nil && baseBackupSet.ActionSet.Spec.BackupType == dpv1alpha1.BackupTypeIncremental {
- if skipBaseBackupRestoreInPitr {
- return intctrlutil.NewFatalError("unify incremental and continuous restore job is not supported")
- }
- if err = r.BuildIncrementalBackupActionSet(reqCtx, cli, *baseBackupSet); err != nil {
+ continuousBackupSet.BaseBackup = backupSet.Backup
+ if backupSet.ActionSet != nil && backupSet.ActionSet.Spec.BackupType == dpv1alpha1.BackupTypeIncremental {
+ if err = r.BuildIncrementalBackupActionSet(reqCtx, cli, *backupSet); err != nil {
return err
}
r.SetBackupSets(continuousBackupSet)
} else {
- if skipBaseBackupRestoreInPitr {
- r.Recorder.Event(r.Restore, corev1.EventTypeNormal, "SkipBaseBackupRestoreInPitr", "base backup restore skipped")
- r.SetBackupSets(continuousBackupSet)
- } else {
- r.SetBackupSets(*baseBackupSet, continuousBackupSet)
- }
+ r.SetBackupSets(*backupSet, continuousBackupSet)
}
return nil
}

- // getBaseBackupActionSetForContinuous gets full or incremental backup and actionSet for continuous.
- func (r *RestoreManager) getBaseBackupActionSetForContinuous(reqCtx intctrlutil.RequestCtx, cli client.Client, continuousBackup *dpv1alpha1.Backup, restoreTime metav1.Time) (*BackupActionSet, error) {
+ // getBackupActionSetForContinuous gets full or incremental backup and actionSet for continuous.
+ func (r *RestoreManager) getBackupActionSetForContinuous(reqCtx intctrlutil.RequestCtx, cli client.Client, continuousBackup *dpv1alpha1.Backup, restoreTime metav1.Time) (*BackupActionSet, error) {
notFoundLatestBackup := func() (*BackupActionSet, error) {
return nil, intctrlutil.NewFatalError(fmt.Sprintf(`can not found latest full or incremental backup based on backupPolicy "%s" and specified restoreTime "%s"`,
continuousBackup.Spec.BackupPolicyName, restoreTime))
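After the revert, BuildContinuousRestoreManager always restores a base backup ahead of the continuous (PITR) backup: an incremental base is first expanded through BuildIncrementalBackupActionSet, otherwise the base and continuous sets are queued together via SetBackupSets. The following condensed sketch only mirrors that branch shape; every type and helper in it is a stand-in, not a KubeBlocks API.

package main

import "fmt"

type backupType string

const (
	backupTypeFull        backupType = "Full"
	backupTypeIncremental backupType = "Incremental"
	backupTypeContinuous  backupType = "Continuous"
)

type backupSet struct {
	name string
	typ  backupType
}

type restoreManager struct {
	postReadyBackupSets []backupSet
}

func (r *restoreManager) setBackupSets(sets ...backupSet) {
	r.postReadyBackupSets = append(r.postReadyBackupSets, sets...)
}

// buildContinuous mirrors the post-revert branch: the base backup is always
// queued before the continuous backup; there is no annotation that collapses
// them into a single restore job.
func (r *restoreManager) buildContinuous(base, continuous backupSet) {
	if base.typ == backupTypeIncremental {
		// stand-in for expanding the incremental chain
		// (BuildIncrementalBackupActionSet in the real code)
		r.setBackupSets(base)
		r.setBackupSets(continuous)
		return
	}
	r.setBackupSets(base, continuous)
}

func main() {
	m := &restoreManager{}
	m.buildContinuous(
		backupSet{name: "full-base", typ: backupTypeFull},
		backupSet{name: "pitr", typ: backupTypeContinuous},
	)
	fmt.Println(len(m.postReadyBackupSets)) // 2: the base restore is never skipped
}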
86 changes: 5 additions & 81 deletions pkg/dataprotection/restore/manager_test.go
@@ -97,40 +97,16 @@ var _ = Describe("Backup Deleter Test", func() {

})

- mockBackupForRestore := func(
- testCtx *testutil.TestContext, actionSetName, backupPVCName string,
- mockBackupCompleted, useVolumeSnapshotBackup bool,
- backupType dpv1alpha1.BackupType,
- startTime, endTime string,
- backupName string,
- ) *dpv1alpha1.Backup {
- backup := testdp.NewFakeBackup(testCtx, func(backup *dpv1alpha1.Backup) {
- if backup.Labels == nil {
- backup.Labels = make(map[string]string)
- }
- backup.Labels[dptypes.BackupTypeLabelKey] = string(backupType)
- backup.Labels[dptypes.BackupPolicyLabelKey] = testdp.BackupPolicyName
- if backupName != "" {
- backup.Name = backupName
- }
- })
+ mockBackupForRestore := func(testCtx *testutil.TestContext, actionSetName, backupPVCName string, mockBackupCompleted, useVolumeSnapshotBackup bool) *dpv1alpha1.Backup {
+ backup := testdp.NewFakeBackup(testCtx, nil)
if mockBackupCompleted {
// then mock backup to completed
backupMethodName := testdp.BackupMethodName
if useVolumeSnapshotBackup {
backupMethodName = testdp.VSBackupMethodName
}
Expect(testapps.ChangeObjStatus(testCtx, backup, func() {
- var end *metav1.Time
- if endTime != "" {
- endTime, _ := time.Parse(time.RFC3339, endTime)
- end = &metav1.Time{Time: endTime}
- }
- var start *metav1.Time
- if startTime != "" {
- startTime, _ := time.Parse(time.RFC3339, startTime)
- start = &metav1.Time{Time: startTime}
- }
+ endTime, _ := time.Parse(time.RFC3339, "2023-01-01T10:00:00Z")
backup.Status.Phase = dpv1alpha1.BackupPhaseCompleted
backup.Status.PersistentVolumeClaimName = backupPVCName
testdp.MockBackupStatusTarget(backup, dpv1alpha1.PodSelectionStrategyAny)
@@ -139,8 +115,7 @@ var _ = Describe("Backup Deleter Test", func() {
}
backup.Status.TimeRange = &dpv1alpha1.BackupTimeRange{
TimeZone: "+08:00",
- Start: start,
- End: end,
+ End: &metav1.Time{Time: endTime},
}
testdp.MockBackupStatusMethod(backup, backupMethodName, testdp.DataVolumeName, actionSetName)
})).Should(Succeed())
@@ -150,7 +125,7 @@ var _ = Describe("Backup Deleter Test", func() {

initResources := func(reqCtx intctrlutil.RequestCtx, _ int, useVolumeSnapshot bool, change func(f *testdp.MockRestoreFactory)) (*RestoreManager, *BackupActionSet) {
By("create a completed backup")
- backup := mockBackupForRestore(&testCtx, actionSet.Name, testdp.BackupPVCName, true, useVolumeSnapshot, dpv1alpha1.BackupTypeFull, "", "2023-01-01T10:00:00Z", "")
+ backup := mockBackupForRestore(&testCtx, actionSet.Name, testdp.BackupPVCName, true, useVolumeSnapshot)

schedulingSpec := dpv1alpha1.SchedulingSpec{
NodeName: nodeName,
@@ -383,57 +358,6 @@ var _ = Describe("Backup Deleter Test", func() {
})).Should(Succeed())
testPostReady(false)
})

Context("BuildContinuousRestoreManager", func() {
It("respects UnifyFullAndContinuousRestore annotation", func() {
By("create a continuous backup")
continuousBackup := mockBackupForRestore(
&testCtx, actionSet.Name, testdp.BackupPVCName, true, false, dpv1alpha1.BackupTypeContinuous,
"2023-01-01T09:00:00Z", "2023-01-01T12:00:00Z", "test-backup-continuous",
)

By("create a completed backup")
_ = mockBackupForRestore(&testCtx, actionSet.Name, testdp.BackupPVCName, true, false, dpv1alpha1.BackupTypeFull, "", "2023-01-01T10:00:00Z", "")

schedulingSpec := dpv1alpha1.SchedulingSpec{
NodeName: nodeName,
}

By("create restore")
restore := testdp.NewRestoreFactory(testCtx.DefaultNamespace, testdp.RestoreName).
SetBackup(continuousBackup.Name, testCtx.DefaultNamespace).
SetSchedulingSpec(schedulingSpec).
Create(&testCtx).
SetRestoreTime("2023-01-01T11:30:00Z").
Get()

By("create restore manager")
reqCtx := getReqCtx()
restoreMGR := NewRestoreManager(restore, recorder, k8sClient.Scheme(), k8sClient)
backupSet, err := restoreMGR.GetBackupActionSetByNamespaced(reqCtx, k8sClient, continuousBackup.Name, testCtx.DefaultNamespace)
Expect(err).ShouldNot(HaveOccurred())

Expect(restoreMGR.BuildContinuousRestoreManager(reqCtx, k8sClient, *backupSet)).Should(Succeed())
Expect(restoreMGR.PostReadyBackupSets).Should(HaveLen(2))

By("set UnifyFullAndContinuousRestore annotation")
Eventually(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(actionSet), func(actionset *dpv1alpha1.ActionSet) {
if actionset.Annotations == nil {
actionset.Annotations = make(map[string]string)
}
actionset.Annotations[constant.SkipBaseBackupRestoreInPitrAnnotationKey] = "true"
})).Should(Succeed())

By("check length of backupsets")
restoreMGR = NewRestoreManager(restore, recorder, k8sClient.Scheme(), k8sClient)
backupSet, err = restoreMGR.GetBackupActionSetByNamespaced(reqCtx, k8sClient, continuousBackup.Name, testCtx.DefaultNamespace)
Expect(err).ShouldNot(HaveOccurred())

Expect(restoreMGR.BuildContinuousRestoreManager(reqCtx, k8sClient, *backupSet)).Should(Succeed())
Expect(restoreMGR.PostReadyBackupSets).Should(HaveLen(1))

})
})
})

})
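The simplified test helper above hard-codes the backup end time instead of accepting start/end parameters. For reference, wrapping an RFC3339 timestamp into the metav1.Time pointer that a BackupTimeRange-style struct stores looks like this; backupTimeRange is a local stand-in for dpv1alpha1.BackupTimeRange, not the real type.

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// backupTimeRange is a stand-in for dpv1alpha1.BackupTimeRange as used in the
// test diff above.
type backupTimeRange struct {
	TimeZone string
	End      *metav1.Time
}

func main() {
	// Parse the fixed end time used by the simplified helper.
	endTime, err := time.Parse(time.RFC3339, "2023-01-01T10:00:00Z")
	if err != nil {
		panic(err)
	}
	tr := backupTimeRange{
		TimeZone: "+08:00",
		End:      &metav1.Time{Time: endTime},
	}
	fmt.Println(tr.TimeZone, tr.End.Format(time.RFC3339)) // +08:00 2023-01-01T10:00:00Z
}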