fix: Fixes migrating when a remote already exists (#23912)
* fix: handle migrating with already defined remotes
* test: add test to verify migrating already defined remotes
* fix: properly handle Up

pull/23933/head
parent 666cabb1f4
commit f026d7bdaf
@@ -525,6 +525,39 @@ func TestMigrateDownFromReplicationsWithName(t *testing.T) {
	require.Equal(t, platform.ID(10), rs.Replications[0].ID)
}

func TestMigrateUpToRemotesNullRemoteOrg(t *testing.T) {
	sqlStore, clean := sqlite.NewTestStore(t)
	logger := zaptest.NewLogger(t)
	sqliteMigrator := sqlite.NewMigrator(sqlStore, logger)
	require.NoError(t, sqliteMigrator.UpUntil(ctx, 7, migrations.AllUp))

	// Make sure foreign-key checking is enabled.
	_, err := sqlStore.DB.Exec("PRAGMA foreign_keys = ON;")
	require.NoError(t, err)

	testStore := NewStore(sqlStore)
	defer clean(t)

	insertRemote(t, testStore, replication.RemoteID)

	req := createReq
	req.RemoteBucketID = platform.ID(100)
	_, err = testStore.CreateReplication(ctx, platform.ID(10), req)
	require.NoError(t, err)

	req.RemoteBucketID = platform.ID(0)
	req.RemoteBucketName = "testbucket"
	req.Name = "namedrepl"
	_, err = testStore.CreateReplication(ctx, platform.ID(20), req)
	require.NoError(t, err)

	replications, err := testStore.ListReplications(context.Background(), influxdb.ReplicationListFilter{OrgID: replication.OrgID})
	require.NoError(t, err)
	require.Equal(t, 2, len(replications.Replications))

	require.NoError(t, sqliteMigrator.UpUntil(ctx, 8, migrations.AllUp))
}

func TestGetFullHTTPConfig(t *testing.T) {
	t.Parallel()

@@ -1,5 +1,6 @@
-- Removes the "NOT NULL" from remote_org_id
ALTER TABLE remotes RENAME TO _remotes_old;
DROP INDEX idx_remote_url_per_org;

CREATE TABLE remotes (
	id VARCHAR(16) NOT NULL PRIMARY KEY,
@@ -28,7 +29,6 @@ INSERT INTO remotes (
	created_at,
	updated_at
) SELECT * FROM _remotes_old;
DROP TABLE _remotes_old;
-- Create indexes on lookup patterns we expect to be common
CREATE INDEX idx_remote_url_per_org ON remotes (org_id, remote_url);

@@ -76,6 +76,7 @@ INSERT INTO replications (
	updated_at
) SELECT * FROM _replications_old;
DROP TABLE _replications_old;
DROP TABLE _remotes_old;

-- Create indexes on lookup patterns we expect to be common
CREATE INDEX idx_local_bucket_id_per_org ON replications (org_id, local_bucket_id);

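Both migration files use the standard SQLite table-rebuild pattern: SQLite's ALTER TABLE cannot drop a NOT NULL constraint in place, so each script renames the existing table, recreates it with the relaxed schema, copies the rows across with INSERT ... SELECT, and only then drops the old copy and recreates the indexes (which is why idx_remote_url_per_org is dropped up front and created again at the end). A minimal, self-contained sketch of the same pattern driven from Go follows; the table, column, and index names are invented for illustration, and any database/sql SQLite driver would do:

package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3" // assumption: any database/sql SQLite driver works here
)

// rebuildTable relaxes a NOT NULL constraint the same way the migration does:
// rename the table, recreate it without the constraint, copy the rows, then
// drop the old table and rebuild the index. Running it in one transaction
// keeps a failed rebuild from leaving a half-migrated schema behind.
func rebuildTable(db *sql.DB) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()

	stmts := []string{
		`ALTER TABLE things RENAME TO _things_old;`,
		`DROP INDEX IF EXISTS idx_things_name;`,
		`CREATE TABLE things (
			id   VARCHAR(16) NOT NULL PRIMARY KEY,
			name TEXT -- NOT NULL dropped here
		);`,
		`INSERT INTO things (id, name) SELECT id, name FROM _things_old;`,
		`DROP TABLE _things_old;`,
		`CREATE INDEX idx_things_name ON things (name);`,
	}
	for _, stmt := range stmts {
		if _, err := tx.Exec(stmt); err != nil {
			return err
		}
	}
	return tx.Commit()
}

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Seed the original, stricter schema so the rebuild has something to relax.
	for _, stmt := range []string{
		`CREATE TABLE things (id VARCHAR(16) NOT NULL PRIMARY KEY, name TEXT NOT NULL);`,
		`CREATE INDEX idx_things_name ON things (name);`,
	} {
		if _, err := db.Exec(stmt); err != nil {
			log.Fatal(err)
		}
	}
	if err := rebuildTable(db); err != nil {
		log.Fatal(err)
	}
	log.Println("rebuild succeeded")
}
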
@@ -33,6 +33,13 @@ func (m *Migrator) SetBackupPath(path string) {
}

func (m *Migrator) Up(ctx context.Context, source embed.FS) error {
	return m.UpUntil(ctx, -1, source)
}

// UpUntil migrates until a specific migration.
// -1 or 0 will run all migrations, any other number will run up until that.
// Returns no error if untilMigration is less than the already run migrations.
func (m *Migrator) UpUntil(ctx context.Context, untilMigration int, source embed.FS) error {
	knownMigrations, err := source.ReadDir(".")
	if err != nil {
		return err

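The new Up wrapper keeps existing callers unchanged, while UpUntil lets a test pause at a known schema version, create data against it, and then apply the remaining migrations. A sketch of that flow, reusing the helpers visible in the test hunk above (ctx, migrations.AllUp, and the sqlite test helpers are assumed to be in scope as they are there; the test name and placeholder comment are invented):

// Sketch only: pause before the migration being fixed here, seed data, finish.
func TestPartialThenFullMigration(t *testing.T) {
	sqlStore, clean := sqlite.NewTestStore(t)
	defer clean(t)

	migrator := sqlite.NewMigrator(sqlStore, zaptest.NewLogger(t))

	// Apply everything up to and including migration 7, leaving 8+ unapplied.
	require.NoError(t, migrator.UpUntil(ctx, 7, migrations.AllUp))

	// ... create remotes/replications that predate migration 8 here ...

	// Up is now a thin wrapper over UpUntil(ctx, -1, source) and applies the rest.
	require.NoError(t, migrator.Up(ctx, migrations.AllUp))
}
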
@@ -60,7 +67,16 @@ func (m *Migrator) Up(ctx context.Context, source embed.FS) error {
		}
	}

-	migrationsToDo := len(knownMigrations[lastMigration:])
+	var migrationsToDo int
+	if untilMigration < 1 {
+		migrationsToDo = len(knownMigrations[lastMigration:])
+		untilMigration = len(knownMigrations)
+	} else if untilMigration >= lastMigration {
+		migrationsToDo = len(knownMigrations[lastMigration:untilMigration])
+	} else {
+		return nil
+	}

	if migrationsToDo == 0 {
		return nil
	}

@@ -85,7 +101,7 @@ func (m *Migrator) Up(ctx context.Context, source embed.FS) error {

	m.log.Info("Bringing up metadata migrations", zap.Int("migration_count", migrationsToDo))

-	for _, f := range knownMigrations[lastMigration:] {
+	for _, f := range knownMigrations[lastMigration:untilMigration] {
		n := f.Name()

		m.log.Debug("Executing metadata migration", zap.String("migration_name", n))

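Concrete numbers make the new bounds handling easier to follow. The sketch below mirrors the branch added to UpUntil with invented values (ten known migration files, seven already applied); it is a standalone illustration, not code from the repository:

package main

import "fmt"

// countToDo mirrors the untilMigration branch added in the diff above.
func countToDo(knownMigrations []string, lastMigration, untilMigration int) int {
	var migrationsToDo int
	if untilMigration < 1 {
		migrationsToDo = len(knownMigrations[lastMigration:])
		untilMigration = len(knownMigrations) // the real code reuses this as the loop's slice bound
	} else if untilMigration >= lastMigration {
		migrationsToDo = len(knownMigrations[lastMigration:untilMigration])
	} else {
		migrationsToDo = 0 // equivalent to UpUntil's early "return nil"
	}
	return migrationsToDo
}

func main() {
	knownMigrations := make([]string, 10) // ten migration files in the embedded source
	lastMigration := 7                    // seven already recorded as applied

	fmt.Println(countToDo(knownMigrations, lastMigration, 8))  // 1: run only migration 8
	fmt.Println(countToDo(knownMigrations, lastMigration, -1)) // 3: Up / UpUntil(-1) runs the rest
	fmt.Println(countToDo(knownMigrations, lastMigration, 5))  // 0: target already behind, no error
}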