Fix panic when pruning shard groups

* Fix #7812 - Panic when pruning shard groups
* Update CHANGELOG.md

branch: pull/7824/head
parent: e7b7984c27
commit: f05df2a263
@@ -42,6 +42,7 @@ The stress tool `influx_stress` will be removed in a subsequent release. We reco
 - [#7740](https://github.com/influxdata/influxdb/issues/7740): Fix parse key panic when missing tag value @oiooj
 - [#7563](https://github.com/influxdata/influxdb/issues/7563): RP should not allow `INF` or `0` as a shard duration.
 - [#7585](https://github.com/influxdata/influxdb/pull/7585): Return Error instead of panic when decoding point values.
+- [#7812](https://github.com/influxdata/influxdb/issues/7812): Fix slice out of bounds panic when pruning shard groups. Thanks @vladlopes

 ## v1.1.1 [2016-12-06]

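For context on the hunk below: in the old code, `i` is the index from the outer `range data.Databases` loop, yet it is used to slice `rp.ShardGroups`, a different and often shorter slice. The standalone sketch that follows (not InfluxDB code; the slice contents are made up) shows the two ways that goes wrong: it drops the wrong element when the slice happens to be long enough, and it panics with the slice-bounds error reported in #7812 when it is not.

```go
package main

import "fmt"

func main() {
	// i plays the role of the *database* index from the outer loop; it says
	// nothing about which shard group should be removed.
	i := 1

	// Two shard groups: append(two[:1], two[2:]...) silently keeps the first
	// and drops the second, regardless of which one actually expired.
	two := []string{"sg-a", "sg-b"}
	fmt.Println(append(two[:i], two[i+1:]...)) // [sg-a]

	// One shard group: one[i+1:] is one[2:], which is out of range for a
	// slice of length 1 and panics with "slice bounds out of range".
	one := []string{"sg-a"}
	defer func() { fmt.Println("recovered:", recover()) }()
	_ = append(one[:i], one[i+1:]...)
}
```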
@@ -676,15 +676,15 @@ func (c *Client) PruneShardGroups() error {
     data := c.cacheData.Clone()
     for i, d := range data.Databases {
         for j, rp := range d.RetentionPolicies {
+            var remainingShardGroups []ShardGroupInfo
             for _, sgi := range rp.ShardGroups {
                 if sgi.DeletedAt.IsZero() || !expiration.After(sgi.DeletedAt) {
+                    remainingShardGroups = append(remainingShardGroups, sgi)
                     continue
                 }
-                // we are safe to delete the shard group as it's been marked deleted for the required expiration
-                s := append(rp.ShardGroups[:i], rp.ShardGroups[i+1:]...)
-                data.Databases[i].RetentionPolicies[j].ShardGroups = s
                 changed = true
             }
+            data.Databases[i].RetentionPolicies[j].ShardGroups = remainingShardGroups
         }
     }
     if changed {
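The fix above switches to the rebuild pattern: collect the shard groups that should survive into a fresh slice and assign it once per retention policy, after the inner loop, so the slice being ranged over is never re-sliced with a foreign index. A minimal, self-contained sketch of that pattern follows; the `shardGroup` type, the `pruneExpired` helper, and the concrete timestamps are illustrative stand-ins, and only the keep/prune condition mirrors the diff.

```go
package main

import (
	"fmt"
	"time"
)

// shardGroup is a stand-in for meta.ShardGroupInfo with just the field the
// pruning condition looks at.
type shardGroup struct {
	ID        uint64
	DeletedAt time.Time
}

// pruneExpired keeps every group that is either not deleted at all or not yet
// past the expiration cutoff, and reports whether anything was dropped; this
// is the same condition used in the fixed loop above.
func pruneExpired(groups []shardGroup, expiration time.Time) (remaining []shardGroup, changed bool) {
	for _, sg := range groups {
		if sg.DeletedAt.IsZero() || !expiration.After(sg.DeletedAt) {
			remaining = append(remaining, sg)
			continue
		}
		changed = true
	}
	return remaining, changed
}

func main() {
	now := time.Now()
	groups := []shardGroup{
		{ID: 1},                                           // never deleted: kept
		{ID: 2, DeletedAt: now.Add(-30 * 24 * time.Hour)}, // deleted a month ago: pruned
		{ID: 3, DeletedAt: now.Add(-1 * time.Hour)},       // deleted recently: kept
	}
	// Treat "deleted more than two weeks ago" as prunable.
	remaining, changed := pruneExpired(groups, now.Add(-14*24*time.Hour))
	fmt.Println(len(remaining), changed) // 2 true
}
```

Assigning the rebuilt slice once after the loop also avoids mutating the backing array of `rp.ShardGroups` while the inner `range` is still iterating over it, which the old in-place `append` did.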
@@ -972,10 +972,14 @@ func TestMetaClient_PruneShardGroups(t *testing.T) {
         t.Fatal(err)
     }

+    if _, err := c.CreateDatabase("db1"); err != nil {
+        t.Fatal(err)
+    }
+
     duration := 1 * time.Hour
     replicaN := 1

-    if _, err := c.CreateRetentionPolicy("db0", &meta.RetentionPolicySpec{
+    if _, err := c.CreateRetentionPolicy("db1", &meta.RetentionPolicySpec{
         Name:     "rp0",
         Duration: &duration,
         ReplicaN: &replicaN,
@@ -983,14 +987,21 @@ func TestMetaClient_PruneShardGroups(t *testing.T) {
         t.Fatal(err)
     }

-    sg, err := c.CreateShardGroup("db0", "autogen", time.Now())
+    sg, err := c.CreateShardGroup("db1", "autogen", time.Now())
     if err != nil {
         t.Fatal(err)
     } else if sg == nil {
         t.Fatalf("expected ShardGroup")
     }

-    sg, err = c.CreateShardGroup("db0", "rp0", time.Now())
+    sg, err = c.CreateShardGroup("db1", "autogen", time.Now().Add(15*24*time.Hour))
     if err != nil {
         t.Fatal(err)
     } else if sg == nil {
+        t.Fatalf("expected ShardGroup")
+    }
+
+    sg, err = c.CreateShardGroup("db1", "rp0", time.Now())
+    if err != nil {
+        t.Fatal(err)
+    } else if sg == nil {
@@ -1000,7 +1011,8 @@ func TestMetaClient_PruneShardGroups(t *testing.T) {
     expiration := time.Now().Add(-2 * 7 * 24 * time.Hour).Add(-1 * time.Hour)

     data := c.Data()
-    data.Databases[0].RetentionPolicies[0].ShardGroups[0].DeletedAt = expiration
+    data.Databases[1].RetentionPolicies[0].ShardGroups[0].DeletedAt = expiration
+    data.Databases[1].RetentionPolicies[0].ShardGroups[1].DeletedAt = expiration

     if err := c.SetData(&data); err != nil {
         t.Fatal(err)
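A note on the timestamp arithmetic in the hunk above: `time.Now().Add(-2 * 7 * 24 * time.Hour).Add(-1 * time.Hour)` backdates `DeletedAt` to two weeks and one hour ago. Assuming the prune cutoff for deleted shard groups is on the order of two weeks (the constant itself does not appear in this diff), the extra hour puts both marked groups safely past it:

```go
// Illustrative only: reproduces the backdating arithmetic from the test.
package main

import (
	"fmt"
	"time"
)

func main() {
	deletedAt := time.Now().Add(-2 * 7 * 24 * time.Hour).Add(-1 * time.Hour)
	// 337h0m0s, i.e. 14 days plus 1 hour in the past.
	fmt.Println(time.Since(deletedAt).Round(time.Minute))
}
```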
@@ -1011,7 +1023,7 @@ func TestMetaClient_PruneShardGroups(t *testing.T) {
     }

     data = c.Data()
-    rp, err := data.RetentionPolicy("db0", "autogen")
+    rp, err := data.RetentionPolicy("db1", "autogen")
     if err != nil {
         t.Fatal(err)
     }
@@ -1019,7 +1031,7 @@ func TestMetaClient_PruneShardGroups(t *testing.T) {
         t.Fatalf("failed to prune shard group. got: %d, exp: %d", got, exp)
     }

-    rp, err = data.RetentionPolicy("db0", "rp0")
+    rp, err = data.RetentionPolicy("db1", "rp0")
     if err != nil {
         t.Fatal(err)
     }