test: use `T.TempDir` to create temporary test directory (#23258)

* test: use `T.TempDir` to create temporary test directory

This commit replaces `os.MkdirTemp` with `t.TempDir` in tests. The
directory created by `t.TempDir` is automatically removed when the test
and all its subtests complete.

Prior to this commit, a temporary directory created using `os.MkdirTemp`
needed to be removed manually by calling `os.RemoveAll`, a step that was
omitted in some tests. The error handling boilerplate e.g.
	defer func() {
		if err := os.RemoveAll(dir); err != nil {
			t.Fatal(err)
		}
	}()
is also tedious, but `t.TempDir` handles this for us nicely.

Reference: https://pkg.go.dev/testing#T.TempDir
Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>

* test: fix failing TestSendWrite on Windows

=== FAIL: replications/internal TestSendWrite (0.29s)
    logger.go:130: 2022-06-23T13:00:54.290Z	DEBUG	Created new durable queue for replication stream	{"id": "0000000000000001", "path": "C:\\Users\\circleci\\AppData\\Local\\Temp\\TestSendWrite1627281409\\001\\replicationq\\0000000000000001"}
    logger.go:130: 2022-06-23T13:00:54.457Z	ERROR	Error in replication stream	{"replication_id": "0000000000000001", "error": "remote timeout", "retries": 1}
    testing.go:1090: TempDir RemoveAll cleanup: remove C:\Users\circleci\AppData\Local\Temp\TestSendWrite1627281409\001\replicationq\0000000000000001\1: The process cannot access the file because it is being used by another process.

Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>

* test: fix failing TestStore_BadShard on Windows

=== FAIL: tsdb TestStore_BadShard (0.09s)
    logger.go:130: 2022-06-23T12:18:21.827Z	INFO	Using data dir	{"service": "store", "path": "C:\\Users\\circleci\\AppData\\Local\\Temp\\TestStore_BadShard1363295568\\001"}
    logger.go:130: 2022-06-23T12:18:21.827Z	INFO	Compaction settings	{"service": "store", "max_concurrent_compactions": 2, "throughput_bytes_per_second": 50331648, "throughput_bytes_per_second_burst": 50331648}
    logger.go:130: 2022-06-23T12:18:21.828Z	INFO	Open store (start)	{"service": "store", "op_name": "tsdb_open", "op_event": "start"}
    logger.go:130: 2022-06-23T12:18:21.828Z	INFO	Open store (end)	{"service": "store", "op_name": "tsdb_open", "op_event": "end", "op_elapsed": "77.3µs"}
    testing.go:1090: TempDir RemoveAll cleanup: remove C:\Users\circleci\AppData\Local\Temp\TestStore_BadShard1363295568\002\data\db0\rp0\1\index\0\L0-00000001.tsl: The process cannot access the file because it is being used by another process.

Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>

* test: fix failing TestPartition_PrependLogFile_Write_Fail and TestPartition_Compact_Write_Fail on Windows

=== FAIL: tsdb/index/tsi1 TestPartition_PrependLogFile_Write_Fail/write_MANIFEST (0.06s)
    testing.go:1090: TempDir RemoveAll cleanup: remove C:\Users\circleci\AppData\Local\Temp\TestPartition_PrependLogFile_Write_Failwrite_MANIFEST656030081\002\0\L0-00000003.tsl: The process cannot access the file because it is being used by another process.
    --- FAIL: TestPartition_PrependLogFile_Write_Fail/write_MANIFEST (0.06s)

=== FAIL: tsdb/index/tsi1 TestPartition_Compact_Write_Fail/write_MANIFEST (0.08s)
    testing.go:1090: TempDir RemoveAll cleanup: remove C:\Users\circleci\AppData\Local\Temp\TestPartition_Compact_Write_Failwrite_MANIFEST3398667527\002\0\L0-00000003.tsl: The process cannot access the file because it is being used by another process.
    --- FAIL: TestPartition_Compact_Write_Fail/write_MANIFEST (0.08s)

We must close the open file descriptor otherwise the temporary file
cannot be cleaned up on Windows.

Fixes: 619eb1cae6 ("fix: restore in-memory Manifest on write error")
Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>

* test: fix failing TestReplicationStartMissingQueue on Windows

=== FAIL: TestReplicationStartMissingQueue (1.60s)
    logger.go:130: 2023-03-17T10:42:07.269Z	DEBUG	Created new durable queue for replication stream	{"id": "0000000000000001", "path": "C:\\Users\\circleci\\AppData\\Local\\Temp\\TestReplicationStartMissingQueue76668607\\001\\replicationq\\0000000000000001"}
    logger.go:130: 2023-03-17T10:42:07.305Z	INFO	Opened replication stream	{"id": "0000000000000001", "path": "C:\\Users\\circleci\\AppData\\Local\\Temp\\TestReplicationStartMissingQueue76668607\\001\\replicationq\\0000000000000001"}
    testing.go:1206: TempDir RemoveAll cleanup: remove C:\Users\circleci\AppData\Local\Temp\TestReplicationStartMissingQueue76668607\001\replicationq\0000000000000001\1: The process cannot access the file because it is being used by another process.

Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>

* test: update TestWAL_DiskSize

Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>

* test: fix failing TestWAL_DiskSize on Windows

=== FAIL: tsdb/engine/tsm1 TestWAL_DiskSize (2.65s)
    testing.go:1206: TempDir RemoveAll cleanup: remove C:\Users\circleci\AppData\Local\Temp\TestWAL_DiskSize2736073801\001\_00006.wal: The process cannot access the file because it is being used by another process.

Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>

---------

Signed-off-by: Eng Zer Jun <engzerjun@gmail.com>
pull/24163/head
Eng Zer Jun 2023-03-22 04:22:11 +08:00 committed by GitHub
parent 96d6dc3d82
commit 903d30d658
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
59 changed files with 992 additions and 1217 deletions

View File

@ -134,8 +134,7 @@ func TestAnnotationsCRUD(t *testing.T) {
}
t.Run("create annotations", func(t *testing.T) {
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
tests := []struct {
name string
@ -169,9 +168,7 @@ func TestAnnotationsCRUD(t *testing.T) {
})
t.Run("select with filters", func(t *testing.T) {
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
populateAnnotationsData(t, svc)
tests := []struct {
@ -335,8 +332,7 @@ func TestAnnotationsCRUD(t *testing.T) {
})
t.Run("get by id", func(t *testing.T) {
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
anns := populateAnnotationsData(t, svc)
tests := []struct {
@ -383,8 +379,7 @@ func TestAnnotationsCRUD(t *testing.T) {
t.Run("delete multiple with a filter", func(t *testing.T) {
t.Run("delete by stream id", func(t *testing.T) {
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
populateAnnotationsData(t, svc)
ctx := context.Background()
@ -485,8 +480,7 @@ func TestAnnotationsCRUD(t *testing.T) {
})
t.Run("delete with non-id filters", func(t *testing.T) {
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
populateAnnotationsData(t, svc)
tests := []struct {
@ -590,8 +584,7 @@ func TestAnnotationsCRUD(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
populateAnnotationsData(t, svc)
err := svc.DeleteAnnotations(ctx, tt.deleteOrgID, tt.filter)
@ -608,8 +601,7 @@ func TestAnnotationsCRUD(t *testing.T) {
})
t.Run("delete a single annotation by id", func(t *testing.T) {
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
ans := populateAnnotationsData(t, svc)
tests := []struct {
@ -652,8 +644,7 @@ func TestAnnotationsCRUD(t *testing.T) {
})
t.Run("update a single annotation by id", func(t *testing.T) {
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
ans := populateAnnotationsData(t, svc)
updatedTime := time.Time{}.Add(time.Minute)
@ -728,8 +719,7 @@ func TestAnnotationsCRUD(t *testing.T) {
})
t.Run("deleted streams cascade to deleted annotations", func(t *testing.T) {
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
ctx := context.Background()
ans := populateAnnotationsData(t, svc)
@ -762,8 +752,7 @@ func TestAnnotationsCRUD(t *testing.T) {
})
t.Run("renamed streams are reflected in subsequent annotation queries", func(t *testing.T) {
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
ctx := context.Background()
populateAnnotationsData(t, svc)
@ -817,8 +806,7 @@ func TestAnnotationsCRUD(t *testing.T) {
func TestStreamsCRUDSingle(t *testing.T) {
t.Parallel()
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
ctx := context.Background()
orgID := *influxdbtesting.IDPtr(1)
@ -907,8 +895,7 @@ func TestStreamsCRUDSingle(t *testing.T) {
func TestStreamsCRUDMany(t *testing.T) {
t.Parallel()
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
ctx := context.Background()
@ -1038,10 +1025,10 @@ func assertStreamNames(t *testing.T, want []string, got []influxdb.StoredStream)
require.ElementsMatch(t, want, storedNames)
}
func newTestService(t *testing.T) (*Service, func(t *testing.T)) {
func newTestService(t *testing.T) *Service {
t.Helper()
store, clean := sqlite.NewTestStore(t)
store := sqlite.NewTestStore(t)
ctx := context.Background()
sqliteMigrator := sqlite.NewMigrator(store, zap.NewNop())
@ -1050,5 +1037,5 @@ func newTestService(t *testing.T) (*Service, func(t *testing.T)) {
svc := NewService(store)
return svc, clean
return svc
}

View File

@ -43,16 +43,7 @@ func newTestClient(t *testing.T) (*bolt.Client, func(), error) {
}
func TestClientOpen(t *testing.T) {
tempDir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatalf("unable to create temporary test directory %v", err)
}
defer func() {
if err := os.RemoveAll(tempDir); err != nil {
t.Fatalf("unable to delete temporary test directory %s: %v", tempDir, err)
}
}()
tempDir := t.TempDir()
boltFile := filepath.Join(tempDir, "test", "bolt.db")

View File

@ -55,8 +55,7 @@ func Test_BuildTSI_ShardID_Without_BucketID(t *testing.T) {
}
func Test_BuildTSI_Invalid_Index_Already_Exists(t *testing.T) {
tempDir := newTempDirectory(t, "", "build-tsi")
defer os.RemoveAll(tempDir)
tempDir := t.TempDir()
os.MkdirAll(filepath.Join(tempDir, "data", "12345", "autogen", "1", "index"), 0777)
os.MkdirAll(filepath.Join(tempDir, "wal", "12345", "autogen", "1"), 0777)
@ -75,8 +74,7 @@ func Test_BuildTSI_Invalid_Index_Already_Exists(t *testing.T) {
}
func Test_BuildTSI_Valid(t *testing.T) {
tempDir := newTempDirectory(t, "", "build-tsi")
defer os.RemoveAll(tempDir)
tempDir := t.TempDir()
os.MkdirAll(filepath.Join(tempDir, "data", "12345", "autogen", "1"), 0777)
os.MkdirAll(filepath.Join(tempDir, "wal", "12345", "autogen", "1"), 0777)
@ -119,8 +117,7 @@ func Test_BuildTSI_Valid(t *testing.T) {
}
func Test_BuildTSI_Valid_Batch_Size_Exceeded(t *testing.T) {
tempDir := newTempDirectory(t, "", "build-tsi")
defer os.RemoveAll(tempDir)
tempDir := t.TempDir()
os.MkdirAll(filepath.Join(tempDir, "data", "12345", "autogen", "1"), 0777)
os.MkdirAll(filepath.Join(tempDir, "wal", "12345", "autogen", "1"), 0777)
@ -164,8 +161,7 @@ func Test_BuildTSI_Valid_Batch_Size_Exceeded(t *testing.T) {
func Test_BuildTSI_Valid_Verbose(t *testing.T) {
// Set up temp directory structure
tempDir := newTempDirectory(t, "", "build-tsi")
defer os.RemoveAll(tempDir)
tempDir := t.TempDir()
os.MkdirAll(filepath.Join(tempDir, "data", "12345", "autogen", "1"), 0777)
os.MkdirAll(filepath.Join(tempDir, "wal", "12345", "autogen", "1"), 0777)
@ -231,8 +227,7 @@ func Test_BuildTSI_Valid_Compact_Series(t *testing.T) {
t.Skip("mmap implementation on Windows prevents series-file from shrinking during compaction")
}
tempDir := newTempDirectory(t, "", "build-tsi")
defer os.RemoveAll(tempDir)
tempDir := t.TempDir()
os.MkdirAll(filepath.Join(tempDir, "data", "12345", "_series"), 0777)
@ -395,15 +390,6 @@ func runCommand(t *testing.T, params cmdParams, outs cmdOuts) {
}
}
func newTempDirectory(t *testing.T, parentDir string, dirName string) string {
t.Helper()
dir, err := os.MkdirTemp(parentDir, dirName)
require.NoError(t, err)
return dir
}
func newTempTsmFile(t *testing.T, path string, values []tsm1.Value) {
t.Helper()

View File

@ -12,8 +12,7 @@ import (
)
func Test_DeleteTSM_EmptyFile(t *testing.T) {
dir, file := createTSMFile(t, tsmParams{})
defer os.RemoveAll(dir)
_, file := createTSMFile(t, tsmParams{})
runCommand(t, testParams{
file: file,
@ -23,10 +22,9 @@ func Test_DeleteTSM_EmptyFile(t *testing.T) {
}
func Test_DeleteTSM_WrongExt(t *testing.T) {
dir, file := createTSMFile(t, tsmParams{
_, file := createTSMFile(t, tsmParams{
improperExt: true,
})
defer os.RemoveAll(dir)
runCommand(t, testParams{
file: file,
@ -37,7 +35,6 @@ func Test_DeleteTSM_WrongExt(t *testing.T) {
func Test_DeleteTSM_NotFile(t *testing.T) {
dir, _ := createTSMFile(t, tsmParams{})
defer os.RemoveAll(dir)
runCommand(t, testParams{
file: dir,
@ -47,10 +44,9 @@ func Test_DeleteTSM_NotFile(t *testing.T) {
}
func Test_DeleteTSM_SingleEntry_Valid(t *testing.T) {
dir, file := createTSMFile(t, tsmParams{
_, file := createTSMFile(t, tsmParams{
keys: []string{"cpu"},
})
defer os.RemoveAll(dir)
runCommand(t, testParams{
file: file,
@ -60,11 +56,10 @@ func Test_DeleteTSM_SingleEntry_Valid(t *testing.T) {
}
func Test_DeleteTSM_SingleEntry_Invalid(t *testing.T) {
dir, file := createTSMFile(t, tsmParams{
_, file := createTSMFile(t, tsmParams{
invalid: true,
keys: []string{"cpu"},
})
defer os.RemoveAll(dir)
runCommand(t, testParams{
file: file,
@ -74,10 +69,9 @@ func Test_DeleteTSM_SingleEntry_Invalid(t *testing.T) {
}
func Test_DeleteTSM_ManyEntries_Valid(t *testing.T) {
dir, file := createTSMFile(t, tsmParams{
_, file := createTSMFile(t, tsmParams{
keys: []string{"cpu", "foobar", "mem"},
})
defer os.RemoveAll(dir)
runCommand(t, testParams{
file: file,
@ -86,11 +80,10 @@ func Test_DeleteTSM_ManyEntries_Valid(t *testing.T) {
}
func Test_DeleteTSM_ManyEntries_Invalid(t *testing.T) {
dir, file := createTSMFile(t, tsmParams{
_, file := createTSMFile(t, tsmParams{
invalid: true,
keys: []string{"cpu", "foobar", "mem"},
})
defer os.RemoveAll(dir)
runCommand(t, testParams{
file: file,
@ -154,10 +147,10 @@ type tsmParams struct {
func createTSMFile(t *testing.T, params tsmParams) (string, string) {
t.Helper()
dir, err := os.MkdirTemp("", "deletetsm")
require.NoError(t, err)
dir := t.TempDir()
var file *os.File
var err error
if !params.improperExt {
file, err = os.CreateTemp(dir, "*."+tsm1.TSMFileExtension)
} else {

View File

@ -19,9 +19,7 @@ func Test_DumpTSI_NoError(t *testing.T) {
cmd.SetOut(b)
// Create the temp-dir for our un-tared files to live in
dir, err := os.MkdirTemp("", "dumptsitest-")
require.NoError(t, err)
defer os.RemoveAll(dir)
dir := t.TempDir()
// Untar the test data
file, err := os.Open("../tsi-test-data.tar.gz")

View File

@ -21,8 +21,7 @@ func Test_DumpTSM_NoFile(t *testing.T) {
}
func Test_DumpTSM_EmptyFile(t *testing.T) {
dir, file := makeTSMFile(t, tsmParams{})
defer os.RemoveAll(dir)
_, file := makeTSMFile(t, tsmParams{})
runCommand(t, cmdParams{
file: file,
@ -32,10 +31,9 @@ func Test_DumpTSM_EmptyFile(t *testing.T) {
}
func Test_DumpTSM_WrongExt(t *testing.T) {
dir, file := makeTSMFile(t, tsmParams{
_, file := makeTSMFile(t, tsmParams{
wrongExt: true,
})
defer os.RemoveAll(dir)
runCommand(t, cmdParams{
file: file,
@ -46,7 +44,6 @@ func Test_DumpTSM_WrongExt(t *testing.T) {
func Test_DumpTSM_NotFile(t *testing.T) {
dir, _ := makeTSMFile(t, tsmParams{})
defer os.RemoveAll(dir)
runCommand(t, cmdParams{
file: dir,
@ -56,10 +53,9 @@ func Test_DumpTSM_NotFile(t *testing.T) {
}
func Test_DumpTSM_Valid(t *testing.T) {
dir, file := makeTSMFile(t, tsmParams{
_, file := makeTSMFile(t, tsmParams{
keys: []string{"cpu"},
})
defer os.RemoveAll(dir)
runCommand(t, cmdParams{
file: file,
@ -72,11 +68,10 @@ func Test_DumpTSM_Valid(t *testing.T) {
}
func Test_DumpTSM_Invalid(t *testing.T) {
dir, file := makeTSMFile(t, tsmParams{
_, file := makeTSMFile(t, tsmParams{
invalid: true,
keys: []string{"cpu"},
})
defer os.RemoveAll(dir)
runCommand(t, cmdParams{
file: file,
@ -86,10 +81,9 @@ func Test_DumpTSM_Invalid(t *testing.T) {
}
func Test_DumpTSM_ManyKeys(t *testing.T) {
dir, file := makeTSMFile(t, tsmParams{
_, file := makeTSMFile(t, tsmParams{
keys: []string{"cpu", "foobar", "mem"},
})
defer os.RemoveAll(dir)
runCommand(t, cmdParams{
file: file,
@ -103,10 +97,9 @@ func Test_DumpTSM_ManyKeys(t *testing.T) {
}
func Test_DumpTSM_FilterKey(t *testing.T) {
dir, file := makeTSMFile(t, tsmParams{
_, file := makeTSMFile(t, tsmParams{
keys: []string{"cpu", "foobar", "mem"},
})
defer os.RemoveAll(dir)
runCommand(t, cmdParams{
file: file,
@ -187,8 +180,7 @@ type tsmParams struct {
func makeTSMFile(t *testing.T, params tsmParams) (string, string) {
t.Helper()
dir, err := os.MkdirTemp("", "dumptsm")
require.NoError(t, err)
dir := t.TempDir()
ext := tsm1.TSMFileExtension
if params.wrongExt {

View File

@ -36,8 +36,7 @@ func Test_DumpWal_Bad_Path(t *testing.T) {
func Test_DumpWal_Wrong_File_Type(t *testing.T) {
// Creates a temporary .txt file (wrong extension)
dir, file := newTempWal(t, false, false)
defer os.RemoveAll(dir)
file := newTempWal(t, false, false)
params := cmdParams{
walPaths: []string{file},
@ -48,8 +47,7 @@ func Test_DumpWal_Wrong_File_Type(t *testing.T) {
}
func Test_DumpWal_File_Valid(t *testing.T) {
dir, file := newTempWal(t, true, false)
defer os.RemoveAll(dir)
file := newTempWal(t, true, false)
params := cmdParams{
walPaths: []string{file},
@ -67,8 +65,7 @@ func Test_DumpWal_File_Valid(t *testing.T) {
}
func Test_DumpWal_Find_Duplicates_None(t *testing.T) {
dir, file := newTempWal(t, true, false)
defer os.RemoveAll(dir)
file := newTempWal(t, true, false)
params := cmdParams{
findDuplicates: true,
@ -80,8 +77,7 @@ func Test_DumpWal_Find_Duplicates_None(t *testing.T) {
}
func Test_DumpWal_Find_Duplicates_Present(t *testing.T) {
dir, file := newTempWal(t, true, true)
defer os.RemoveAll(dir)
file := newTempWal(t, true, true)
params := cmdParams{
findDuplicates: true,
@ -92,21 +88,25 @@ func Test_DumpWal_Find_Duplicates_Present(t *testing.T) {
runCommand(t, params)
}
func newTempWal(t *testing.T, validExt bool, withDuplicate bool) (string, string) {
func newTempWal(t *testing.T, validExt bool, withDuplicate bool) string {
t.Helper()
dir, err := os.MkdirTemp("", "dump-wal")
require.NoError(t, err)
var file *os.File
dir := t.TempDir()
if !validExt {
file, err := os.CreateTemp(dir, "dumpwaltest*.txt")
require.NoError(t, err)
return dir, file.Name()
t.Cleanup(func() {
file.Close()
})
return file.Name()
}
file, err = os.CreateTemp(dir, "dumpwaltest*"+"."+tsm1.WALFileExtension)
file, err := os.CreateTemp(dir, "dumpwaltest*"+"."+tsm1.WALFileExtension)
require.NoError(t, err)
t.Cleanup(func() {
file.Close()
})
p1 := tsm1.NewValue(10, 1.1)
p2 := tsm1.NewValue(1, int64(1))
@ -132,7 +132,7 @@ func newTempWal(t *testing.T, validExt bool, withDuplicate bool) (string, string
// Write to WAL File
writeWalFile(t, file, values)
return dir, file.Name()
return file.Name()
}
func writeWalFile(t *testing.T, file *os.File, vals map[string][]tsm1.Value) {

View File

@ -34,10 +34,7 @@ const (
func Test_ReportTSI_GeneratedData(t *testing.T) {
shardlessPath := newTempDirectories(t, false)
defer os.RemoveAll(shardlessPath)
shardPath := newTempDirectories(t, true)
defer os.RemoveAll(shardPath)
tests := []cmdParams{
{
@ -69,9 +66,7 @@ func Test_ReportTSI_GeneratedData(t *testing.T) {
func Test_ReportTSI_TestData(t *testing.T) {
// Create temp directory for extracted test data
path, err := os.MkdirTemp("", "report-tsi-test-")
require.NoError(t, err)
defer os.RemoveAll(path)
path := t.TempDir()
// Extract test data
file, err := os.Open("../tsi-test-data.tar.gz")
@ -125,10 +120,9 @@ func Test_ReportTSI_TestData(t *testing.T) {
func newTempDirectories(t *testing.T, withShards bool) string {
t.Helper()
dataDir, err := os.MkdirTemp("", "reporttsi")
require.NoError(t, err)
dataDir := t.TempDir()
err = os.MkdirAll(filepath.Join(dataDir, bucketID, "autogen"), 0777)
err := os.MkdirAll(filepath.Join(dataDir, bucketID, "autogen"), 0777)
require.NoError(t, err)
if withShards {

View File

@ -13,16 +13,15 @@ import (
)
func Test_Invalid_NotDir(t *testing.T) {
dir, err := os.MkdirTemp("", "")
require.NoError(t, err)
dir := t.TempDir()
file, err := os.CreateTemp(dir, "")
require.NoError(t, err)
defer os.RemoveAll(dir)
runCommand(t, testInfo{
dir: file.Name(),
expectOut: []string{"Files: 0"},
})
require.NoError(t, file.Close())
}
func Test_Invalid_EmptyDir(t *testing.T) {

View File

@ -76,11 +76,10 @@ type Test struct {
func NewTest(t *testing.T) *Test {
t.Helper()
dir, err := os.MkdirTemp("", "verify-seriesfile-")
require.NoError(t, err)
dir := t.TempDir()
// create a series file in the directory
err = func() error {
err := func() error {
seriesFile := tsdb.NewSeriesFile(dir)
if err := seriesFile.Open(); err != nil {
return err
@ -128,7 +127,6 @@ func NewTest(t *testing.T) *Test {
return seriesFile.Close()
}()
if err != nil {
os.RemoveAll(dir)
t.Fatal(err)
}

View File

@ -21,12 +21,11 @@ const (
// Run tests on a directory with no Tombstone files
func TestVerifies_InvalidFileType(t *testing.T) {
path, err := os.MkdirTemp("", "verify-tombstone")
require.NoError(t, err)
path := t.TempDir()
_, err = os.CreateTemp(path, "verifytombstonetest*"+".txt")
f, err := os.CreateTemp(path, "verifytombstonetest*"+".txt")
require.NoError(t, err)
defer os.RemoveAll(path)
require.NoError(t, f.Close())
verify := NewVerifyTombstoneCommand()
verify.SetArgs([]string{"--engine-path", path})
@ -43,7 +42,6 @@ func TestVerifies_InvalidFileType(t *testing.T) {
// Run tests on an empty Tombstone file (treated as v1)
func TestVerifies_InvalidEmptyFile(t *testing.T) {
path, _ := NewTempTombstone(t)
defer os.RemoveAll(path)
verify := NewVerifyTombstoneCommand()
verify.SetArgs([]string{"--engine-path", path})
@ -60,7 +58,6 @@ func TestVerifies_InvalidEmptyFile(t *testing.T) {
// Runs tests on an invalid V2 Tombstone File
func TestVerifies_InvalidV2(t *testing.T) {
path, file := NewTempTombstone(t)
defer os.RemoveAll(path)
WriteTombstoneHeader(t, file, v2header)
WriteBadData(t, file)
@ -74,7 +71,6 @@ func TestVerifies_InvalidV2(t *testing.T) {
func TestVerifies_ValidTS(t *testing.T) {
path, file := NewTempTombstone(t)
defer os.RemoveAll(path)
ts := tsm1.NewTombstoner(file.Name(), nil)
require.NoError(t, ts.Add([][]byte{[]byte("foobar")}))
@ -90,7 +86,6 @@ func TestVerifies_ValidTS(t *testing.T) {
// Runs tests on an invalid V3 Tombstone File
func TestVerifies_InvalidV3(t *testing.T) {
path, file := NewTempTombstone(t)
defer os.RemoveAll(path)
WriteTombstoneHeader(t, file, v3header)
WriteBadData(t, file)
@ -105,7 +100,6 @@ func TestVerifies_InvalidV3(t *testing.T) {
// Runs tests on an invalid V4 Tombstone File
func TestVerifies_InvalidV4(t *testing.T) {
path, file := NewTempTombstone(t)
defer os.RemoveAll(path)
WriteTombstoneHeader(t, file, v4header)
WriteBadData(t, file)
@ -121,7 +115,6 @@ func TestVerifies_InvalidV4(t *testing.T) {
// is not needed, but was part of old command.
func TestTombstone_VeryVeryVerbose(t *testing.T) {
path, file := NewTempTombstone(t)
defer os.RemoveAll(path)
WriteTombstoneHeader(t, file, v4header)
WriteBadData(t, file)
@ -136,8 +129,7 @@ func TestTombstone_VeryVeryVerbose(t *testing.T) {
func NewTempTombstone(t *testing.T) (string, *os.File) {
t.Helper()
dir, err := os.MkdirTemp("", "verify-tombstone")
require.NoError(t, err)
dir := t.TempDir()
file, err := os.CreateTemp(dir, "verifytombstonetest*"+"."+tsm1.TombstoneFileExtension)
require.NoError(t, err)

View File

@ -69,8 +69,7 @@ func TestValidUTF8(t *testing.T) {
func newUTFTest(t *testing.T, withError bool) string {
t.Helper()
dir, err := os.MkdirTemp("", "verify-tsm")
require.NoError(t, err)
dir := t.TempDir()
f, err := os.CreateTemp(dir, "verifytsmtest*"+"."+tsm1.TSMFileExtension)
require.NoError(t, err)
@ -94,8 +93,7 @@ func newUTFTest(t *testing.T, withError bool) string {
func newChecksumTest(t *testing.T, withError bool) string {
t.Helper()
dir, err := os.MkdirTemp("", "verify-tsm")
require.NoError(t, err)
dir := t.TempDir()
f, err := os.CreateTemp(dir, "verifytsmtest*"+"."+tsm1.TSMFileExtension)
require.NoError(t, err)

View File

@ -109,6 +109,8 @@ func (a args) Run(cmd *cobra.Command) error {
}
totalEntriesScanned += entriesScanned
_ = tw.Flush()
_ = reader.Close()
}
// Print Summary

View File

@ -21,12 +21,11 @@ type testInfo struct {
}
func TestVerifies_InvalidFileType(t *testing.T) {
path, err := os.MkdirTemp("", "verify-wal")
require.NoError(t, err)
path := t.TempDir()
_, err = os.CreateTemp(path, "verifywaltest*"+".txt")
f, err := os.CreateTemp(path, "verifywaltest*"+".txt")
require.NoError(t, err)
defer os.RemoveAll(path)
require.NoError(t, f.Close())
runCommand(testInfo{
t: t,
@ -37,8 +36,7 @@ func TestVerifies_InvalidFileType(t *testing.T) {
}
func TestVerifies_InvalidNotDir(t *testing.T) {
path, file := newTempWALInvalid(t, true)
defer os.RemoveAll(path)
_, file := newTempWALInvalid(t, true)
runCommand(testInfo{
t: t,
@ -50,7 +48,6 @@ func TestVerifies_InvalidNotDir(t *testing.T) {
func TestVerifies_InvalidEmptyFile(t *testing.T) {
path, _ := newTempWALInvalid(t, true)
defer os.RemoveAll(path)
runCommand(testInfo{
t: t,
@ -62,7 +59,6 @@ func TestVerifies_InvalidEmptyFile(t *testing.T) {
func TestVerifies_Invalid(t *testing.T) {
path, _ := newTempWALInvalid(t, false)
defer os.RemoveAll(path)
runCommand(testInfo{
t: t,
@ -74,7 +70,6 @@ func TestVerifies_Invalid(t *testing.T) {
func TestVerifies_Valid(t *testing.T) {
path := newTempWALValid(t)
defer os.RemoveAll(path)
runCommand(testInfo{
t: t,
@ -108,12 +103,13 @@ func runCommand(args testInfo) {
func newTempWALValid(t *testing.T) string {
t.Helper()
dir, err := os.MkdirTemp("", "verify-wal")
require.NoError(t, err)
dir := t.TempDir()
w := tsm1.NewWAL(dir, 0, 0, tsdb.EngineTags{})
defer w.Close()
require.NoError(t, w.Open())
t.Cleanup(func() {
require.NoError(t, w.Close())
})
p1 := tsm1.NewValue(1, 1.1)
p2 := tsm1.NewValue(1, int64(1))
@ -129,7 +125,7 @@ func newTempWALValid(t *testing.T) string {
"cpu,host=A#!~#unsigned": {p5},
}
_, err = w.WriteMulti(context.Background(), values)
_, err := w.WriteMulti(context.Background(), values)
require.NoError(t, err)
return dir
@ -138,18 +134,14 @@ func newTempWALValid(t *testing.T) string {
func newTempWALInvalid(t *testing.T, empty bool) (string, *os.File) {
t.Helper()
dir, err := os.MkdirTemp("", "verify-wal")
require.NoError(t, err)
dir := t.TempDir()
file, err := os.CreateTemp(dir, "verifywaltest*."+tsm1.WALFileExtension)
require.NoError(t, err)
t.Cleanup(func() { file.Close() })
if !empty {
writer, err := os.OpenFile(file.Name(), os.O_APPEND|os.O_WRONLY, 0644)
require.NoError(t, err)
defer writer.Close()
written, err := writer.Write([]byte("foobar"))
written, err := file.Write([]byte("foobar"))
require.NoError(t, err)
require.Equal(t, 6, written)
}

View File

@ -2,7 +2,6 @@ package launcher_test
import (
"context"
"os"
"testing"
"github.com/influxdata/influx-cli/v2/clients/backup"
@ -18,9 +17,7 @@ func TestBackupRestore_Full(t *testing.T) {
t.Parallel()
ctx := context.Background()
backupDir, err := os.MkdirTemp("", "")
require.NoError(t, err)
defer os.RemoveAll(backupDir)
backupDir := t.TempDir()
// Boot a server, write some data, and take a backup.
l1 := launcher.RunAndSetupNewLauncherOrFail(ctx, t, func(o *launcher.InfluxdOpts) {
@ -83,7 +80,7 @@ func TestBackupRestore_Full(t *testing.T) {
l2.ResetHTTPCLient()
// Check that orgs and buckets were reset to match the original server's metadata.
_, err = l2.OrgService(t).FindOrganizationByID(ctx, l2.Org.ID)
_, err := l2.OrgService(t).FindOrganizationByID(ctx, l2.Org.ID)
require.Equal(t, errors.ENotFound, errors.ErrorCode(err))
rbkt1, err := l2.BucketService(t).FindBucket(ctx, influxdb.BucketFilter{OrganizationID: &l1.Org.ID, ID: &l1.Bucket.ID})
require.NoError(t, err)
@ -116,9 +113,7 @@ func TestBackupRestore_Partial(t *testing.T) {
t.Parallel()
ctx := context.Background()
backupDir, err := os.MkdirTemp("", "")
require.NoError(t, err)
defer os.RemoveAll(backupDir)
backupDir := t.TempDir()
// Boot a server, write some data, and take a backup.
l1 := launcher.RunAndSetupNewLauncherOrFail(ctx, t, func(o *launcher.InfluxdOpts) {

View File

@ -12,13 +12,9 @@ import (
)
func TestCopyDirAndDirSize(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "tcd")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
err = os.MkdirAll(filepath.Join(tmpdir, "1", "1", "1"), 0700)
err := os.MkdirAll(filepath.Join(tmpdir, "1", "1", "1"), 0700)
if err != nil {
t.Fatal(err)
}
@ -49,11 +45,7 @@ func TestCopyDirAndDirSize(t *testing.T) {
}
assert.Equal(t, uint64(1600), size)
targetDir, err := os.MkdirTemp("", "tcd")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(targetDir)
targetDir := t.TempDir()
targetDir = filepath.Join(targetDir, "x")
err = CopyDir(tmpdir, targetDir, nil, func(path string) bool {
base := filepath.Base(path)

View File

@ -29,10 +29,7 @@ import (
)
func TestPathValidations(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "")
require.Nil(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
v1Dir := filepath.Join(tmpdir, "v1db")
v2Dir := filepath.Join(tmpdir, "v2db")
@ -41,7 +38,7 @@ func TestPathValidations(t *testing.T) {
configsPath := filepath.Join(v2Dir, "configs")
enginePath := filepath.Join(v2Dir, "engine")
err = os.MkdirAll(filepath.Join(enginePath, "db"), 0777)
err := os.MkdirAll(filepath.Join(enginePath, "db"), 0777)
require.Nil(t, err)
sourceOpts := &optionsV1{
@ -89,10 +86,7 @@ func TestPathValidations(t *testing.T) {
}
func TestClearTargetPaths(t *testing.T) {
tmpdir, err := os.MkdirTemp("", "")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
tmpdir := t.TempDir()
v2Dir := filepath.Join(tmpdir, "v2db")
boltPath := filepath.Join(v2Dir, bolt.DefaultFilename)
@ -101,7 +95,7 @@ func TestClearTargetPaths(t *testing.T) {
cqPath := filepath.Join(v2Dir, "cqs")
configPath := filepath.Join(v2Dir, "config")
err = os.MkdirAll(filepath.Join(enginePath, "db"), 0777)
err := os.MkdirAll(filepath.Join(enginePath, "db"), 0777)
require.NoError(t, err)
err = os.WriteFile(boltPath, []byte{1}, 0777)
require.NoError(t, err)
@ -176,11 +170,9 @@ func TestDbURL(t *testing.T) {
func TestUpgradeRealDB(t *testing.T) {
ctx := context.Background()
tmpdir, err := os.MkdirTemp("", "")
require.NoError(t, err)
tmpdir := t.TempDir()
defer os.RemoveAll(tmpdir)
err = testutil.Unzip(filepath.Join("testdata", "v1db.zip"), tmpdir)
err := testutil.Unzip(filepath.Join("testdata", "v1db.zip"), tmpdir)
require.NoError(t, err)
v1ConfigPath := filepath.Join(tmpdir, "v1.conf")

View File

@ -184,9 +184,7 @@ func Test_NewProgram(t *testing.T) {
for _, tt := range tests {
for _, writer := range configWriters {
fn := func(t *testing.T) {
testDir, err := os.MkdirTemp("", "")
require.NoError(t, err)
defer os.RemoveAll(testDir)
testDir := t.TempDir()
confFile, err := writer.writeFn(testDir, config)
require.NoError(t, err)
@ -286,9 +284,12 @@ func writeTomlConfig(dir string, config interface{}) (string, error) {
if err != nil {
return "", err
}
defer w.Close()
if err := toml.NewEncoder(w).Encode(config); err != nil {
return "", err
}
return confFile, nil
}
@ -304,9 +305,12 @@ func yamlConfigWriter(shortExt bool) configWriter {
if err != nil {
return "", err
}
defer w.Close()
if err := yaml.NewEncoder(w).Encode(config); err != nil {
return "", err
}
return confFile, nil
}
}
@ -382,9 +386,7 @@ func Test_ConfigPrecedence(t *testing.T) {
for _, tt := range tests {
fn := func(t *testing.T) {
testDir, err := os.MkdirTemp("", "")
require.NoError(t, err)
defer os.RemoveAll(testDir)
testDir := t.TempDir()
defer setEnvVar("TEST_CONFIG_PATH", testDir)()
if tt.writeJson {
@ -429,9 +431,7 @@ func Test_ConfigPrecedence(t *testing.T) {
}
func Test_ConfigPathDotDirectory(t *testing.T) {
testDir, err := os.MkdirTemp("", "")
require.NoError(t, err)
defer os.RemoveAll(testDir)
testDir := t.TempDir()
tests := []struct {
name string
@ -460,7 +460,7 @@ func Test_ConfigPathDotDirectory(t *testing.T) {
configDir := filepath.Join(testDir, tc.dir)
require.NoError(t, os.Mkdir(configDir, 0700))
_, err = writeTomlConfig(configDir, config)
_, err := writeTomlConfig(configDir, config)
require.NoError(t, err)
defer setEnvVar("TEST_CONFIG_PATH", configDir)()
@ -487,9 +487,7 @@ func Test_ConfigPathDotDirectory(t *testing.T) {
}
func Test_LoadConfigCwd(t *testing.T) {
testDir, err := os.MkdirTemp("", "")
require.NoError(t, err)
defer os.RemoveAll(testDir)
testDir := t.TempDir()
pwd, err := os.Getwd()
require.NoError(t, err)

View File

@ -20,8 +20,7 @@ var (
func TestCreateAndGetNotebook(t *testing.T) {
t.Parallel()
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
ctx := context.Background()
// getting an invalid id should return an error
@ -59,8 +58,7 @@ func TestCreateAndGetNotebook(t *testing.T) {
func TestUpdate(t *testing.T) {
t.Parallel()
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
ctx := context.Background()
testCreate := &influxdb.NotebookReqBody{
@ -108,8 +106,7 @@ func TestUpdate(t *testing.T) {
func TestDelete(t *testing.T) {
t.Parallel()
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
ctx := context.Background()
// attempting to delete a non-existant notebook should return an error
@ -145,8 +142,7 @@ func TestDelete(t *testing.T) {
func TestList(t *testing.T) {
t.Parallel()
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
ctx := context.Background()
orgID := idGen.ID()
@ -195,8 +191,8 @@ func TestList(t *testing.T) {
}
}
func newTestService(t *testing.T) (*Service, func(t *testing.T)) {
store, clean := sqlite.NewTestStore(t)
func newTestService(t *testing.T) *Service {
store := sqlite.NewTestStore(t)
ctx := context.Background()
sqliteMigrator := sqlite.NewMigrator(store, zap.NewNop())
@ -205,5 +201,5 @@ func newTestService(t *testing.T) (*Service, func(t *testing.T)) {
svc := NewService(store)
return svc, clean
return svc
}

View File

@ -605,11 +605,7 @@ func ReadSegment(segment *segment) string {
}
func TestSegment_repair(t *testing.T) {
dir, err := os.MkdirTemp("", "hh_queue")
if err != nil {
t.Fatalf("failed to create temp dir: %v", err)
}
defer os.RemoveAll(dir)
dir := t.TempDir()
examples := []struct {
In *TestSegment
@ -703,6 +699,9 @@ func TestSegment_repair(t *testing.T) {
example.VerifyFn = func([]byte) error { return nil }
}
segment := mustCreateSegment(example.In, dir, example.VerifyFn)
t.Cleanup(func() {
segment.close()
})
if got, exp := ReadSegment(segment), example.Expected.String(); got != exp {
t.Errorf("[example %d]\ngot: %s\nexp: %s\n\n", i+1, got, exp)

View File

@ -57,8 +57,7 @@ var (
func TestCreateAndGetConnection(t *testing.T) {
t.Parallel()
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
// Getting an invalid ID should return an error.
got, err := svc.GetRemoteConnection(ctx, initID)
@ -79,8 +78,7 @@ func TestCreateAndGetConnection(t *testing.T) {
func TestUpdateAndGetConnection(t *testing.T) {
t.Parallel()
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
// Updating a nonexistent ID fails.
updated, err := svc.UpdateRemoteConnection(ctx, initID, updateReq)
@ -106,8 +104,7 @@ func TestUpdateAndGetConnection(t *testing.T) {
func TestUpdateNoop(t *testing.T) {
t.Parallel()
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
// Create a connection.
created, err := svc.CreateRemoteConnection(ctx, createReq)
@ -128,8 +125,7 @@ func TestUpdateNoop(t *testing.T) {
func TestDeleteConnection(t *testing.T) {
t.Parallel()
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
// Deleting a nonexistent ID should return an error.
require.Equal(t, errRemoteNotFound, svc.DeleteRemoteConnection(ctx, initID))
@ -167,8 +163,7 @@ func TestListConnections(t *testing.T) {
t.Run("list all", func(t *testing.T) {
t.Parallel()
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
allConns := setup(t, svc)
listed, err := svc.ListRemoteConnections(ctx, influxdb.RemoteConnectionListFilter{OrgID: connection.OrgID})
@ -179,8 +174,7 @@ func TestListConnections(t *testing.T) {
t.Run("list by name", func(t *testing.T) {
t.Parallel()
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
allConns := setup(t, svc)
listed, err := svc.ListRemoteConnections(ctx, influxdb.RemoteConnectionListFilter{
@ -194,8 +188,7 @@ func TestListConnections(t *testing.T) {
t.Run("list by URL", func(t *testing.T) {
t.Parallel()
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
allConns := setup(t, svc)
listed, err := svc.ListRemoteConnections(ctx, influxdb.RemoteConnectionListFilter{
@ -209,8 +202,7 @@ func TestListConnections(t *testing.T) {
t.Run("list by other org ID", func(t *testing.T) {
t.Parallel()
svc, clean := newTestService(t)
defer clean(t)
svc := newTestService(t)
setup(t, svc)
listed, err := svc.ListRemoteConnections(ctx, influxdb.RemoteConnectionListFilter{OrgID: platform.ID(1000)})
@ -219,8 +211,8 @@ func TestListConnections(t *testing.T) {
})
}
func newTestService(t *testing.T) (*service, func(t *testing.T)) {
store, clean := sqlite.NewTestStore(t)
func newTestService(t *testing.T) *service {
store := sqlite.NewTestStore(t)
logger := zaptest.NewLogger(t)
sqliteMigrator := sqlite.NewMigrator(store, logger)
require.NoError(t, sqliteMigrator.Up(ctx, migrations.AllUp))
@ -230,5 +222,5 @@ func newTestService(t *testing.T) (*service, func(t *testing.T)) {
idGenerator: mock.NewIncrementingIDGenerator(initID),
}
return &svc, clean
return &svc
}

View File

@ -35,12 +35,13 @@ func TestCreateNewQueueDirExists(t *testing.T) {
t.Parallel()
queuePath, qm := initQueueManager(t)
defer os.RemoveAll(filepath.Dir(queuePath))
err := qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0)
require.NoError(t, err)
require.DirExists(t, filepath.Join(queuePath, id1.String()))
shutdown(t, qm)
}
func TestEnqueueScan(t *testing.T) {
@ -78,9 +79,10 @@ func TestEnqueueScan(t *testing.T) {
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
queuePath, qm := initQueueManager(t)
defer os.RemoveAll(filepath.Dir(queuePath))
_, qm := initQueueManager(t)
// Create new queue
err := qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0)
@ -97,6 +99,9 @@ func TestEnqueueScan(t *testing.T) {
// Check queue position
closeRq(rq)
scan, err := rq.queue.NewScanner()
t.Cleanup(func() {
require.NoError(t, rq.queue.Close())
})
if tt.writeFuncReturn == nil {
require.ErrorIs(t, err, io.EOF)
@ -115,8 +120,7 @@ func TestEnqueueScan(t *testing.T) {
func TestCreateNewQueueDuplicateID(t *testing.T) {
t.Parallel()
queuePath, qm := initQueueManager(t)
defer os.RemoveAll(filepath.Dir(queuePath))
_, qm := initQueueManager(t)
// Create a valid new queue
err := qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0)
@ -125,13 +129,14 @@ func TestCreateNewQueueDuplicateID(t *testing.T) {
// Try to initialize another queue with the same replication ID
err = qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0)
require.EqualError(t, err, "durable queue already exists for replication ID \"0000000000000001\"")
shutdown(t, qm)
}
func TestDeleteQueueDirRemoved(t *testing.T) {
t.Parallel()
queuePath, qm := initQueueManager(t)
defer os.RemoveAll(filepath.Dir(queuePath))
// Create a valid new queue
err := qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0)
@ -147,8 +152,7 @@ func TestDeleteQueueDirRemoved(t *testing.T) {
func TestDeleteQueueNonexistentID(t *testing.T) {
t.Parallel()
queuePath, qm := initQueueManager(t)
defer os.RemoveAll(filepath.Dir(queuePath))
_, qm := initQueueManager(t)
// Delete nonexistent queue
err := qm.DeleteQueue(id1)
@ -158,8 +162,7 @@ func TestDeleteQueueNonexistentID(t *testing.T) {
func TestUpdateMaxQueueSizeNonexistentID(t *testing.T) {
t.Parallel()
queuePath, qm := initQueueManager(t)
defer os.RemoveAll(filepath.Dir(queuePath))
_, qm := initQueueManager(t)
// Update nonexistent queue
err := qm.UpdateMaxQueueSize(id1, influxdb.DefaultReplicationMaxQueueSizeBytes)
@ -170,7 +173,6 @@ func TestStartReplicationQueue(t *testing.T) {
t.Parallel()
queuePath, qm := initQueueManager(t)
defer os.RemoveAll(filepath.Dir(queuePath))
// Create new queue
err := qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0)
@ -199,13 +201,14 @@ func TestStartReplicationQueue(t *testing.T) {
// Ensure queue is open by trying to remove, will error if open
err = qm.replicationQueues[id1].queue.Remove()
require.Errorf(t, err, "queue is open")
require.NoError(t, qm.replicationQueues[id1].queue.Close())
}
func TestStartReplicationQueuePartialDelete(t *testing.T) {
t.Parallel()
queuePath, qm := initQueueManager(t)
defer os.RemoveAll(filepath.Dir(queuePath))
// Create new queue
err := qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0)
@ -233,7 +236,6 @@ func TestStartReplicationQueuesMultiple(t *testing.T) {
t.Parallel()
queuePath, qm := initQueueManager(t)
defer os.RemoveAll(filepath.Dir(queuePath))
// Create queue1
err := qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0)
@ -280,13 +282,15 @@ func TestStartReplicationQueuesMultiple(t *testing.T) {
require.Errorf(t, err, "queue is open")
err = qm.replicationQueues[id2].queue.Remove()
require.Errorf(t, err, "queue is open")
require.NoError(t, qm.replicationQueues[id1].queue.Close())
require.NoError(t, qm.replicationQueues[id2].queue.Close())
}
func TestStartReplicationQueuesMultipleWithPartialDelete(t *testing.T) {
t.Parallel()
queuePath, qm := initQueueManager(t)
defer os.RemoveAll(filepath.Dir(queuePath))
// Create queue1
err := qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0)
@ -325,14 +329,14 @@ func TestStartReplicationQueuesMultipleWithPartialDelete(t *testing.T) {
// Ensure queue1 is open by trying to remove, will error if open
err = qm.replicationQueues[id1].queue.Remove()
require.Errorf(t, err, "queue is open")
require.NoError(t, qm.replicationQueues[id1].queue.Close())
}
func initQueueManager(t *testing.T) (string, *durableQueueManager) {
t.Helper()
enginePath, err := os.MkdirTemp("", "engine")
require.NoError(t, err)
queuePath := filepath.Join(enginePath, "replicationq")
queuePath := filepath.Join(t.TempDir(), "replicationq")
logger := zaptest.NewLogger(t)
qm := NewDurableQueueManager(logger, queuePath, metrics.NewReplicationsMetrics(), replicationsMock.NewMockHttpConfigStore(nil))
@ -403,9 +407,7 @@ func getTestRemoteWriter(t *testing.T, expected string) remoteWriter {
func TestEnqueueData(t *testing.T) {
t.Parallel()
queuePath, err := os.MkdirTemp("", "testqueue")
require.NoError(t, err)
defer os.RemoveAll(queuePath)
queuePath := t.TempDir()
logger := zaptest.NewLogger(t)
qm := NewDurableQueueManager(logger, queuePath, metrics.NewReplicationsMetrics(), replicationsMock.NewMockHttpConfigStore(nil))
@ -429,6 +431,9 @@ func TestEnqueueData(t *testing.T) {
rq, ok := qm.replicationQueues[id1]
require.True(t, ok)
closeRq(rq)
t.Cleanup(func() {
require.NoError(t, rq.queue.Close())
})
go func() { <-rq.receive }() // absorb the receive to avoid testcase deadlock
require.NoError(t, qm.EnqueueData(id1, []byte(data), 1))
@ -460,7 +465,6 @@ func TestSendWrite(t *testing.T) {
}
path, qm := initQueueManager(t)
defer os.RemoveAll(path)
require.NoError(t, qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0))
require.DirExists(t, filepath.Join(path, id1.String()))
@ -468,6 +472,9 @@ func TestSendWrite(t *testing.T) {
rq, ok := qm.replicationQueues[id1]
require.True(t, ok)
closeRq(rq)
t.Cleanup(func() {
require.NoError(t, rq.queue.Close())
})
go func() { <-rq.receive }() // absorb the receive to avoid testcase deadlock
// Create custom remote writer that does some expected behavior
@ -547,7 +554,6 @@ func TestEnqueueData_WithMetrics(t *testing.T) {
t.Parallel()
path, qm := initQueueManager(t)
defer os.RemoveAll(path)
require.NoError(t, qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0))
require.DirExists(t, filepath.Join(path, id1.String()))
@ -555,6 +561,9 @@ func TestEnqueueData_WithMetrics(t *testing.T) {
rq, ok := qm.replicationQueues[id1]
require.True(t, ok)
closeRq(rq)
t.Cleanup(func() {
require.NoError(t, rq.queue.Close())
})
reg := prom.NewRegistry(zaptest.NewLogger(t))
reg.MustRegister(qm.metrics.PrometheusCollectors()...)
@ -589,7 +598,6 @@ func TestEnqueueData_EnqueueFailure(t *testing.T) {
t.Parallel()
path, qm := initQueueManager(t)
defer os.RemoveAll(path)
require.NoError(t, qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0))
require.DirExists(t, filepath.Join(path, id1.String()))
@ -622,7 +630,6 @@ func TestGoroutineReceives(t *testing.T) {
t.Parallel()
path, qm := initQueueManager(t)
defer os.RemoveAll(path)
require.NoError(t, qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0))
require.DirExists(t, filepath.Join(path, id1.String()))
@ -630,6 +637,9 @@ func TestGoroutineReceives(t *testing.T) {
require.True(t, ok)
require.NotNil(t, rq)
closeRq(rq) // atypical from normal behavior, but lets us receive channels to test
t.Cleanup(func() {
require.NoError(t, rq.queue.Close())
})
go func() { require.NoError(t, qm.EnqueueData(id1, []byte("1234"), 1)) }()
select {
@ -645,7 +655,6 @@ func TestGoroutineCloses(t *testing.T) {
t.Parallel()
path, qm := initQueueManager(t)
defer os.RemoveAll(path)
require.NoError(t, qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0))
require.DirExists(t, filepath.Join(path, id1.String()))
@ -670,7 +679,9 @@ func TestGetReplications(t *testing.T) {
t.Parallel()
path, qm := initQueueManager(t)
defer os.RemoveAll(path)
t.Cleanup(func() {
shutdown(t, qm)
})
// Initialize 3 queues (2nd and 3rd share the same orgID and localBucket)
require.NoError(t, qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0))
@ -700,7 +711,6 @@ func TestReplicationStartMissingQueue(t *testing.T) {
t.Parallel()
queuePath, qm := initQueueManager(t)
defer os.RemoveAll(filepath.Dir(queuePath))
// Create new queue
err := qm.InitializeQueue(id1, maxQueueSizeBytes, orgID1, localBucketID1, 0)
@ -726,6 +736,9 @@ func TestReplicationStartMissingQueue(t *testing.T) {
// Call startup function
err = qm.StartReplicationQueues(trackedReplications)
require.NoError(t, err)
t.Cleanup(func() {
shutdown(t, qm)
})
// Make sure queue is stored in map
require.NotNil(t, qm.replicationQueues[id1])

View File

@ -77,8 +77,7 @@ func idPointer(id int) *platform.ID {
func TestCreateAndGetReplication(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
insertRemote(t, testStore, replication.RemoteID)
@ -101,8 +100,7 @@ func TestCreateAndGetReplication(t *testing.T) {
func TestCreateAndGetReplicationName(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
insertRemote(t, testStore, replication.RemoteID)
@ -132,8 +130,7 @@ func TestCreateAndGetReplicationName(t *testing.T) {
func TestCreateAndGetReplicationNameAndID(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
insertRemote(t, testStore, replication.RemoteID)
@ -163,8 +160,7 @@ func TestCreateAndGetReplicationNameAndID(t *testing.T) {
func TestCreateAndGetReplicationNameError(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
insertRemote(t, testStore, replication.RemoteID)
@ -186,8 +182,7 @@ func TestCreateAndGetReplicationNameError(t *testing.T) {
func TestCreateMissingRemote(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
created, err := testStore.CreateReplication(ctx, initID, createReq)
require.Error(t, err)
@ -203,8 +198,7 @@ func TestCreateMissingRemote(t *testing.T) {
func TestUpdateAndGetReplication(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
insertRemote(t, testStore, replication.RemoteID)
insertRemote(t, testStore, updatedReplication.RemoteID)
@ -228,8 +222,7 @@ func TestUpdateAndGetReplication(t *testing.T) {
func TestUpdateResponseInfo(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
insertRemote(t, testStore, replication.RemoteID)
insertRemote(t, testStore, updatedReplication.RemoteID)
@ -260,8 +253,7 @@ func TestUpdateResponseInfo(t *testing.T) {
func TestUpdateMissingRemote(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
insertRemote(t, testStore, replication.RemoteID)
@ -285,8 +277,7 @@ func TestUpdateMissingRemote(t *testing.T) {
func TestUpdateNoop(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
insertRemote(t, testStore, replication.RemoteID)
@ -304,8 +295,7 @@ func TestUpdateNoop(t *testing.T) {
func TestDeleteReplication(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
insertRemote(t, testStore, replication.RemoteID)
@ -327,8 +317,7 @@ func TestDeleteReplication(t *testing.T) {
func TestDeleteReplications(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
// Deleting when there is no bucket is OK.
_, err := testStore.DeleteBucketReplications(ctx, replication.LocalBucketID)
@ -387,8 +376,7 @@ func TestListReplications(t *testing.T) {
t.Run("list all for org", func(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
allRepls := setup(t, testStore)
listed, err := testStore.ListReplications(ctx, influxdb.ReplicationListFilter{OrgID: createReq.OrgID})
@ -407,8 +395,7 @@ func TestListReplications(t *testing.T) {
t.Run("list all with empty filter", func(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
allRepls := setup(t, testStore)
otherOrgReq := createReq
@ -425,8 +412,7 @@ func TestListReplications(t *testing.T) {
t.Run("list by name", func(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
allRepls := setup(t, testStore)
listed, err := testStore.ListReplications(ctx, influxdb.ReplicationListFilter{
@ -440,8 +426,7 @@ func TestListReplications(t *testing.T) {
t.Run("list by remote ID", func(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
allRepls := setup(t, testStore)
listed, err := testStore.ListReplications(ctx, influxdb.ReplicationListFilter{
@ -455,8 +440,7 @@ func TestListReplications(t *testing.T) {
t.Run("list by bucket ID", func(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
allRepls := setup(t, testStore)
listed, err := testStore.ListReplications(ctx, influxdb.ReplicationListFilter{
@ -470,8 +454,7 @@ func TestListReplications(t *testing.T) {
t.Run("list by other org ID", func(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
listed, err := testStore.ListReplications(ctx, influxdb.ReplicationListFilter{OrgID: platform.ID(2)})
require.NoError(t, err)
@ -482,8 +465,7 @@ func TestListReplications(t *testing.T) {
func TestMigrateDownFromReplicationsWithName(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
insertRemote(t, testStore, replication.RemoteID)
@ -526,7 +508,7 @@ func TestMigrateDownFromReplicationsWithName(t *testing.T) {
}
func TestMigrateUpToRemotesNullRemoteOrg(t *testing.T) {
sqlStore, clean := sqlite.NewTestStore(t)
sqlStore := sqlite.NewTestStore(t)
logger := zaptest.NewLogger(t)
sqliteMigrator := sqlite.NewMigrator(sqlStore, logger)
require.NoError(t, sqliteMigrator.UpUntil(ctx, 7, migrations.AllUp))
@ -536,7 +518,6 @@ func TestMigrateUpToRemotesNullRemoteOrg(t *testing.T) {
require.NoError(t, err)
testStore := NewStore(sqlStore)
defer clean(t)
insertRemote(t, testStore, replication.RemoteID)
@ -561,8 +542,7 @@ func TestMigrateUpToRemotesNullRemoteOrg(t *testing.T) {
func TestGetFullHTTPConfig(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
// Does not exist returns the appropriate error
_, err := testStore.GetFullHTTPConfig(ctx, initID)
@ -582,8 +562,7 @@ func TestGetFullHTTPConfig(t *testing.T) {
func TestPopulateRemoteHTTPConfig(t *testing.T) {
t.Parallel()
testStore, clean := newTestStore(t)
defer clean(t)
testStore := newTestStore(t)
emptyConfig := &influxdb.ReplicationHTTPConfig{RemoteOrgID: idPointer(0)}
@ -606,8 +585,8 @@ func TestPopulateRemoteHTTPConfig(t *testing.T) {
require.Equal(t, want, *target)
}
func newTestStore(t *testing.T) (*Store, func(t *testing.T)) {
sqlStore, clean := sqlite.NewTestStore(t)
func newTestStore(t *testing.T) *Store {
sqlStore := sqlite.NewTestStore(t)
logger := zaptest.NewLogger(t)
sqliteMigrator := sqlite.NewMigrator(sqlStore, logger)
require.NoError(t, sqliteMigrator.Up(ctx, migrations.AllUp))
@ -616,7 +595,7 @@ func newTestStore(t *testing.T) (*Store, func(t *testing.T)) {
_, err := sqlStore.DB.Exec("PRAGMA foreign_keys = ON;")
require.NoError(t, err)
return NewStore(sqlStore), clean
return NewStore(sqlStore)
}
func insertRemote(t *testing.T, store *Store, id platform.ID) {

View File

@ -28,8 +28,7 @@ type tableInfo struct {
func TestUp(t *testing.T) {
t.Parallel()
store, clean := NewTestStore(t)
defer clean(t)
store := NewTestStore(t)
upsOnlyAll, err := test_migrations.AllUp.ReadDir(".")
require.NoError(t, err)
@ -69,8 +68,7 @@ func TestUpErrors(t *testing.T) {
t.Parallel()
t.Run("only unknown migration exists", func(t *testing.T) {
store, clean := NewTestStore(t)
defer clean(t)
store := NewTestStore(t)
ctx := context.Background()
migrator := NewMigrator(store, zaptest.NewLogger(t))
@ -80,8 +78,7 @@ func TestUpErrors(t *testing.T) {
})
t.Run("known + unknown migrations exist", func(t *testing.T) {
store, clean := NewTestStore(t)
defer clean(t)
store := NewTestStore(t)
ctx := context.Background()
migrator := NewMigrator(store, zaptest.NewLogger(t))
@ -94,8 +91,7 @@ func TestUpErrors(t *testing.T) {
func TestUpWithBackups(t *testing.T) {
t.Parallel()
store, clean := NewTestStore(t)
defer clean(t)
store := NewTestStore(t)
logger := zaptest.NewLogger(t)
migrator := NewMigrator(store, logger)
@ -140,8 +136,7 @@ func TestUpWithBackups(t *testing.T) {
func TestDown(t *testing.T) {
t.Parallel()
store, clean := NewTestStore(t)
defer clean(t)
store := NewTestStore(t)
upsOnlyAll, err := test_migrations.AllUp.ReadDir(".")
require.NoError(t, err)

View File

@ -1,24 +1,21 @@
package sqlite
import (
"os"
"testing"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
)
func NewTestStore(t *testing.T) (*SqlStore, func(t *testing.T)) {
tempDir, err := os.MkdirTemp("", "")
require.NoError(t, err, "unable to create temporary test directory")
func NewTestStore(t *testing.T) *SqlStore {
tempDir := t.TempDir()
s, err := NewSqlStore(tempDir+"/"+DefaultFilename, zap.NewNop())
require.NoError(t, err, "unable to open testing database")
cleanUpFn := func(t *testing.T) {
t.Cleanup(func() {
require.NoError(t, s.Close(), "failed to close testing database")
require.NoErrorf(t, os.RemoveAll(tempDir), "unable to delete temporary test directory %s", tempDir)
}
})
return s, cleanUpFn
return s
}

View File

@ -14,8 +14,7 @@ func TestFlush(t *testing.T) {
t.Parallel()
ctx := context.Background()
store, clean := NewTestStore(t)
defer clean(t)
store := NewTestStore(t)
err := store.execTrans(ctx, `CREATE TABLE test_table_1 (id TEXT NOT NULL PRIMARY KEY)`)
require.NoError(t, err)
@ -38,8 +37,7 @@ func TestFlushMigrationsTable(t *testing.T) {
t.Parallel()
ctx := context.Background()
store, clean := NewTestStore(t)
defer clean(t)
store := NewTestStore(t)
require.NoError(t, store.execTrans(ctx, fmt.Sprintf(`CREATE TABLE %s (id TEXT NOT NULL PRIMARY KEY)`, migrationsTableName)))
require.NoError(t, store.execTrans(ctx, fmt.Sprintf(`INSERT INTO %s (id) VALUES ("one"), ("two"), ("three")`, migrationsTableName)))
@ -59,10 +57,8 @@ func TestBackupSqlStore(t *testing.T) {
// this temporary dir/file is is used as the source db path for testing a bacup
// from a non-memory database. each individual test also creates a separate temporary dir/file
// to backup into.
td, err := os.MkdirTemp("", "")
require.NoError(t, err)
td := t.TempDir()
tf := fmt.Sprintf("%s/%s", td, DefaultFilename)
defer os.RemoveAll(td)
tests := []struct {
name string
@ -94,14 +90,13 @@ func TestBackupSqlStore(t *testing.T) {
require.NoError(t, err)
// create a file to write the backup to.
tempDir, err := os.MkdirTemp("", "")
require.NoError(t, err)
defer os.RemoveAll(tempDir)
tempDir := t.TempDir()
// open the file to use as a writer for BackupSqlStore
backupPath := tempDir + "/db.sqlite"
dest, err := os.Create(backupPath)
require.NoError(t, err)
defer dest.Close()
// run the backup
err = store.BackupSqlStore(ctx, dest)
@ -130,10 +125,8 @@ func TestRestoreSqlStore(t *testing.T) {
// this temporary dir/file is is used as the destination db path for testing a restore
// into a non-memory database. each individual test also creates a separate temporary dir/file
// to hold a test db to restore from.
td, err := os.MkdirTemp("", "")
require.NoError(t, err)
td := t.TempDir()
tf := fmt.Sprintf("%s/%s", td, DefaultFilename)
defer os.RemoveAll(td)
tests := []struct {
name string
@ -153,13 +146,12 @@ func TestRestoreSqlStore(t *testing.T) {
ctx := context.Background()
// create the test db to restore from
tempDir, err := os.MkdirTemp("", "")
require.NoError(t, err)
tempDir := t.TempDir()
tempFileName := fmt.Sprintf("%s/%s", tempDir, DefaultFilename)
defer os.RemoveAll(tempDir)
restoreDB, err := NewSqlStore(tempFileName, zap.NewNop())
require.NoError(t, err)
t.Cleanup(func() { restoreDB.Close() })
// add some data to the test db
_, err = restoreDB.DB.Exec(`CREATE TABLE test_table_1 (id TEXT NOT NULL PRIMARY KEY)`)
@ -179,10 +171,12 @@ func TestRestoreSqlStore(t *testing.T) {
// open the test "restore-from" db file as a reader
f, err := os.Open(tempFileName)
require.NoError(t, err)
t.Cleanup(func() { f.Close() })
// open a db to restore into. it will be empty to begin with.
restore, err := NewSqlStore(tt.dbPath, zap.NewNop())
require.NoError(t, err)
t.Cleanup(func() { restore.Close() })
// run the restore
err = restore.RestoreSqlStore(ctx, f)
@ -197,14 +191,15 @@ func TestRestoreSqlStore(t *testing.T) {
require.Equal(t, []string{"one", "two", "three"}, res1)
require.Equal(t, []string{"four", "five", "six"}, res2)
require.NoError(t, f.Close())
}
}
func TestTableNames(t *testing.T) {
t.Parallel()
store, clean := NewTestStore(t)
defer clean(t)
store := NewTestStore(t)
ctx := context.Background()
err := store.execTrans(ctx, `CREATE TABLE test_table_1 (id TEXT NOT NULL PRIMARY KEY);
@ -220,8 +215,7 @@ func TestTableNames(t *testing.T) {
func TestAllMigrationNames(t *testing.T) {
t.Parallel()
store, clean := NewTestStore(t)
defer clean(t)
store := NewTestStore(t)
ctx := context.Background()
// Empty db, returns nil slice and no error

View File

@ -50,10 +50,7 @@ type StorageReader struct {
}
func NewStorageReader(tb testing.TB, setupFn SetupFunc) *StorageReader {
rootDir, err := os.MkdirTemp("", "storage-flux-test")
if err != nil {
tb.Fatal(err)
}
rootDir := tb.TempDir()
var closers []io.Closer
close := func() {
@ -62,7 +59,6 @@ func NewStorageReader(tb testing.TB, setupFn SetupFunc) *StorageReader {
tb.Errorf("close error: %s", err)
}
}
_ = os.RemoveAll(rootDir)
}
// Create an underlying kv store. We use the inmem version to speed

View File

@ -179,10 +179,7 @@ func newAnalyticalBackend(t *testing.T, orgSvc influxdb.OrganizationService, buc
// Mostly copied out of cmd/influxd/main.go.
logger := zaptest.NewLogger(t)
rootDir, err := os.MkdirTemp("", "task-logreaderwriter-")
if err != nil {
t.Fatal(err)
}
rootDir := t.TempDir()
engine := storage.NewEngine(rootDir, storage.NewConfig(), storage.WithMetaClient(metaClient))
engine.WithLogger(logger)
@ -194,7 +191,6 @@ func newAnalyticalBackend(t *testing.T, orgSvc influxdb.OrganizationService, buc
defer func() {
if t.Failed() {
engine.Close()
os.RemoveAll(rootDir)
}
}()

View File

@ -20,14 +20,6 @@ type keyValues struct {
values []Value
}
func MustTempDir() string {
dir, err := os.MkdirTemp("", "tsm1-test")
if err != nil {
panic(fmt.Sprintf("failed to create temp dir: %v", err))
}
return dir
}
func MustTempFile(dir string) *os.File {
f, err := os.CreateTemp(dir, "tsm1test")
if err != nil {
@ -72,14 +64,15 @@ func newFiles(dir string, values ...keyValues) ([]string, error) {
func TestDescendingCursor_SinglePointStartTime(t *testing.T) {
t.Run("cache", func(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
fs := NewFileStore(dir, tsdb.EngineTags{})
t.Cleanup(func() { fs.Close() })
const START, END = 10, 1
kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, false)
defer kc.Close()
t.Cleanup(kc.Close)
cur := newIntegerArrayDescendingCursor()
t.Cleanup(cur.Close)
// Include a cached value with timestamp equal to END
cur.reset(START, END, Values{NewIntegerValue(1, 1)}, kc)
@ -95,9 +88,9 @@ func TestDescendingCursor_SinglePointStartTime(t *testing.T) {
}
})
t.Run("tsm", func(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
fs := NewFileStore(dir, tsdb.EngineTags{})
t.Cleanup(func() { fs.Close() })
const START, END = 10, 1
@ -114,8 +107,9 @@ func TestDescendingCursor_SinglePointStartTime(t *testing.T) {
_ = fs.Replace(nil, files)
kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, false)
defer kc.Close()
t.Cleanup(kc.Close)
cur := newIntegerArrayDescendingCursor()
t.Cleanup(cur.Close)
cur.reset(START, END, nil, kc)
var got []int64
@ -132,9 +126,9 @@ func TestDescendingCursor_SinglePointStartTime(t *testing.T) {
}
func TestFileStore_DuplicatePoints(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
fs := NewFileStore(dir, tsdb.EngineTags{})
t.Cleanup(func() { fs.Close() })
makeVals := func(ts ...int64) []Value {
vals := make([]Value, len(ts))
@ -162,9 +156,10 @@ func TestFileStore_DuplicatePoints(t *testing.T) {
t.Run("ascending", func(t *testing.T) {
const START, END = 0, 100
kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, true)
defer kc.Close()
t.Cleanup(kc.Close)
cur := newFloatArrayAscendingCursor()
cur.reset(START, END, nil, kc)
t.Cleanup(cur.Close)
var got []int64
ar := cur.Next()
@ -181,9 +176,10 @@ func TestFileStore_DuplicatePoints(t *testing.T) {
t.Run("descending", func(t *testing.T) {
const START, END = 100, 0
kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, false)
defer kc.Close()
t.Cleanup(kc.Close)
cur := newFloatArrayDescendingCursor()
cur.reset(START, END, nil, kc)
t.Cleanup(cur.Close)
var got []int64
ar := cur.Next()
@ -217,9 +213,9 @@ func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// When calling `nextTSM`, a single block of 1200 timestamps will be returned and the
// array cursor must chuck the values in the Next call.
func TestFileStore_MergeBlocksLargerThat1000_SecondEntirelyContained(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
fs := NewFileStore(dir, tsdb.EngineTags{})
t.Cleanup(func() { fs.Close() })
// makeVals creates count points starting at ts and incrementing by step
makeVals := func(ts, count, step int64) []Value {
@ -256,8 +252,9 @@ func TestFileStore_MergeBlocksLargerThat1000_SecondEntirelyContained(t *testing.
t.Run("ascending", func(t *testing.T) {
const START, END = 1000, 10000
kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, true)
defer kc.Close()
t.Cleanup(kc.Close)
cur := newFloatArrayAscendingCursor()
t.Cleanup(cur.Close)
cur.reset(START, END, nil, kc)
exp := makeTs(1000, 800, 10)
@ -279,8 +276,9 @@ func TestFileStore_MergeBlocksLargerThat1000_SecondEntirelyContained(t *testing.
t.Run("descending", func(t *testing.T) {
const START, END = 10000, 0
kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, false)
defer kc.Close()
t.Cleanup(kc.Close)
cur := newFloatArrayDescendingCursor()
t.Cleanup(cur.Close)
cur.reset(START, END, nil, kc)
exp := makeTs(1000, 800, 10)
@ -319,9 +317,9 @@ func (a *FloatArray) Swap(i, j int) {
// To verify intersecting data from the second file replaces the first, the values differ,
// so the enumerated results can be compared with the expected output.
func TestFileStore_MergeBlocksLargerThat1000_MultipleBlocksInEachFile(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
fs := NewFileStore(dir, tsdb.EngineTags{})
t.Cleanup(func() { fs.Close() })
// makeVals creates count points starting at ts and incrementing by step
makeVals := func(ts, count, step int64, v float64) []Value {
@ -359,8 +357,9 @@ func TestFileStore_MergeBlocksLargerThat1000_MultipleBlocksInEachFile(t *testing
t.Run("ascending", func(t *testing.T) {
const START, END = 1000, 1e9
kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, true)
defer kc.Close()
t.Cleanup(kc.Close)
cur := newFloatArrayAscendingCursor()
t.Cleanup(cur.Close)
cur.reset(START, END, nil, kc)
exp := makeArray(1000, 3500, 10, 1.01)
@ -386,8 +385,9 @@ func TestFileStore_MergeBlocksLargerThat1000_MultipleBlocksInEachFile(t *testing
t.Run("descending", func(t *testing.T) {
const START, END = 1e9, 0
kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, false)
defer kc.Close()
t.Cleanup(kc.Close)
cur := newFloatArrayDescendingCursor()
t.Cleanup(cur.Close)
cur.reset(START, END, nil, kc)
exp := makeArray(1000, 3500, 10, 1.01)
@ -413,9 +413,9 @@ func TestFileStore_MergeBlocksLargerThat1000_MultipleBlocksInEachFile(t *testing
}
func TestFileStore_SeekBoundaries(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
fs := NewFileStore(dir, tsdb.EngineTags{})
t.Cleanup(func() { fs.Close() })
// makeVals creates count points starting at ts and incrementing by step
makeVals := func(ts, count, step int64, v float64) []Value {
@ -453,8 +453,9 @@ func TestFileStore_SeekBoundaries(t *testing.T) {
t.Run("ascending full", func(t *testing.T) {
const START, END = 1000, 1099
kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, true)
defer kc.Close()
t.Cleanup(kc.Close)
cur := newFloatArrayAscendingCursor()
t.Cleanup(cur.Close)
cur.reset(START, END, nil, kc)
exp := makeArray(1000, 100, 1, 1.01)
@ -478,8 +479,9 @@ func TestFileStore_SeekBoundaries(t *testing.T) {
t.Run("ascending split", func(t *testing.T) {
const START, END = 1050, 1149
kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, true)
defer kc.Close()
t.Cleanup(kc.Close)
cur := newFloatArrayAscendingCursor()
t.Cleanup(cur.Close)
cur.reset(START, END, nil, kc)
exp := makeArray(1050, 50, 1, 1.01)
@ -505,8 +507,9 @@ func TestFileStore_SeekBoundaries(t *testing.T) {
t.Run("descending full", func(t *testing.T) {
const START, END = 1099, 1000
kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, false)
defer kc.Close()
t.Cleanup(kc.Close)
cur := newFloatArrayDescendingCursor()
t.Cleanup(cur.Close)
cur.reset(START, END, nil, kc)
exp := makeArray(1000, 100, 1, 1.01)
@ -531,8 +534,9 @@ func TestFileStore_SeekBoundaries(t *testing.T) {
t.Run("descending split", func(t *testing.T) {
const START, END = 1149, 1050
kc := fs.KeyCursor(context.Background(), []byte("m,_field=v#!~#v"), START, false)
defer kc.Close()
t.Cleanup(kc.Close)
cur := newFloatArrayDescendingCursor()
t.Cleanup(cur.Close)
cur.reset(START, END, nil, kc)
exp := makeArray(1050, 50, 1, 1.01)

View File

@ -541,8 +541,7 @@ func TestCache_Deduplicate_Concurrent(t *testing.T) {
// Ensure the CacheLoader can correctly load from a single segment, even if it's corrupted.
func TestCacheLoader_LoadSingle(t *testing.T) {
// Create a WAL segment.
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
w := NewWALSegmentWriter(f)
@ -613,10 +612,15 @@ func TestCacheLoader_LoadSingle(t *testing.T) {
// Ensure the CacheLoader can correctly load from two segments, even if one is corrupted.
func TestCacheLoader_LoadDouble(t *testing.T) {
// Create a WAL segment.
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f1, f2 := mustTempFile(dir), mustTempFile(dir)
w1, w2 := NewWALSegmentWriter(f1), NewWALSegmentWriter(f2)
t.Cleanup(func() {
f1.Close()
f2.Close()
w1.close()
w2.close()
})
p1 := NewValue(1, 1.1)
p2 := NewValue(1, int64(1))
@ -678,10 +682,13 @@ func TestCacheLoader_LoadDouble(t *testing.T) {
// Ensure the CacheLoader can load deleted series
func TestCacheLoader_LoadDeleted(t *testing.T) {
// Create a WAL segment.
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
w := NewWALSegmentWriter(f)
t.Cleanup(func() {
f.Close()
w.close()
})
p1 := NewValue(1, 1.0)
p2 := NewValue(2, 2.0)
@ -781,14 +788,6 @@ func TestCache_Split(t *testing.T) {
}
}
func mustTempDir() string {
dir, err := os.MkdirTemp("", "tsm1-test")
if err != nil {
panic(fmt.Sprintf("failed to create temp dir: %v", err))
}
return dir
}
func mustTempFile(dir string) *os.File {
f, err := os.CreateTemp(dir, "tsm1test")
if err != nil {

View File

@ -17,8 +17,7 @@ import (
// Tests compacting a Cache snapshot into a single TSM file
func TestCompactor_Snapshot(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
v1 := tsm1.NewValue(1, float64(1))
v2 := tsm1.NewValue(1, float64(1))
@ -36,9 +35,12 @@ func TestCompactor_Snapshot(t *testing.T) {
}
}
fs := &fakeFileStore{}
t.Cleanup(func() { fs.Close() })
compactor := tsm1.NewCompactor()
compactor.Dir = dir
compactor.FileStore = &fakeFileStore{}
compactor.FileStore = fs
files, err := compactor.WriteSnapshot(c, zap.NewNop())
if err == nil {
@ -61,6 +63,7 @@ func TestCompactor_Snapshot(t *testing.T) {
}
r := MustOpenTSMReader(files[0])
t.Cleanup(func() { r.Close() })
if got, exp := r.KeyCount(), 2; got != exp {
t.Fatalf("keys length mismatch: got %v, exp %v", got, exp)
@ -91,8 +94,7 @@ func TestCompactor_Snapshot(t *testing.T) {
}
func TestCompactor_CompactFullLastTimestamp(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
var vals tsm1.Values
ts := int64(1e9)
@ -106,15 +108,16 @@ func TestCompactor_CompactFullLastTimestamp(t *testing.T) {
writes := map[string][]tsm1.Value{
"cpu,host=A#!~#value": vals[:100],
}
f1 := MustWriteTSM(dir, 1, writes)
f1 := MustWriteTSM(t, dir, 1, writes)
writes = map[string][]tsm1.Value{
"cpu,host=A#!~#value": vals[100:],
}
f2 := MustWriteTSM(dir, 2, writes)
f2 := MustWriteTSM(t, dir, 2, writes)
fs := &fakeFileStore{}
defer fs.Close()
t.Cleanup(func() { fs.Close() })
compactor := tsm1.NewCompactor()
compactor.Dir = dir
compactor.FileStore = fs
@ -126,6 +129,8 @@ func TestCompactor_CompactFullLastTimestamp(t *testing.T) {
}
r := MustOpenTSMReader(files[0])
t.Cleanup(func() { r.Close() })
entries := r.Entries([]byte("cpu,host=A#!~#value"))
_, b, err := r.ReadBytes(&entries[0], nil)
if err != nil {
@ -144,15 +149,14 @@ func TestCompactor_CompactFullLastTimestamp(t *testing.T) {
// Ensures that a compaction will properly merge multiple TSM files
func TestCompactor_CompactFull(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
// write 3 TSM files with different data and one new point
a1 := tsm1.NewValue(1, 1.1)
writes := map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a1},
}
f1 := MustWriteTSM(dir, 1, writes)
f1 := MustWriteTSM(t, dir, 1, writes)
a2 := tsm1.NewValue(2, 1.2)
b1 := tsm1.NewValue(1, 2.1)
@ -160,7 +164,7 @@ func TestCompactor_CompactFull(t *testing.T) {
"cpu,host=A#!~#value": {a2},
"cpu,host=B#!~#value": {b1},
}
f2 := MustWriteTSM(dir, 2, writes)
f2 := MustWriteTSM(t, dir, 2, writes)
a3 := tsm1.NewValue(1, 1.3)
c1 := tsm1.NewValue(1, 3.1)
@ -168,10 +172,10 @@ func TestCompactor_CompactFull(t *testing.T) {
"cpu,host=A#!~#value": {a3},
"cpu,host=C#!~#value": {c1},
}
f3 := MustWriteTSM(dir, 3, writes)
f3 := MustWriteTSM(t, dir, 3, writes)
fs := &fakeFileStore{}
defer fs.Close()
t.Cleanup(func() { fs.Close() })
compactor := tsm1.NewCompactor()
compactor.Dir = dir
compactor.FileStore = fs
@ -216,6 +220,7 @@ func TestCompactor_CompactFull(t *testing.T) {
}
r := MustOpenTSMReader(files[0])
t.Cleanup(func() { r.Close() })
if got, exp := r.KeyCount(), 3; got != exp {
t.Fatalf("keys length mismatch: got %v, exp %v", got, exp)
@ -248,15 +253,14 @@ func TestCompactor_CompactFull(t *testing.T) {
// Ensures that a compaction will properly merge multiple TSM files
func TestCompactor_DecodeError(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
// write 3 TSM files with different data and one new point
a1 := tsm1.NewValue(1, 1.1)
writes := map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a1},
}
f1 := MustWriteTSM(dir, 1, writes)
f1 := MustWriteTSM(t, dir, 1, writes)
a2 := tsm1.NewValue(2, 1.2)
b1 := tsm1.NewValue(1, 2.1)
@ -264,7 +268,7 @@ func TestCompactor_DecodeError(t *testing.T) {
"cpu,host=A#!~#value": {a2},
"cpu,host=B#!~#value": {b1},
}
f2 := MustWriteTSM(dir, 2, writes)
f2 := MustWriteTSM(t, dir, 2, writes)
a3 := tsm1.NewValue(1, 1.3)
c1 := tsm1.NewValue(1, 3.1)
@ -272,7 +276,7 @@ func TestCompactor_DecodeError(t *testing.T) {
"cpu,host=A#!~#value": {a3},
"cpu,host=C#!~#value": {c1},
}
f3 := MustWriteTSM(dir, 3, writes)
f3 := MustWriteTSM(t, dir, 3, writes)
f, err := os.OpenFile(f3, os.O_RDWR, os.ModePerm)
if err != nil {
panic(err)
@ -304,8 +308,7 @@ func TestCompactor_DecodeError(t *testing.T) {
// Ensures that a compaction will properly merge multiple TSM files
func TestCompactor_Compact_OverlappingBlocks(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
// write 3 TSM files with different data and one new point
a1 := tsm1.NewValue(4, 1.1)
@ -315,7 +318,7 @@ func TestCompactor_Compact_OverlappingBlocks(t *testing.T) {
writes := map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a1, a2, a3},
}
f1 := MustWriteTSM(dir, 1, writes)
f1 := MustWriteTSM(t, dir, 1, writes)
c1 := tsm1.NewValue(3, 1.2)
c2 := tsm1.NewValue(8, 1.2)
@ -324,10 +327,10 @@ func TestCompactor_Compact_OverlappingBlocks(t *testing.T) {
writes = map[string][]tsm1.Value{
"cpu,host=A#!~#value": {c1, c2, c3},
}
f3 := MustWriteTSM(dir, 3, writes)
f3 := MustWriteTSM(t, dir, 3, writes)
fs := &fakeFileStore{}
defer fs.Close()
t.Cleanup(func() { fs.Close() })
compactor := tsm1.NewCompactor()
compactor.Dir = dir
compactor.FileStore = fs
@ -345,6 +348,7 @@ func TestCompactor_Compact_OverlappingBlocks(t *testing.T) {
}
r := MustOpenTSMReader(files[0])
t.Cleanup(func() { r.Close() })
if got, exp := r.KeyCount(), 1; got != exp {
t.Fatalf("keys length mismatch: got %v, exp %v", got, exp)
@ -375,8 +379,7 @@ func TestCompactor_Compact_OverlappingBlocks(t *testing.T) {
// Ensures that a compaction will properly merge multiple TSM files
func TestCompactor_Compact_OverlappingBlocksMultiple(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
// write 3 TSM files with different data and one new point
a1 := tsm1.NewValue(4, 1.1)
@ -386,7 +389,7 @@ func TestCompactor_Compact_OverlappingBlocksMultiple(t *testing.T) {
writes := map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a1, a2, a3},
}
f1 := MustWriteTSM(dir, 1, writes)
f1 := MustWriteTSM(t, dir, 1, writes)
b1 := tsm1.NewValue(1, 1.2)
b2 := tsm1.NewValue(2, 1.2)
@ -395,7 +398,7 @@ func TestCompactor_Compact_OverlappingBlocksMultiple(t *testing.T) {
writes = map[string][]tsm1.Value{
"cpu,host=A#!~#value": {b1, b2, b3},
}
f2 := MustWriteTSM(dir, 2, writes)
f2 := MustWriteTSM(t, dir, 2, writes)
c1 := tsm1.NewValue(3, 1.2)
c2 := tsm1.NewValue(8, 1.2)
@ -404,10 +407,10 @@ func TestCompactor_Compact_OverlappingBlocksMultiple(t *testing.T) {
writes = map[string][]tsm1.Value{
"cpu,host=A#!~#value": {c1, c2, c3},
}
f3 := MustWriteTSM(dir, 3, writes)
f3 := MustWriteTSM(t, dir, 3, writes)
fs := &fakeFileStore{}
defer fs.Close()
t.Cleanup(func() { fs.Close() })
compactor := tsm1.NewCompactor()
compactor.Dir = dir
compactor.FileStore = fs
@ -425,6 +428,7 @@ func TestCompactor_Compact_OverlappingBlocksMultiple(t *testing.T) {
}
r := MustOpenTSMReader(files[0])
t.Cleanup(func() { r.Close() })
if got, exp := r.KeyCount(), 1; got != exp {
t.Fatalf("keys length mismatch: got %v, exp %v", got, exp)
@ -454,8 +458,7 @@ func TestCompactor_Compact_OverlappingBlocksMultiple(t *testing.T) {
}
func TestCompactor_Compact_UnsortedBlocks(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
// write 2 TSM files with different data and one new point
a1 := tsm1.NewValue(4, 1.1)
@ -465,7 +468,7 @@ func TestCompactor_Compact_UnsortedBlocks(t *testing.T) {
writes := map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a1, a2, a3},
}
f1 := MustWriteTSM(dir, 1, writes)
f1 := MustWriteTSM(t, dir, 1, writes)
b1 := tsm1.NewValue(1, 1.2)
b2 := tsm1.NewValue(2, 1.2)
@ -474,11 +477,13 @@ func TestCompactor_Compact_UnsortedBlocks(t *testing.T) {
writes = map[string][]tsm1.Value{
"cpu,host=A#!~#value": {b1, b2, b3},
}
f2 := MustWriteTSM(dir, 2, writes)
f2 := MustWriteTSM(t, dir, 2, writes)
fs := &fakeFileStore{}
t.Cleanup(func() { fs.Close() })
compactor := tsm1.NewCompactor()
compactor.Dir = dir
compactor.FileStore = &fakeFileStore{}
compactor.FileStore = fs
compactor.Size = 2
compactor.Open()
@ -493,6 +498,7 @@ func TestCompactor_Compact_UnsortedBlocks(t *testing.T) {
}
r := MustOpenTSMReader(files[0])
t.Cleanup(func() { r.Close() })
if got, exp := r.KeyCount(), 1; got != exp {
t.Fatalf("keys length mismatch: got %v, exp %v", got, exp)
@ -522,8 +528,7 @@ func TestCompactor_Compact_UnsortedBlocks(t *testing.T) {
}
func TestCompactor_Compact_UnsortedBlocksOverlapping(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
// write 3 TSM files where two blocks are overlapping and with unsorted order
a1 := tsm1.NewValue(1, 1.1)
@ -532,7 +537,7 @@ func TestCompactor_Compact_UnsortedBlocksOverlapping(t *testing.T) {
writes := map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a1, a2},
}
f1 := MustWriteTSM(dir, 1, writes)
f1 := MustWriteTSM(t, dir, 1, writes)
b1 := tsm1.NewValue(3, 1.2)
b2 := tsm1.NewValue(4, 1.2)
@ -540,7 +545,7 @@ func TestCompactor_Compact_UnsortedBlocksOverlapping(t *testing.T) {
writes = map[string][]tsm1.Value{
"cpu,host=A#!~#value": {b1, b2},
}
f2 := MustWriteTSM(dir, 2, writes)
f2 := MustWriteTSM(t, dir, 2, writes)
c1 := tsm1.NewValue(1, 1.1)
c2 := tsm1.NewValue(2, 1.1)
@ -548,11 +553,13 @@ func TestCompactor_Compact_UnsortedBlocksOverlapping(t *testing.T) {
writes = map[string][]tsm1.Value{
"cpu,host=A#!~#value": {c1, c2},
}
f3 := MustWriteTSM(dir, 3, writes)
f3 := MustWriteTSM(t, dir, 3, writes)
fs := &fakeFileStore{}
t.Cleanup(func() { fs.Close() })
compactor := tsm1.NewCompactor()
compactor.Dir = dir
compactor.FileStore = &fakeFileStore{}
compactor.FileStore = fs
compactor.Size = 2
compactor.Open()
@ -567,6 +574,7 @@ func TestCompactor_Compact_UnsortedBlocksOverlapping(t *testing.T) {
}
r := MustOpenTSMReader(files[0])
t.Cleanup(func() { r.Close() })
if got, exp := r.KeyCount(), 1; got != exp {
t.Fatalf("keys length mismatch: got %v, exp %v", got, exp)
@ -597,8 +605,7 @@ func TestCompactor_Compact_UnsortedBlocksOverlapping(t *testing.T) {
// Ensures that a compaction will properly merge multiple TSM files
func TestCompactor_CompactFull_SkipFullBlocks(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
// write 3 TSM files with different data and one new point
a1 := tsm1.NewValue(1, 1.1)
@ -606,22 +613,22 @@ func TestCompactor_CompactFull_SkipFullBlocks(t *testing.T) {
writes := map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a1, a2},
}
f1 := MustWriteTSM(dir, 1, writes)
f1 := MustWriteTSM(t, dir, 1, writes)
a3 := tsm1.NewValue(3, 1.3)
writes = map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a3},
}
f2 := MustWriteTSM(dir, 2, writes)
f2 := MustWriteTSM(t, dir, 2, writes)
a4 := tsm1.NewValue(4, 1.4)
writes = map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a4},
}
f3 := MustWriteTSM(dir, 3, writes)
f3 := MustWriteTSM(t, dir, 3, writes)
fs := &fakeFileStore{}
defer fs.Close()
t.Cleanup(func() { fs.Close() })
compactor := tsm1.NewCompactor()
compactor.Dir = dir
compactor.FileStore = fs
@ -657,6 +664,7 @@ func TestCompactor_CompactFull_SkipFullBlocks(t *testing.T) {
}
r := MustOpenTSMReader(files[0])
t.Cleanup(func() { r.Close() })
if got, exp := r.KeyCount(), 1; got != exp {
t.Fatalf("keys length mismatch: got %v, exp %v", got, exp)
@ -692,8 +700,7 @@ func TestCompactor_CompactFull_SkipFullBlocks(t *testing.T) {
// Ensures that a full compaction will skip over blocks whose entire
// time range has been tombstoned
func TestCompactor_CompactFull_TombstonedSkipBlock(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
// write 3 TSM files with different data and one new point
a1 := tsm1.NewValue(1, 1.1)
@ -701,7 +708,7 @@ func TestCompactor_CompactFull_TombstonedSkipBlock(t *testing.T) {
writes := map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a1, a2},
}
f1 := MustWriteTSM(dir, 1, writes)
f1 := MustWriteTSM(t, dir, 1, writes)
ts := tsm1.NewTombstoner(f1, nil)
ts.AddRange([][]byte{[]byte("cpu,host=A#!~#value")}, math.MinInt64, math.MaxInt64)
@ -714,16 +721,16 @@ func TestCompactor_CompactFull_TombstonedSkipBlock(t *testing.T) {
writes = map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a3},
}
f2 := MustWriteTSM(dir, 2, writes)
f2 := MustWriteTSM(t, dir, 2, writes)
a4 := tsm1.NewValue(4, 1.4)
writes = map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a4},
}
f3 := MustWriteTSM(dir, 3, writes)
f3 := MustWriteTSM(t, dir, 3, writes)
fs := &fakeFileStore{}
defer fs.Close()
t.Cleanup(func() { fs.Close() })
compactor := tsm1.NewCompactor()
compactor.Dir = dir
compactor.FileStore = fs
@ -759,6 +766,7 @@ func TestCompactor_CompactFull_TombstonedSkipBlock(t *testing.T) {
}
r := MustOpenTSMReader(files[0])
t.Cleanup(func() { r.Close() })
if got, exp := r.KeyCount(), 1; got != exp {
t.Fatalf("keys length mismatch: got %v, exp %v", got, exp)
@ -794,8 +802,7 @@ func TestCompactor_CompactFull_TombstonedSkipBlock(t *testing.T) {
// Ensures that a full compaction will decode and combine blocks with
// partial tombstoned values
func TestCompactor_CompactFull_TombstonedPartialBlock(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
// write 3 TSM files with different data and one new point
a1 := tsm1.NewValue(1, 1.1)
@ -803,7 +810,7 @@ func TestCompactor_CompactFull_TombstonedPartialBlock(t *testing.T) {
writes := map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a1, a2},
}
f1 := MustWriteTSM(dir, 1, writes)
f1 := MustWriteTSM(t, dir, 1, writes)
ts := tsm1.NewTombstoner(f1, nil)
// a1 should remain after compaction
@ -817,16 +824,16 @@ func TestCompactor_CompactFull_TombstonedPartialBlock(t *testing.T) {
writes = map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a3},
}
f2 := MustWriteTSM(dir, 2, writes)
f2 := MustWriteTSM(t, dir, 2, writes)
a4 := tsm1.NewValue(4, 1.4)
writes = map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a4},
}
f3 := MustWriteTSM(dir, 3, writes)
f3 := MustWriteTSM(t, dir, 3, writes)
fs := &fakeFileStore{}
defer fs.Close()
t.Cleanup(func() { fs.Close() })
compactor := tsm1.NewCompactor()
compactor.Dir = dir
compactor.FileStore = fs
@ -862,6 +869,7 @@ func TestCompactor_CompactFull_TombstonedPartialBlock(t *testing.T) {
}
r := MustOpenTSMReader(files[0])
t.Cleanup(func() { r.Close() })
if got, exp := r.KeyCount(), 1; got != exp {
t.Fatalf("keys length mismatch: got %v, exp %v", got, exp)
@ -898,8 +906,7 @@ func TestCompactor_CompactFull_TombstonedPartialBlock(t *testing.T) {
// multiple tombstoned ranges within the block, e.g. (t1, t2, t3, t4)
// having t2 and t3 removed
func TestCompactor_CompactFull_TombstonedMultipleRanges(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
// write 3 TSM files with different data and one new point
a1 := tsm1.NewValue(1, 1.1)
@ -910,7 +917,7 @@ func TestCompactor_CompactFull_TombstonedMultipleRanges(t *testing.T) {
writes := map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a1, a2, a3, a4},
}
f1 := MustWriteTSM(dir, 1, writes)
f1 := MustWriteTSM(t, dir, 1, writes)
ts := tsm1.NewTombstoner(f1, nil)
// a1, a3 should remain after compaction
@ -925,16 +932,16 @@ func TestCompactor_CompactFull_TombstonedMultipleRanges(t *testing.T) {
writes = map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a5},
}
f2 := MustWriteTSM(dir, 2, writes)
f2 := MustWriteTSM(t, dir, 2, writes)
a6 := tsm1.NewValue(6, 1.6)
writes = map[string][]tsm1.Value{
"cpu,host=A#!~#value": {a6},
}
f3 := MustWriteTSM(dir, 3, writes)
f3 := MustWriteTSM(t, dir, 3, writes)
fs := &fakeFileStore{}
defer fs.Close()
t.Cleanup(func() { fs.Close() })
compactor := tsm1.NewCompactor()
compactor.Dir = dir
compactor.FileStore = fs
@ -970,6 +977,7 @@ func TestCompactor_CompactFull_TombstonedMultipleRanges(t *testing.T) {
}
r := MustOpenTSMReader(files[0])
t.Cleanup(func() { r.Close() })
if got, exp := r.KeyCount(), 1; got != exp {
t.Fatalf("keys length mismatch: got %v, exp %v", got, exp)
@ -1009,12 +1017,11 @@ func TestCompactor_CompactFull_MaxKeys(t *testing.T) {
if testing.Short() || os.Getenv("CI") != "" || os.Getenv("GORACE") != "" {
t.Skip("Skipping max keys compaction test")
}
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
// write two files where the first contains a single key with the maximum
// number of full blocks that can fit in a TSM file
f1, f1Name := MustTSMWriter(dir, 1)
f1, f1Name := MustTSMWriter(t, dir, 1)
values := make([]tsm1.Value, 1000)
for i := 0; i < 65534; i++ {
values = values[:0]
@ -1032,7 +1039,7 @@ func TestCompactor_CompactFull_MaxKeys(t *testing.T) {
// Write a new file with 2 blocks that when compacted would exceed the max
// blocks
f2, f2Name := MustTSMWriter(dir, 2)
f2, f2Name := MustTSMWriter(t, dir, 2)
for i := 0; i < 2; i++ {
lastTimeStamp := values[len(values)-1].UnixNano() + 1
values = values[:0]
@ -1050,7 +1057,7 @@ func TestCompactor_CompactFull_MaxKeys(t *testing.T) {
f2.Close()
fs := &fakeFileStore{}
defer fs.Close()
t.Cleanup(func() { fs.Close() })
compactor := tsm1.NewCompactor()
compactor.Dir = dir
compactor.FileStore = fs
@ -1088,20 +1095,21 @@ func TestCompactor_CompactFull_MaxKeys(t *testing.T) {
// Tests that a single TSM file can be read and iterated over
func TestTSMKeyIterator_Single(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
v1 := tsm1.NewValue(1, 1.1)
writes := map[string][]tsm1.Value{
"cpu,host=A#!~#value": {v1},
}
r := MustTSMReader(dir, 1, writes)
r := MustTSMReader(t, dir, 1, writes)
t.Cleanup(func() { r.Close() })
iter, err := newTSMKeyIterator(1, false, nil, r)
if err != nil {
t.Fatalf("unexpected error creating WALKeyIterator: %v", err)
}
t.Cleanup(func() { iter.Close() })
var readValues bool
for iter.Next() {
@ -1148,8 +1156,7 @@ func newTSMKeyIterator(size int, fast bool, interrupt chan struct{}, readers ...
// No data is lost but the same point time/value would exist in two files until
// compaction corrects it.
func TestTSMKeyIterator_Duplicate(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
v1 := tsm1.NewValue(1, int64(1))
v2 := tsm1.NewValue(1, int64(2))
@ -1158,18 +1165,21 @@ func TestTSMKeyIterator_Duplicate(t *testing.T) {
"cpu,host=A#!~#value": {v1},
}
r1 := MustTSMReader(dir, 1, writes1)
r1 := MustTSMReader(t, dir, 1, writes1)
t.Cleanup(func() { r1.Close() })
writes2 := map[string][]tsm1.Value{
"cpu,host=A#!~#value": {v2},
}
r2 := MustTSMReader(dir, 2, writes2)
r2 := MustTSMReader(t, dir, 2, writes2)
t.Cleanup(func() { r2.Close() })
iter, err := newTSMKeyIterator(1, false, nil, r1, r2)
if err != nil {
t.Fatalf("unexpected error creating WALKeyIterator: %v", err)
}
t.Cleanup(func() { iter.Close() })
var readValues bool
for iter.Next() {
@ -1203,18 +1213,18 @@ func TestTSMKeyIterator_Duplicate(t *testing.T) {
// Tests that deleted keys are not seen during iteration with
// TSM files.
func TestTSMKeyIterator_MultipleKeysDeleted(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
v1 := tsm1.NewValue(2, int64(1))
points1 := map[string][]tsm1.Value{
"cpu,host=A#!~#value": {v1},
}
r1 := MustTSMReader(dir, 1, points1)
r1 := MustTSMReader(t, dir, 1, points1)
if e := r1.Delete([][]byte{[]byte("cpu,host=A#!~#value")}); nil != e {
t.Fatal(e)
}
t.Cleanup(func() { r1.Close() })
v2 := tsm1.NewValue(1, float64(1))
v3 := tsm1.NewValue(1, float64(1))
@ -1224,7 +1234,8 @@ func TestTSMKeyIterator_MultipleKeysDeleted(t *testing.T) {
"cpu,host=B#!~#value": {v3},
}
r2 := MustTSMReader(dir, 2, points2)
r2 := MustTSMReader(t, dir, 2, points2)
t.Cleanup(func() { r2.Close() })
r2.Delete([][]byte{[]byte("cpu,host=A#!~#count")})
iter, err := newTSMKeyIterator(1, false, nil, r1, r2)
@ -1272,8 +1283,7 @@ func TestTSMKeyIterator_MultipleKeysDeleted(t *testing.T) {
// Tests that deleted keys are not seen during iteration with
// TSM files.
func TestTSMKeyIterator_SingleDeletes(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
v1 := tsm1.NewValue(10, int64(1))
v2 := tsm1.NewValue(20, int64(1))
@ -1290,7 +1300,8 @@ func TestTSMKeyIterator_SingleDeletes(t *testing.T) {
"cpu,host=D#!~#value": {v1, v2},
}
r1 := MustTSMReader(dir, 1, points1)
r1 := MustTSMReader(t, dir, 1, points1)
t.Cleanup(func() { r1.Close() })
if e := r1.DeleteRange([][]byte{[]byte("cpu,host=A#!~#value")}, 50, 50); nil != e {
t.Fatal(e)
@ -1312,6 +1323,7 @@ func TestTSMKeyIterator_SingleDeletes(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error creating WALKeyIterator: %v", err)
}
t.Cleanup(func() { iter.Close() })
var readValues int
var data = []struct {
@ -1354,21 +1366,22 @@ func TestTSMKeyIterator_SingleDeletes(t *testing.T) {
// Tests that the TSMKeyIterator will abort if the interrupt channel is closed
func TestTSMKeyIterator_Abort(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
v1 := tsm1.NewValue(1, 1.1)
writes := map[string][]tsm1.Value{
"cpu,host=A#!~#value": {v1},
}
r := MustTSMReader(dir, 1, writes)
r := MustTSMReader(t, dir, 1, writes)
t.Cleanup(func() { r.Close() })
intC := make(chan struct{})
iter, err := newTSMKeyIterator(1, false, intC, r)
if err != nil {
t.Fatalf("unexpected error creating WALKeyIterator: %v", err)
}
t.Cleanup(func() { iter.Close() })
var aborted bool
for iter.Next() {
@ -2966,8 +2979,8 @@ func assertValueEqual(t *testing.T, a, b tsm1.Value) {
}
}
func MustTSMWriter(dir string, gen int) (tsm1.TSMWriter, string) {
f := MustTempFile(dir)
func MustTSMWriter(tb testing.TB, dir string, gen int) (tsm1.TSMWriter, string) {
f := MustTempFile(tb, dir)
oldName := f.Name()
// Windows can't rename a file while it's open. Close first, rename and
@ -2995,8 +3008,8 @@ func MustTSMWriter(dir string, gen int) (tsm1.TSMWriter, string) {
return w, newName
}
func MustWriteTSM(dir string, gen int, values map[string][]tsm1.Value) string {
w, name := MustTSMWriter(dir, gen)
func MustWriteTSM(tb testing.TB, dir string, gen int, values map[string][]tsm1.Value) string {
w, name := MustTSMWriter(tb, dir, gen)
keys := make([]string, 0, len(values))
for k := range values {
@ -3021,8 +3034,8 @@ func MustWriteTSM(dir string, gen int, values map[string][]tsm1.Value) string {
return name
}
func MustTSMReader(dir string, gen int, values map[string][]tsm1.Value) *tsm1.TSMReader {
return MustOpenTSMReader(MustWriteTSM(dir, gen, values))
func MustTSMReader(tb testing.TB, dir string, gen int, values map[string][]tsm1.Value) *tsm1.TSMReader {
return MustOpenTSMReader(MustWriteTSM(tb, dir, gen, values))
}
func MustOpenTSMReader(name string) *tsm1.TSMReader {

View File

@ -13,13 +13,13 @@ import (
)
func TestDigest_None(t *testing.T) {
dir := MustTempDir()
dir := t.TempDir()
dataDir := filepath.Join(dir, "data")
if err := os.Mkdir(dataDir, 0755); err != nil {
t.Fatalf("create data dir: %v", err)
}
df := MustTempFile(dir)
df := MustTempFile(t, dir)
files := []string{}
if err := tsm1.Digest(dir, files, df); err != nil {
@ -62,7 +62,7 @@ func TestDigest_None(t *testing.T) {
}
func TestDigest_One(t *testing.T) {
dir := MustTempDir()
dir := t.TempDir()
dataDir := filepath.Join(dir, "data")
if err := os.Mkdir(dataDir, 0755); err != nil {
t.Fatalf("create data dir: %v", err)
@ -72,14 +72,14 @@ func TestDigest_One(t *testing.T) {
writes := map[string][]tsm1.Value{
"cpu,host=A#!~#value": []tsm1.Value{a1},
}
MustWriteTSM(dir, 1, writes)
MustWriteTSM(t, dir, 1, writes)
files, err := filepath.Glob(filepath.Join(dir, fmt.Sprintf("*.%s", tsm1.TSMFileExtension)))
if err != nil {
t.Fatal(err)
}
df := MustTempFile(dir)
df := MustTempFile(t, dir)
if err := tsm1.Digest(dir, files, df); err != nil {
t.Fatalf("digest error: %v", err)
@ -125,7 +125,7 @@ func TestDigest_One(t *testing.T) {
}
func TestDigest_TimeFilter(t *testing.T) {
dir := MustTempDir()
dir := t.TempDir()
dataDir := filepath.Join(dir, "data")
if err := os.Mkdir(dataDir, 0755); err != nil {
t.Fatalf("create data dir: %v", err)
@ -135,26 +135,26 @@ func TestDigest_TimeFilter(t *testing.T) {
writes := map[string][]tsm1.Value{
"cpu,host=A#!~#value": []tsm1.Value{a1},
}
MustWriteTSM(dir, 1, writes)
MustWriteTSM(t, dir, 1, writes)
a2 := tsm1.NewValue(2, 2.1)
writes = map[string][]tsm1.Value{
"cpu,host=A#!~#value": []tsm1.Value{a2},
}
MustWriteTSM(dir, 2, writes)
MustWriteTSM(t, dir, 2, writes)
a3 := tsm1.NewValue(3, 3.1)
writes = map[string][]tsm1.Value{
"cpu,host=A#!~#value": []tsm1.Value{a3},
}
MustWriteTSM(dir, 3, writes)
MustWriteTSM(t, dir, 3, writes)
files, err := filepath.Glob(filepath.Join(dir, fmt.Sprintf("*.%s", tsm1.TSMFileExtension)))
if err != nil {
t.Fatal(err)
}
df := MustTempFile(dir)
df := MustTempFile(t, dir)
if err := tsm1.DigestWithOptions(dir, files, tsm1.DigestOptions{MinTime: 2, MaxTime: 2}, df); err != nil {
t.Fatalf("digest error: %v", err)
@ -206,7 +206,7 @@ func TestDigest_TimeFilter(t *testing.T) {
}
func TestDigest_KeyFilter(t *testing.T) {
dir := MustTempDir()
dir := t.TempDir()
dataDir := filepath.Join(dir, "data")
if err := os.Mkdir(dataDir, 0755); err != nil {
t.Fatalf("create data dir: %v", err)
@ -216,26 +216,26 @@ func TestDigest_KeyFilter(t *testing.T) {
writes := map[string][]tsm1.Value{
"cpu,host=A#!~#value": []tsm1.Value{a1},
}
MustWriteTSM(dir, 1, writes)
MustWriteTSM(t, dir, 1, writes)
a2 := tsm1.NewValue(2, 2.1)
writes = map[string][]tsm1.Value{
"cpu,host=B#!~#value": []tsm1.Value{a2},
}
MustWriteTSM(dir, 2, writes)
MustWriteTSM(t, dir, 2, writes)
a3 := tsm1.NewValue(3, 3.1)
writes = map[string][]tsm1.Value{
"cpu,host=C#!~#value": []tsm1.Value{a3},
}
MustWriteTSM(dir, 3, writes)
MustWriteTSM(t, dir, 3, writes)
files, err := filepath.Glob(filepath.Join(dir, fmt.Sprintf("*.%s", tsm1.TSMFileExtension)))
if err != nil {
t.Fatal(err)
}
df := MustTempFile(dir)
df := MustTempFile(t, dir)
if err := tsm1.DigestWithOptions(dir, files, tsm1.DigestOptions{
MinKey: []byte("cpu,host=B#!~#value"),
@ -284,8 +284,7 @@ func TestDigest_KeyFilter(t *testing.T) {
func TestDigest_Manifest(t *testing.T) {
// Create temp directory to hold test files.
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
digestFile := filepath.Join(dir, tsm1.DigestFilename)
@ -299,7 +298,7 @@ func TestDigest_Manifest(t *testing.T) {
var files []string
gen := 1
for ; gen < 4; gen++ {
name := MustWriteTSM(dir, gen, writes)
name := MustWriteTSM(t, dir, gen, writes)
files = append(files, name)
}
@ -363,7 +362,7 @@ func TestDigest_Manifest(t *testing.T) {
}
// Write an extra tsm file that shouldn't be included in the manifest.
extra := MustWriteTSM(dir, gen, writes)
extra := MustWriteTSM(t, dir, gen, writes)
// Re-generate manifest.
mfest, err = tsm1.NewDigestManifest(dir, files)

View File

@ -12,7 +12,7 @@ import (
// Test that an error is returned if a manifest isn't the first thing written
// to a digest.
func TestEngine_DigestManifestNotWritten(t *testing.T) {
f := MustTempFile("")
f := MustTempFile(t, "")
w, err := tsm1.NewDigestWriter(f)
if err != nil {
t.Fatalf("NewDigestWriter: %v", err)
@ -30,7 +30,7 @@ func TestEngine_DigestManifestNotWritten(t *testing.T) {
// Test that a digest reader will skip over the manifest without error
// if needed.
func TestEngine_DigestReadSkipsManifest(t *testing.T) {
f := MustTempFile("")
f := MustTempFile(t, "")
w, err := tsm1.NewDigestWriter(f)
if err != nil {
t.Fatalf("NewDigestWriter: %v", err)
@ -85,7 +85,7 @@ func TestEngine_DigestReadSkipsManifest(t *testing.T) {
// Test that we get an error if a digest manifest is written twice.
func TestEngine_DigestManifestDoubleWrite(t *testing.T) {
f := MustTempFile("")
f := MustTempFile(t, "")
w, err := tsm1.NewDigestWriter(f)
if err != nil {
t.Fatalf("NewDigestWriter: %v", err)
@ -103,7 +103,7 @@ func TestEngine_DigestManifestDoubleWrite(t *testing.T) {
// Test that we get an error if the manifest is read twice.
func TestEngine_DigestManifestDoubleRead(t *testing.T) {
f := MustTempFile("")
f := MustTempFile(t, "")
w, err := tsm1.NewDigestWriter(f)
if err != nil {
t.Fatalf("NewDigestWriter: %v", err)
@ -143,7 +143,7 @@ func TestEngine_DigestManifestDoubleRead(t *testing.T) {
// Test writing and reading a digest.
func TestEngine_DigestWriterReader(t *testing.T) {
f := MustTempFile("")
f := MustTempFile(t, "")
w, err := tsm1.NewDigestWriter(f)
if err != nil {
t.Fatalf("NewDigestWriter: %v", err)

View File

@ -14,9 +14,7 @@ import (
)
func TestEngine_ConcurrentShardSnapshots(t *testing.T) {
tmpDir, err := os.MkdirTemp("", "shard_test")
require.NoError(t, err, "error creating temporary directory")
defer os.RemoveAll(tmpDir)
tmpDir := t.TempDir()
tmpShard := filepath.Join(tmpDir, "shard")
tmpWal := filepath.Join(tmpDir, "wal")
@ -41,7 +39,7 @@ func TestEngine_ConcurrentShardSnapshots(t *testing.T) {
time.Unix(int64(i), 0),
))
}
err = sh.WritePoints(context.Background(), points)
err := sh.WritePoints(context.Background(), points)
require.NoError(t, err)
engineInterface, err := sh.Engine()
@ -82,10 +80,7 @@ func TestEngine_ConcurrentShardSnapshots(t *testing.T) {
func NewSeriesFile(tb testing.TB, tmpDir string) *tsdb.SeriesFile {
tb.Helper()
dir, err := os.MkdirTemp(tmpDir, "tsdb-series-file-")
if err != nil {
panic(err)
}
dir := tb.TempDir()
f := tsdb.NewSeriesFile(dir)
f.Logger = zaptest.NewLogger(tb)
if err := f.Open(); err != nil {

View File

@ -36,7 +36,6 @@ func TestEngine_DeleteWALLoadMetadata(t *testing.T) {
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {
e := MustOpenEngine(t, index)
defer e.Close()
if err := e.WritePointsString(
`cpu,host=A value=1.1 1000000000`,
@ -72,7 +71,6 @@ func TestEngine_DeleteSeriesAfterCacheSnapshot(t *testing.T) {
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {
e := MustOpenEngine(t, index)
defer e.Close()
if err := e.WritePointsString(
`cpu,host=A value=1.1 1000000000`,
@ -176,7 +174,6 @@ func seriesExist(e *Engine, m string, dims []string) (int, error) {
// Ensure that the engine can write & read shard digest files.
func TestEngine_Digest(t *testing.T) {
e := MustOpenEngine(t, tsi1.IndexName)
defer e.Close()
if err := e.Open(context.Background()); err != nil {
t.Fatalf("failed to open tsm1 engine: %s", err.Error())
@ -324,7 +321,6 @@ type span struct {
// Ensure engine handles concurrent calls to Digest().
func TestEngine_Digest_Concurrent(t *testing.T) {
e := MustOpenEngine(t, tsi1.IndexName)
defer e.Close()
if err := e.Open(context.Background()); err != nil {
t.Fatalf("failed to open tsm1 engine: %s", err.Error())
@ -352,9 +348,11 @@ func TestEngine_Digest_Concurrent(t *testing.T) {
go func() {
defer wg.Done()
<-start
if _, _, err := e.Digest(); err != nil {
r, _, err := e.Digest()
if err != nil {
errs <- err
}
r.Close()
}()
}
@ -374,7 +372,7 @@ func TestEngine_Digest_Concurrent(t *testing.T) {
// Ensure that the engine will backup any TSM files created since the passed in time
func TestEngine_Backup(t *testing.T) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(t)
defer sfile.Close()
// Generate temporary file.
@ -509,7 +507,7 @@ func TestEngine_Export(t *testing.T) {
p2 := MustParsePointString("cpu,host=B value=1.2 2000000000")
p3 := MustParsePointString("cpu,host=C value=1.3 3000000000")
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(t)
defer sfile.Close()
// Write those points to the engine.
@ -766,7 +764,6 @@ func TestEngine_CreateIterator_Cache_Ascending(t *testing.T) {
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {
e := MustOpenEngine(t, index)
defer e.Close()
e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
@ -823,7 +820,6 @@ func TestEngine_CreateIterator_Cache_Descending(t *testing.T) {
t.Run(index, func(t *testing.T) {
e := MustOpenEngine(t, index)
defer e.Close()
e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
@ -879,7 +875,6 @@ func TestEngine_CreateIterator_TSM_Ascending(t *testing.T) {
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {
e := MustOpenEngine(t, index)
defer e.Close()
e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
@ -937,7 +932,6 @@ func TestEngine_CreateIterator_TSM_Descending(t *testing.T) {
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {
e := MustOpenEngine(t, index)
defer e.Close()
e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
@ -995,7 +989,6 @@ func TestEngine_CreateIterator_Aux(t *testing.T) {
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {
e := MustOpenEngine(t, index)
defer e.Close()
e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("F"), influxql.Float)
@ -1055,7 +1048,6 @@ func TestEngine_CreateIterator_Condition(t *testing.T) {
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) {
e := MustOpenEngine(t, index)
defer e.Close()
e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("X"), influxql.Float)
@ -1112,7 +1104,6 @@ func TestEngine_CreateIterator_Condition(t *testing.T) {
func TestIndex_SeriesIDSet(t *testing.T) {
test := func(t *testing.T, index string) error {
engine := MustOpenEngine(t, index)
defer engine.Close()
// Add some series.
engine.MustAddSeries("cpu", map[string]string{"host": "a", "region": "west"})
@ -1224,7 +1215,6 @@ func TestEngine_DeleteSeries(t *testing.T) {
if err := e.Open(context.Background()); err != nil {
t.Fatal(err)
}
defer e.Close()
if err := e.writePoints(p1, p2, p3); err != nil {
t.Fatalf("failed to write points: %s", err.Error())
@ -1279,7 +1269,6 @@ func TestEngine_DeleteSeriesRange(t *testing.T) {
if err := e.Open(context.Background()); err != nil {
t.Fatal(err)
}
defer e.Close()
for _, p := range []models.Point{p1, p2, p3, p4, p5, p6, p7, p8} {
if err := e.CreateSeriesIfNotExists(p.Key(), p.Name(), p.Tags()); err != nil {
@ -1389,7 +1378,6 @@ func TestEngine_DeleteSeriesRangeWithPredicate(t *testing.T) {
if err := e.Open(context.Background()); err != nil {
t.Fatal(err)
}
defer e.Close()
for _, p := range []models.Point{p1, p2, p3, p4, p5, p6, p7, p8} {
if err := e.CreateSeriesIfNotExists(p.Key(), p.Name(), p.Tags()); err != nil {
@ -1515,7 +1503,6 @@ func TestEngine_DeleteSeriesRangeWithPredicate_Nil(t *testing.T) {
if err := e.Open(context.Background()); err != nil {
t.Fatal(err)
}
defer e.Close()
for _, p := range []models.Point{p1, p2, p3, p4, p5, p6, p7, p8} {
if err := e.CreateSeriesIfNotExists(p.Key(), p.Name(), p.Tags()); err != nil {
@ -1601,7 +1588,6 @@ func TestEngine_DeleteSeriesRangeWithPredicate_FlushBatch(t *testing.T) {
if err := e.Open(context.Background()); err != nil {
t.Fatal(err)
}
defer e.Close()
for _, p := range []models.Point{p1, p2, p3, p4, p5, p6, p7, p8} {
if err := e.CreateSeriesIfNotExists(p.Key(), p.Name(), p.Tags()); err != nil {
@ -1720,7 +1706,6 @@ func TestEngine_DeleteSeriesRange_OutsideTime(t *testing.T) {
if err := e.Open(context.Background()); err != nil {
t.Fatal(err)
}
defer e.Close()
for _, p := range []models.Point{p1} {
if err := e.CreateSeriesIfNotExists(p.Key(), p.Name(), p.Tags()); err != nil {
@ -1798,7 +1783,6 @@ func TestEngine_LastModified(t *testing.T) {
e.CompactionPlan = &mockPlanner{}
e.SetEnabled(false)
require.NoError(t, e.Open(context.Background()))
defer e.Close()
require.NoError(t, e.writePoints(p1, p2, p3))
lm := e.LastModified()
@ -1826,22 +1810,22 @@ func TestEngine_LastModified(t *testing.T) {
}
func TestEngine_SnapshotsDisabled(t *testing.T) {
sfile := MustOpenSeriesFile()
defer sfile.Close()
sfile := MustOpenSeriesFile(t)
t.Cleanup(func() { sfile.Close() })
// Generate temporary file.
dir, _ := os.MkdirTemp("", "tsm")
dir := t.TempDir()
walPath := filepath.Join(dir, "wal")
os.MkdirAll(walPath, 0777)
defer os.RemoveAll(dir)
// Create a tsm1 engine.
db := path.Base(dir)
opt := tsdb.NewEngineOptions()
idx := tsdb.MustOpenIndex(1, db, filepath.Join(dir, "index"), tsdb.NewSeriesIDSet(), sfile.SeriesFile, opt)
defer idx.Close()
t.Cleanup(func() { idx.Close() })
e := tsm1.NewEngine(1, idx, dir, walPath, sfile.SeriesFile, opt).(*tsm1.Engine)
t.Cleanup(func() { e.Close() })
// mock the planner so compactions don't run during the test
e.CompactionPlan = &mockPlanner{}
@ -1869,7 +1853,6 @@ func TestEngine_ShouldCompactCache(t *testing.T) {
if err != nil {
t.Fatal(err)
}
defer e.Close()
// mock the planner so compactions don't run during the test
e.CompactionPlan = &mockPlanner{}
@ -1911,7 +1894,6 @@ func TestEngine_CreateCursor_Ascending(t *testing.T) {
t.Run(index, func(t *testing.T) {
e := MustOpenEngine(t, index)
defer e.Close()
e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
@ -1971,7 +1953,6 @@ func TestEngine_CreateCursor_Descending(t *testing.T) {
t.Run(index, func(t *testing.T) {
e := MustOpenEngine(t, index)
defer e.Close()
e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
@ -2031,7 +2012,6 @@ func TestEngine_CreateIterator_SeriesKey(t *testing.T) {
t.Run(index, func(t *testing.T) {
assert := tassert.New(t)
e := MustOpenEngine(t, index)
defer e.Close()
e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
e.CreateSeriesIfNotExists([]byte("cpu,host=A,region=east"), []byte("cpu"), models.NewTags(map[string]string{"host": "A", "region": "east"}))
@ -2127,7 +2107,6 @@ func TestEngine_DisableEnableCompactions_Concurrent(t *testing.T) {
t.Run(index, func(t *testing.T) {
e := MustOpenEngine(t, index)
defer e.Close()
var wg sync.WaitGroup
wg.Add(2)
@ -2172,7 +2151,6 @@ func TestEngine_WritePoints_TypeConflict(t *testing.T) {
t.Run(index, func(t *testing.T) {
e := MustOpenEngine(t, index)
defer e.Close()
if err := e.WritePointsString(
`cpu,host=A value=1.1 1`,
@ -2208,7 +2186,6 @@ func TestEngine_WritePoints_Reload(t *testing.T) {
t.Run(index, func(t *testing.T) {
e := MustOpenEngine(t, index)
defer e.Close()
if err := e.WritePointsString(
`cpu,host=A value=1.1 1`,
@ -2260,7 +2237,6 @@ func TestEngine_Invalid_UTF8(t *testing.T) {
if err := e.Open(context.Background()); err != nil {
t.Fatal(err)
}
defer e.Close()
if err := e.CreateSeriesIfNotExists(p.Key(), p.Name(), p.Tags()); err != nil {
t.Fatalf("create series index error: %v", err)
@ -2298,7 +2274,6 @@ func BenchmarkEngine_WritePoints(b *testing.B) {
}
}
})
e.Close()
}
}
}
@ -2347,7 +2322,6 @@ func BenchmarkEngine_WritePoints_Parallel(b *testing.B) {
}
}
})
e.Close()
}
}
}
@ -2589,14 +2563,13 @@ type Engine struct {
sfile *tsdb.SeriesFile
}
// NewEngine returns a new instance of Engine at a temporary location.
// NewEngine returns a new instance of Engine at a temporary location. The
// Engine is automatically closed by tb.Cleanup when the test and all its
// subtests complete.
func NewEngine(tb testing.TB, index string) (*Engine, error) {
tb.Helper()
root, err := os.MkdirTemp("", "tsm1-")
if err != nil {
panic(err)
}
root := tb.TempDir()
db := "db0"
dbPath := filepath.Join(root, "data", db)
@ -2608,7 +2581,7 @@ func NewEngine(tb testing.TB, index string) (*Engine, error) {
// Setup series file.
sfile := tsdb.NewSeriesFile(filepath.Join(dbPath, tsdb.SeriesFileDirectory))
sfile.Logger = zaptest.NewLogger(tb)
if err = sfile.Open(); err != nil {
if err := sfile.Open(); err != nil {
return nil, err
}
@ -2624,14 +2597,17 @@ func NewEngine(tb testing.TB, index string) (*Engine, error) {
tsm1Engine := tsm1.NewEngine(1, idx, filepath.Join(root, "data"), filepath.Join(root, "wal"), sfile, opt).(*tsm1.Engine)
return &Engine{
e := &Engine{
Engine: tsm1Engine,
root: root,
indexPath: idxPath,
indexType: index,
index: idx,
sfile: sfile,
}, nil
}
tb.Cleanup(func() { e.Close() })
return e, nil
}
// MustOpenEngine returns a new, open instance of Engine.
@ -2766,17 +2742,13 @@ type SeriesFile struct {
}
// NewSeriesFile returns a new instance of SeriesFile with a temporary file path.
func NewSeriesFile() *SeriesFile {
dir, err := os.MkdirTemp("", "tsdb-series-file-")
if err != nil {
panic(err)
}
return &SeriesFile{SeriesFile: tsdb.NewSeriesFile(dir)}
func NewSeriesFile(tb testing.TB) *SeriesFile {
return &SeriesFile{SeriesFile: tsdb.NewSeriesFile(tb.TempDir())}
}
// MustOpenSeriesFile returns a new, open instance of SeriesFile. Panic on error.
func MustOpenSeriesFile() *SeriesFile {
f := NewSeriesFile()
func MustOpenSeriesFile(tb testing.TB) *SeriesFile {
f := NewSeriesFile(tb)
if err := f.Open(); err != nil {
panic(err)
}

View File

@ -2,7 +2,6 @@ package tsm1_test
import (
"context"
"os"
"testing"
"github.com/google/go-cmp/cmp"
@ -310,11 +309,10 @@ func TestFileStore_Array(t *testing.T) {
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
fs := tsm1.NewFileStore(dir, tsdb.EngineTags{})
dir := t.TempDir()
fs := newTestFileStore(t, dir)
files, err := newFiles(dir, tc.data...)
files, err := newFiles(t, dir, tc.data...)
if err != nil {
t.Fatalf("unexpected error creating files: %v", err)
}
@ -324,6 +322,7 @@ func TestFileStore_Array(t *testing.T) {
// Delete part of the block in the first file.
r := MustOpenTSMReader(files[del.f])
r.DeleteRange([][]byte{[]byte("cpu")}, del.min, del.max)
r.Close()
}
}
@ -339,6 +338,7 @@ func TestFileStore_Array(t *testing.T) {
buf := tsdb.NewFloatArrayLen(1000)
c := fs.KeyCursor(context.Background(), []byte("cpu"), tc.time, tc.asc)
t.Cleanup(c.Close)
for i, read := range tc.reads {
// Search for an entry that exists in the second file

File diff suppressed because it is too large Load Diff

View File

@ -238,6 +238,7 @@ func NewTSMReader(f *os.File, options ...tsmReaderOption) (*TSMReader, error) {
index, err := t.accessor.init()
if err != nil {
_ = t.accessor.close()
return nil, err
}

View File

@ -17,8 +17,7 @@ func fatal(t *testing.T, msg string, err error) {
}
func TestTSMReader_Type(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
w, err := NewTSMWriter(f)
@ -47,6 +46,7 @@ func TestTSMReader_Type(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error created reader: %v", err)
}
t.Cleanup(func() { r.Close() })
typ, err := r.Type([]byte("cpu"))
if err != nil {
@ -59,8 +59,7 @@ func TestTSMReader_Type(t *testing.T) {
}
func TestTSMReader_MMAP_ReadAll(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -133,8 +132,7 @@ func TestTSMReader_MMAP_ReadAll(t *testing.T) {
}
func TestTSMReader_MMAP_Read(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -212,8 +210,7 @@ func TestTSMReader_MMAP_Read(t *testing.T) {
}
func TestTSMReader_MMAP_Keys(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -291,10 +288,9 @@ func TestTSMReader_MMAP_Keys(t *testing.T) {
}
func TestTSMReader_MMAP_Tombstone(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
t.Cleanup(func() { f.Close() })
w, err := NewTSMWriter(f)
if err != nil {
@ -323,29 +319,29 @@ func TestTSMReader_MMAP_Tombstone(t *testing.T) {
t.Fatalf("unexpected error open file: %v", err)
}
r, err := NewTSMReader(f)
r1, err := NewTSMReader(f)
if err != nil {
t.Fatalf("unexpected error created reader: %v", err)
}
t.Cleanup(func() { r1.Close() })
if err := r.Delete([][]byte{[]byte("mem")}); err != nil {
if err := r1.Delete([][]byte{[]byte("mem")}); err != nil {
t.Fatalf("unexpected error deleting: %v", err)
}
r, err = NewTSMReader(f)
r2, err := NewTSMReader(f)
if err != nil {
t.Fatalf("unexpected error created reader: %v", err)
}
defer r.Close()
t.Cleanup(func() { r2.Close() })
if got, exp := r.KeyCount(), 1; got != exp {
if got, exp := r2.KeyCount(), 1; got != exp {
t.Fatalf("key length mismatch: got %v, exp %v", got, exp)
}
}
func TestTSMReader_MMAP_TombstoneRange(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -409,8 +405,7 @@ func TestTSMReader_MMAP_TombstoneRange(t *testing.T) {
}
func TestTSMReader_MMAP_TombstoneOutsideTimeRange(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -471,8 +466,7 @@ func TestTSMReader_MMAP_TombstoneOutsideTimeRange(t *testing.T) {
}
func TestTSMReader_MMAP_TombstoneOutsideKeyRange(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -533,8 +527,7 @@ func TestTSMReader_MMAP_TombstoneOutsideKeyRange(t *testing.T) {
}
func TestTSMReader_MMAP_TombstoneOverlapKeyRange(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -599,8 +592,7 @@ func TestTSMReader_MMAP_TombstoneOverlapKeyRange(t *testing.T) {
}
func TestTSMReader_MMAP_TombstoneFullRange(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -652,8 +644,7 @@ func TestTSMReader_MMAP_TombstoneFullRange(t *testing.T) {
}
func TestTSMReader_MMAP_TombstoneFullRangeMultiple(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -725,8 +716,7 @@ func TestTSMReader_MMAP_TombstoneFullRangeMultiple(t *testing.T) {
}
func TestTSMReader_MMAP_TombstoneMultipleRanges(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -784,8 +774,7 @@ func TestTSMReader_MMAP_TombstoneMultipleRanges(t *testing.T) {
}
func TestTSMReader_MMAP_TombstoneMultipleRangesFull(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -844,8 +833,7 @@ func TestTSMReader_MMAP_TombstoneMultipleRangesFull(t *testing.T) {
}
func TestTSMReader_MMAP_TombstoneMultipleRangesNoOverlap(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -905,8 +893,7 @@ func TestTSMReader_MMAP_TombstoneMultipleRangesNoOverlap(t *testing.T) {
}
func TestTSMReader_MMAP_TombstoneOutsideRange(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -989,8 +976,7 @@ func TestTSMReader_MMAP_TombstoneOutsideRange(t *testing.T) {
}
func TestTSMReader_MMAP_Stats(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -1052,8 +1038,7 @@ func TestTSMReader_MMAP_Stats(t *testing.T) {
// Ensure that we return an error if we try to open a non-tsm file
func TestTSMReader_VerifiesFileType(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -1184,8 +1169,7 @@ func TestDirectIndex_KeyCount(t *testing.T) {
}
func TestBlockIterator_Single(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
w, err := NewTSMWriter(f)
@ -1215,6 +1199,7 @@ func TestBlockIterator_Single(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error created reader: %v", err)
}
t.Cleanup(func() { r.Close() })
var count int
iter := r.BlockIterator()
@ -1253,8 +1238,7 @@ func TestBlockIterator_Single(t *testing.T) {
}
func TestBlockIterator_Tombstone(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
w, err := NewTSMWriter(f)
@ -1288,6 +1272,7 @@ func TestBlockIterator_Tombstone(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error created reader: %v", err)
}
t.Cleanup(func() { r.Close() })
iter := r.BlockIterator()
for iter.Next() {
@ -1302,8 +1287,7 @@ func TestBlockIterator_Tombstone(t *testing.T) {
}
func TestBlockIterator_MultipleBlocks(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
w, err := NewTSMWriter(f)
@ -1338,6 +1322,7 @@ func TestBlockIterator_MultipleBlocks(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error created reader: %v", err)
}
t.Cleanup(func() { r.Close() })
var count int
expData := []Values{values1, values2}
@ -1380,8 +1365,7 @@ func TestBlockIterator_MultipleBlocks(t *testing.T) {
}
func TestBlockIterator_Sorted(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
w, err := NewTSMWriter(f)
@ -1406,7 +1390,6 @@ func TestBlockIterator_Sorted(t *testing.T) {
for _, k := range keys {
if err := w.Write([]byte(k), values[k]); err != nil {
t.Fatalf("unexpected error writing: %v", err)
}
}
@ -1427,6 +1410,7 @@ func TestBlockIterator_Sorted(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error created reader: %v", err)
}
t.Cleanup(func() { r.Close() })
var count int
iter := r.BlockIterator()
@ -1457,8 +1441,7 @@ func TestBlockIterator_Sorted(t *testing.T) {
}
func TestIndirectIndex_UnmarshalBinary_BlockCountOverflow(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -1492,8 +1475,7 @@ func TestIndirectIndex_UnmarshalBinary_BlockCountOverflow(t *testing.T) {
}
func TestCompacted_NotFull(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
w, err := NewTSMWriter(f)
@ -1523,6 +1505,7 @@ func TestCompacted_NotFull(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error created reader: %v", err)
}
t.Cleanup(func() { r.Close() })
iter := r.BlockIterator()
if !iter.Next() {
@ -1544,8 +1527,7 @@ func TestCompacted_NotFull(t *testing.T) {
}
func TestTSMReader_File_ReadAll(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -1652,48 +1634,43 @@ func TestTSMReader_FuzzCrashes(t *testing.T) {
}
for _, c := range cases {
func() {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
filename := filepath.Join(dir, "x.tsm")
if err := os.WriteFile(filename, []byte(c), 0600); err != nil {
t.Fatalf("exp no error, got %s", err)
}
defer os.RemoveAll(dir)
filename := filepath.Join(dir, "x.tsm")
if err := os.WriteFile(filename, []byte(c), 0600); err != nil {
t.Fatalf("exp no error, got %s", err)
}
f, err := os.Open(filename)
if err != nil {
t.Fatalf("exp no error, got %s", err)
}
defer f.Close()
f, err := os.Open(filename)
if err != nil {
t.Fatalf("exp no error, got %s", err)
}
t.Cleanup(func() { f.Close() })
r, err := NewTSMReader(f)
r, err := NewTSMReader(f)
if err != nil {
return
}
t.Cleanup(func() { r.Close() })
iter := r.BlockIterator()
for iter.Next() {
key, _, _, _, _, _, err := iter.Read()
if err != nil {
return
}
defer r.Close()
iter := r.BlockIterator()
for iter.Next() {
key, _, _, _, _, _, err := iter.Read()
if err != nil {
return
}
_, _ = r.Type(key)
_, _ = r.Type(key)
if _, err = r.ReadAll(key); err != nil {
return
}
if _, err = r.ReadAll(key); err != nil {
return
}
}()
}
}
}
func TestTSMReader_File_Read(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -1771,8 +1748,7 @@ func TestTSMReader_File_Read(t *testing.T) {
}
func TestTSMReader_References(t *testing.T) {
dir := mustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
f := mustTempFile(dir)
defer f.Close()
@ -1915,7 +1891,7 @@ func TestBatchKeyIterator_Errors(t *testing.T) {
}
func createTestTSM(t *testing.T) (dir string, name string) {
dir = MustTempDir()
dir = t.TempDir()
f := mustTempFile(dir)
name = f.Name()
w, err := NewTSMWriter(f)

View File

@ -10,10 +10,9 @@ import (
)
func TestTombstoner_Add(t *testing.T) {
dir := MustTempDir()
defer func() { os.RemoveAll(dir) }()
dir := t.TempDir()
f := MustTempFile(dir)
f := MustTempFile(t, dir)
ts := tsm1.NewTombstoner(f.Name(), nil)
entries := mustReadAll(ts)
@ -58,10 +57,9 @@ func TestTombstoner_Add(t *testing.T) {
}
func TestTombstoner_Add_LargeKey(t *testing.T) {
dir := MustTempDir()
defer func() { os.RemoveAll(dir) }()
dir := t.TempDir()
f := MustTempFile(dir)
f := MustTempFile(t, dir)
ts := tsm1.NewTombstoner(f.Name(), nil)
entries := mustReadAll(ts)
@ -107,10 +105,9 @@ func TestTombstoner_Add_LargeKey(t *testing.T) {
}
func TestTombstoner_Add_Multiple(t *testing.T) {
dir := MustTempDir()
defer func() { os.RemoveAll(dir) }()
dir := t.TempDir()
f := MustTempFile(dir)
f := MustTempFile(t, dir)
ts := tsm1.NewTombstoner(f.Name(), nil)
entries := mustReadAll(ts)
@ -170,10 +167,9 @@ func TestTombstoner_Add_Multiple(t *testing.T) {
}
func TestTombstoner_Add_Empty(t *testing.T) {
dir := MustTempDir()
defer func() { os.RemoveAll(dir) }()
dir := t.TempDir()
f := MustTempFile(dir)
f := MustTempFile(t, dir)
ts := tsm1.NewTombstoner(f.Name(), nil)
entries := mustReadAll(ts)
@ -199,10 +195,9 @@ func TestTombstoner_Add_Empty(t *testing.T) {
}
func TestTombstoner_Delete(t *testing.T) {
dir := MustTempDir()
defer func() { os.RemoveAll(dir) }()
dir := t.TempDir()
f := MustTempFile(dir)
f := MustTempFile(t, dir)
ts := tsm1.NewTombstoner(f.Name(), nil)
ts.Add([][]byte{[]byte("foo")})
@ -237,10 +232,9 @@ func TestTombstoner_Delete(t *testing.T) {
}
func TestTombstoner_ReadV1(t *testing.T) {
dir := MustTempDir()
defer func() { os.RemoveAll(dir) }()
dir := t.TempDir()
f := MustTempFile(dir)
f := MustTempFile(t, dir)
if err := os.WriteFile(f.Name(), []byte("foo\n"), 0x0600); err != nil {
t.Fatalf("write v1 file: %v", err)
}
@ -279,12 +273,10 @@ func TestTombstoner_ReadV1(t *testing.T) {
}
func TestTombstoner_ReadEmptyV1(t *testing.T) {
dir := MustTempDir()
defer func() { os.RemoveAll(dir) }()
dir := t.TempDir()
f := MustTempFile(dir)
f := MustTempFile(t, dir)
f.Close()
if err := os.Rename(f.Name(), f.Name()+"."+tsm1.TombstoneFileExtension); err != nil {
t.Fatalf("rename tombstone failed: %v", err)
}

View File

@ -271,6 +271,7 @@ func (l *WAL) Open() error {
return err
}
if _, err := fd.Seek(0, io.SeekEnd); err != nil {
_ = fd.Close()
return err
}
l.currentSegmentWriter = NewWALSegmentWriter(fd)
@ -598,16 +599,25 @@ func (l *WAL) Close() error {
l.mu.Lock()
defer l.mu.Unlock()
// Always attempt to close the segment writer. We cannot do this in once.Do
// because if we have already closed the WAL before and reopened it again,
// the next Close() call will not close the new segment writer. For example:
// func main() {
// w.Close() -- (1)
// w.Open()
// w.Close() -- (2)
// }
// (2) needs to close the reopened `currentSegmentWriter` again.
l.traceLogger.Info("Closing WAL file", zap.String("path", l.path))
if l.currentSegmentWriter != nil {
l.sync()
_ = l.currentSegmentWriter.close()
l.currentSegmentWriter = nil
}
l.once.Do(func() {
// Close, but don't set to nil so future goroutines can still be signaled
l.traceLogger.Info("Closing WAL file", zap.String("path", l.path))
close(l.closing)
if l.currentSegmentWriter != nil {
l.sync()
l.currentSegmentWriter.close()
l.currentSegmentWriter = nil
}
})
return nil

View File

@ -25,8 +25,7 @@ func NewWAL(path string, maxConcurrentWrites int, maxWriteDelay time.Duration) *
}
func TestWALWriter_WriteMulti_Single(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
w := NewWAL(dir, 0, 0)
defer w.Close()
require.NoError(t, w.Open())
@ -69,8 +68,7 @@ func TestWALWriter_WriteMulti_Single(t *testing.T) {
}
func TestWALWriter_WriteMulti_LargeBatch(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
w := NewWAL(dir, 0, 0)
defer w.Close()
require.NoError(t, w.Open())
@ -109,8 +107,7 @@ func TestWALWriter_WriteMulti_LargeBatch(t *testing.T) {
}
func TestWALWriter_WriteMulti_Multiple(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
w := NewWAL(dir, 0, 0)
defer w.Close()
require.NoError(t, w.Open())
@ -157,8 +154,7 @@ func TestWALWriter_WriteMulti_Multiple(t *testing.T) {
}
func TestWALWriter_WriteDelete_Single(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
w := NewWAL(dir, 0, 0)
defer w.Close()
require.NoError(t, w.Open())
@ -184,8 +180,7 @@ func TestWALWriter_WriteDelete_Single(t *testing.T) {
}
func TestWALWriter_WriteMultiDelete_Multiple(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
w := NewWAL(dir, 0, 0)
defer w.Close()
require.NoError(t, w.Open())
@ -236,8 +231,7 @@ func TestWALWriter_WriteMultiDelete_Multiple(t *testing.T) {
}
func TestWALWriter_WriteMultiDeleteRange_Multiple(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
w := NewWAL(dir, 0, 0)
defer w.Close()
require.NoError(t, w.Open())
@ -295,8 +289,7 @@ func TestWALWriter_WriteMultiDeleteRange_Multiple(t *testing.T) {
}
func TestWAL_ClosedSegments(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
w := NewWAL(dir, 0, 0)
require.NoError(t, w.Open())
@ -326,8 +319,7 @@ func TestWAL_ClosedSegments(t *testing.T) {
}
func TestWAL_Delete(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
w := NewWAL(dir, 0, 0)
require.NoError(t, w.Open())
@ -354,9 +346,8 @@ func TestWAL_Delete(t *testing.T) {
}
func TestWALWriter_Corrupt(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
dir := t.TempDir()
f := MustTempFile(t, dir)
w := tsm1.NewWALSegmentWriter(f)
corruption := []byte{1, 4, 0, 0, 0}
@ -401,9 +392,8 @@ func TestWALWriter_Corrupt(t *testing.T) {
// Reproduces a `panic: runtime error: makeslice: cap out of range` when run with
// GOARCH=386 go test -run TestWALSegmentReader_Corrupt -v ./tsdb/engine/tsm1/
func TestWALSegmentReader_Corrupt(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
dir := t.TempDir()
f := MustTempFile(t, dir)
w := tsm1.NewWALSegmentWriter(f)
p4 := tsm1.NewValue(1, "string")
@ -439,8 +429,7 @@ func TestWALSegmentReader_Corrupt(t *testing.T) {
}
func TestWALRollSegment(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
w := NewWAL(dir, 0, 0)
require.NoError(t, w.Open())
@ -509,8 +498,7 @@ func TestWAL_DiskSize(t *testing.T) {
require.Equal(t, old+cur, w.DiskSizeBytes(), "total disk size")
}
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := t.TempDir()
w := NewWAL(dir, 0, 0)
@ -556,6 +544,8 @@ func TestWAL_DiskSize(t *testing.T) {
require.NoError(t, w.Remove(closedSegments))
test(w, true, false)
require.NoError(t, w.Close())
}
func TestWriteWALSegment_UnmarshalBinary_WriteWALCorrupt(t *testing.T) {
@ -690,8 +680,7 @@ func BenchmarkWAL_WriteMulti_Concurrency(b *testing.B) {
points[k] = append(points[k], tsm1.NewValue(int64(i), 1.1))
}
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := b.TempDir()
w := NewWAL(dir, 0, 0)
defer w.Close()
@ -748,10 +737,9 @@ func BenchmarkWALSegmentWriter(b *testing.B) {
points[k] = append(points[k], tsm1.NewValue(int64(i), 1.1))
}
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := b.TempDir()
f := MustTempFile(dir)
f := MustTempFile(b, dir)
w := tsm1.NewWALSegmentWriter(f)
write := &tsm1.WriteWALEntry{
@ -771,10 +759,9 @@ func BenchmarkWALSegmentReader(b *testing.B) {
points[k] = append(points[k], tsm1.NewValue(int64(i), 1.1))
}
dir := MustTempDir()
defer os.RemoveAll(dir)
dir := b.TempDir()
f := MustTempFile(dir)
f := MustTempFile(b, dir)
w := tsm1.NewWALSegmentWriter(f)
write := &tsm1.WriteWALEntry{

View File

@ -47,9 +47,8 @@ func TestTSMWriter_Write_NoValues(t *testing.T) {
}
func TestTSMWriter_Write_Single(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
dir := t.TempDir()
f := MustTempFile(t, dir)
w, err := tsm1.NewTSMWriter(f)
if err != nil {
@ -113,9 +112,8 @@ func TestTSMWriter_Write_Single(t *testing.T) {
}
func TestTSMWriter_Write_Multiple(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
dir := t.TempDir()
f := MustTempFile(t, dir)
w, err := tsm1.NewTSMWriter(f)
if err != nil {
@ -174,9 +172,8 @@ func TestTSMWriter_Write_Multiple(t *testing.T) {
}
func TestTSMWriter_Write_MultipleKeyValues(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
dir := t.TempDir()
f := MustTempFile(t, dir)
w, err := tsm1.NewTSMWriter(f)
if err != nil {
@ -242,9 +239,8 @@ func TestTSMWriter_Write_MultipleKeyValues(t *testing.T) {
// Tests that writing keys in reverse is able to read them back.
func TestTSMWriter_Write_SameKey(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
dir := t.TempDir()
f := MustTempFile(t, dir)
w, err := tsm1.NewTSMWriter(f)
if err != nil {
@ -311,9 +307,8 @@ func TestTSMWriter_Write_SameKey(t *testing.T) {
// Tests that calling Read returns all the values for block matching the key
// and timestamp
func TestTSMWriter_Read_Multiple(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
dir := t.TempDir()
f := MustTempFile(t, dir)
w, err := tsm1.NewTSMWriter(f)
if err != nil {
@ -395,9 +390,8 @@ func TestTSMWriter_Read_Multiple(t *testing.T) {
}
func TestTSMWriter_WriteBlock_Empty(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
dir := t.TempDir()
f := MustTempFile(t, dir)
w, err := tsm1.NewTSMWriter(f)
if err != nil {
@ -429,9 +423,8 @@ func TestTSMWriter_WriteBlock_Empty(t *testing.T) {
}
func TestTSMWriter_WriteBlock_Multiple(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
dir := t.TempDir()
f := MustTempFile(t, dir)
w, err := tsm1.NewTSMWriter(f)
if err != nil {
@ -487,8 +480,9 @@ func TestTSMWriter_WriteBlock_Multiple(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error created reader: %v", err)
}
defer r.Close()
f = MustTempFile(dir)
f = MustTempFile(t, dir)
w, err = tsm1.NewTSMWriter(f)
if err != nil {
t.Fatalf("unexpected error creating writer: %v", err)
@ -516,6 +510,7 @@ func TestTSMWriter_WriteBlock_Multiple(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error open file: %v", err)
}
defer fd.Close()
// Now create a reader to verify the written blocks matches the originally
// written file using Write
@ -544,9 +539,8 @@ func TestTSMWriter_WriteBlock_Multiple(t *testing.T) {
}
func TestTSMWriter_WriteBlock_MaxKey(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
dir := t.TempDir()
f := MustTempFile(t, dir)
w, err := tsm1.NewTSMWriter(f)
if err != nil {
@ -564,10 +558,8 @@ func TestTSMWriter_WriteBlock_MaxKey(t *testing.T) {
}
func TestTSMWriter_Write_MaxKey(t *testing.T) {
dir := MustTempDir()
defer os.RemoveAll(dir)
f := MustTempFile(dir)
defer f.Close()
dir := t.TempDir()
f := MustTempFile(t, dir)
w, err := tsm1.NewTSMWriter(f)
if err != nil {

View File

@ -12,7 +12,7 @@ import (
// Ensure fileset can return an iterator over all series in the index.
func TestFileSet_SeriesIDIterator(t *testing.T) {
idx := MustOpenIndex(1)
idx := MustOpenIndex(t, 1)
defer idx.Close()
// Create initial set of series.
@ -81,7 +81,7 @@ func TestFileSet_SeriesIDIterator(t *testing.T) {
// Ensure fileset can return an iterator over all series for one measurement.
func TestFileSet_MeasurementSeriesIDIterator(t *testing.T) {
idx := MustOpenIndex(1)
idx := MustOpenIndex(t, 1)
defer idx.Close()
// Create initial set of series.
@ -147,7 +147,7 @@ func TestFileSet_MeasurementSeriesIDIterator(t *testing.T) {
// Ensure fileset can return an iterator over all measurements for the index.
func TestFileSet_MeasurementIterator(t *testing.T) {
idx := MustOpenIndex(1)
idx := MustOpenIndex(t, 1)
defer idx.Close()
// Create initial set of series.
@ -221,7 +221,7 @@ func TestFileSet_MeasurementIterator(t *testing.T) {
// Ensure fileset can return an iterator over all keys for one measurement.
func TestFileSet_TagKeyIterator(t *testing.T) {
idx := MustOpenIndex(1)
idx := MustOpenIndex(t, 1)
defer idx.Close()
// Create initial set of series.

View File

@ -13,7 +13,7 @@ import (
// Ensure a simple index file can be built and opened.
func TestCreateIndexFile(t *testing.T) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(t)
defer sfile.Close()
f, err := CreateIndexFile(sfile.SeriesFile, []Series{
@ -33,7 +33,7 @@ func TestCreateIndexFile(t *testing.T) {
}
func TestIndexFile_TagKeySeriesIDIterator(t *testing.T) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(t)
defer sfile.Close()
f, err := CreateIndexFile(sfile.SeriesFile, []Series{
@ -82,7 +82,7 @@ func TestIndexFile_TagKeySeriesIDIterator(t *testing.T) {
// Ensure index file generation can be successfully built.
func TestGenerateIndexFile(t *testing.T) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(t)
defer sfile.Close()
// Build generated index file.
@ -126,7 +126,7 @@ func TestGenerateIndexFile_Uvarint(t *testing.T) {
// Ensure a MeasurementHashSeries returns false when all series are tombstoned.
func TestIndexFile_MeasurementHasSeries_Tombstoned(t *testing.T) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(t)
defer sfile.Close()
f, err := CreateIndexFile(sfile.SeriesFile, []Series{
@ -146,17 +146,17 @@ func TestIndexFile_MeasurementHasSeries_Tombstoned(t *testing.T) {
func BenchmarkIndexFile_TagValueSeries(b *testing.B) {
b.Run("M=1,K=2,V=3", func(b *testing.B) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(b)
defer sfile.Close()
benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(sfile.SeriesFile, 1, 2, 3))
})
b.Run("M=10,K=5,V=5", func(b *testing.B) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(b)
defer sfile.Close()
benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(sfile.SeriesFile, 10, 5, 5))
})
b.Run("M=10,K=7,V=5", func(b *testing.B) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(b)
defer sfile.Close()
benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(sfile.SeriesFile, 10, 7, 7))
})

View File

@ -10,7 +10,7 @@ import (
// Ensure multiple index files can be compacted together.
func TestIndexFiles_WriteTo(t *testing.T) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(t)
defer sfile.Close()
// Write first file.

View File

@ -27,7 +27,7 @@ const M, K = 4096, 6
// Ensure index can iterate over all measurement names.
func TestIndex_ForEachMeasurementName(t *testing.T) {
idx := MustOpenDefaultIndex()
idx := MustOpenDefaultIndex(t)
defer idx.Close()
// Add series to index.
@ -80,7 +80,7 @@ func TestIndex_ForEachMeasurementName(t *testing.T) {
// Ensure index can return whether a measurement exists.
func TestIndex_MeasurementExists(t *testing.T) {
idx := MustOpenDefaultIndex()
idx := MustOpenDefaultIndex(t)
defer idx.Close()
// Add series to index.
@ -142,7 +142,7 @@ func TestIndex_MeasurementExists(t *testing.T) {
// Ensure index can return a list of matching measurements.
func TestIndex_MeasurementNamesByRegex(t *testing.T) {
idx := MustOpenDefaultIndex()
idx := MustOpenDefaultIndex(t)
defer idx.Close()
// Add series to index.
@ -167,7 +167,7 @@ func TestIndex_MeasurementNamesByRegex(t *testing.T) {
// Ensure index can delete a measurement and all related keys, values, & series.
func TestIndex_DropMeasurement(t *testing.T) {
idx := MustOpenDefaultIndex()
idx := MustOpenDefaultIndex(t)
defer idx.Close()
// Add series to index.
@ -213,7 +213,7 @@ func TestIndex_DropMeasurement(t *testing.T) {
}
func TestIndex_OpenFail(t *testing.T) {
idx := NewDefaultIndex()
idx := NewDefaultIndex(t)
require.NoError(t, idx.Open())
idx.Index.Close()
// mess up the index:
@ -237,16 +237,15 @@ func TestIndex_OpenFail(t *testing.T) {
}
func TestIndex_Open(t *testing.T) {
// Opening a fresh index should set the MANIFEST version to current version.
idx := NewDefaultIndex()
t.Run("open new index", func(t *testing.T) {
if err := idx.Open(); err != nil {
t.Fatal(err)
}
// Opening a fresh index should set the MANIFEST version to current version.
idx := MustOpenDefaultIndex(t)
t.Cleanup(func() { assert.NoError(t, idx.Close()) })
// Check version set appropriately.
for i := 0; uint64(i) < tsi1.DefaultPartitionN; i++ {
partition := idx.PartitionAt(i)
if got, exp := partition.Manifest().Version, 1; got != exp {
t.Fatalf("got index version %d, expected %d", got, exp)
}
@ -254,6 +253,7 @@ func TestIndex_Open(t *testing.T) {
for i := 0; i < int(idx.PartitionN); i++ {
p := idx.PartitionAt(i)
if got, exp := p.NeedsCompaction(false), false; got != exp {
t.Fatalf("got needs compaction %v, expected %v", got, exp)
}
@ -262,19 +262,28 @@ func TestIndex_Open(t *testing.T) {
// Reopening an open index should return an error.
t.Run("reopen open index", func(t *testing.T) {
idx := MustOpenDefaultIndex(t)
t.Cleanup(func() { assert.NoError(t, idx.Close()) })
// Manually closing the existing SeriesFile so that it won't be left
// opened after idx.Open(), which calls another idx.SeriesFile.Open().
//
// This is required for t.TempDir() to be cleaned-up successfully on
// Windows.
assert.NoError(t, idx.SeriesFile.Close())
err := idx.Open()
if err == nil {
idx.Close()
t.Fatal("didn't get an error on reopen, but expected one")
}
idx.Close()
})
// Opening an incompatible index should return an error.
incompatibleVersions := []int{-1, 0, 2}
for _, v := range incompatibleVersions {
t.Run(fmt.Sprintf("incompatible index version: %d", v), func(t *testing.T) {
idx = NewDefaultIndex()
idx := NewDefaultIndex(t)
// Manually create a MANIFEST file for an incompatible index version.
// under one of the partitions.
partitionPath := filepath.Join(idx.Path(), "2")
@ -298,8 +307,8 @@ func TestIndex_Open(t *testing.T) {
// Opening this index should return an error because the MANIFEST has an
// incompatible version.
err = idx.Open()
t.Cleanup(func() { assert.NoError(t, idx.Close()) })
if !errors.Is(err, tsi1.ErrIncompatibleVersion) {
idx.Close()
t.Fatalf("got error %v, expected %v", err, tsi1.ErrIncompatibleVersion)
}
})
@ -308,7 +317,7 @@ func TestIndex_Open(t *testing.T) {
func TestIndex_Manifest(t *testing.T) {
t.Run("current MANIFEST", func(t *testing.T) {
idx := MustOpenIndex(tsi1.DefaultPartitionN)
idx := MustOpenIndex(t, tsi1.DefaultPartitionN)
// Check version set appropriately.
for i := 0; uint64(i) < tsi1.DefaultPartitionN; i++ {
@ -317,11 +326,13 @@ func TestIndex_Manifest(t *testing.T) {
t.Fatalf("got MANIFEST version %d, expected %d", got, exp)
}
}
require.NoError(t, idx.Close())
})
}
func TestIndex_DiskSizeBytes(t *testing.T) {
idx := MustOpenIndex(tsi1.DefaultPartitionN)
idx := MustOpenIndex(t, tsi1.DefaultPartitionN)
defer idx.Close()
// Add series to index.
@ -359,9 +370,9 @@ func TestIndex_DiskSizeBytes(t *testing.T) {
}
func TestIndex_TagValueSeriesIDIterator(t *testing.T) {
idx1 := MustOpenDefaultIndex() // Uses the single series creation method CreateSeriesIfNotExists
idx1 := MustOpenDefaultIndex(t) // Uses the single series creation method CreateSeriesIfNotExists
defer idx1.Close()
idx2 := MustOpenDefaultIndex() // Uses the batch series creation method CreateSeriesListIfNotExists
idx2 := MustOpenDefaultIndex(t) // Uses the batch series creation method CreateSeriesListIfNotExists
defer idx2.Close()
// Add some series.
@ -519,21 +530,21 @@ type Index struct {
}
// NewIndex returns a new instance of Index at a temporary path.
func NewIndex(partitionN uint64) *Index {
idx := &Index{SeriesFile: NewSeriesFile()}
idx.Index = tsi1.NewIndex(idx.SeriesFile.SeriesFile, "db0", tsi1.WithPath(MustTempDir()))
// NewIndex returns a new instance of Index at a temporary path.
// The series file and the index directory both live under tb.TempDir(),
// so they are removed automatically when the test completes.
func NewIndex(tb testing.TB, partitionN uint64) *Index {
	// Build the series file first; the index wraps its underlying SeriesFile.
	sf := NewSeriesFile(tb)
	index := tsi1.NewIndex(sf.SeriesFile, "db0", tsi1.WithPath(tb.TempDir()))
	index.PartitionN = partitionN
	return &Index{SeriesFile: sf, Index: index}
}
// NewDefaultIndex returns a new instance of Index with the default number of partitions at a temporary path.
func NewDefaultIndex() *Index {
return NewIndex(tsi1.DefaultPartitionN)
func NewDefaultIndex(tb testing.TB) *Index {
// Thin wrapper: delegate to NewIndex with tsi1.DefaultPartitionN partitions.
return NewIndex(tb, tsi1.DefaultPartitionN)
}
// MustOpenIndex returns a new, open index. Panic on error.
func MustOpenIndex(partitionN uint64) *Index {
idx := NewIndex(partitionN)
func MustOpenIndex(tb testing.TB, partitionN uint64) *Index {
idx := NewIndex(tb, partitionN)
if err := idx.Open(); err != nil {
panic(err)
}
@ -541,8 +552,8 @@ func MustOpenIndex(partitionN uint64) *Index {
}
// MustOpenDefaultIndex returns a new, open index with the default number of partitions.
func MustOpenDefaultIndex() *Index {
return MustOpenIndex(tsi1.DefaultPartitionN)
func MustOpenDefaultIndex(tb testing.TB) *Index {
// Thin wrapper: open an index with tsi1.DefaultPartitionN partitions,
// panicking on failure (Must* convention for test helpers).
return MustOpenIndex(tb, tsi1.DefaultPartitionN)
}
// Open opens the underlying tsi1.Index and tsdb.SeriesFile
@ -555,7 +566,6 @@ func (idx Index) Open() error {
// Close closes and removes the index directory.
func (idx *Index) Close() error {
defer os.RemoveAll(idx.Path())
// Series file is opened first and must be closed last
if err := idx.Index.Close(); err != nil {
return err
@ -671,7 +681,7 @@ var tsiditr tsdb.SeriesIDIterator
func BenchmarkIndex_IndexFile_TagValueSeriesIDIterator(b *testing.B) {
runBenchMark := func(b *testing.B, cacheSize int) {
var err error
sfile := NewSeriesFile()
sfile := NewSeriesFile(b)
// Load index
idx := tsi1.NewIndex(sfile.SeriesFile, "foo",
tsi1.WithPath("testdata/index-file-index"),
@ -771,7 +781,7 @@ func BenchmarkIndex_CreateSeriesListIfNotExists(b *testing.B) {
b.Run(fmt.Sprintf("batch size %d", sz), func(b *testing.B) {
for _, partition := range partitions {
b.Run(fmt.Sprintf("partition %d", partition), func(b *testing.B) {
idx := MustOpenIndex(partition)
idx := MustOpenIndex(b, partition)
for j := 0; j < b.N; j++ {
for i := 0; i < len(keys); i += sz {
k := keys[i : i+sz]
@ -786,7 +796,7 @@ func BenchmarkIndex_CreateSeriesListIfNotExists(b *testing.B) {
if err := idx.Close(); err != nil {
b.Fatal(err)
}
idx = MustOpenIndex(partition)
idx = MustOpenIndex(b, partition)
b.StartTimer()
}
})
@ -847,8 +857,8 @@ func BenchmarkIndex_ConcurrentWriteQuery(b *testing.B) {
}
runBenchmark := func(b *testing.B, queryN int, partitions uint64, cacheSize int) {
idx := &Index{SeriesFile: NewSeriesFile()}
idx.Index = tsi1.NewIndex(idx.SeriesFile.SeriesFile, "db0", tsi1.WithPath(MustTempDir()), tsi1.WithSeriesIDCacheSize(cacheSize))
idx := &Index{SeriesFile: NewSeriesFile(b)}
idx.Index = tsi1.NewIndex(idx.SeriesFile.SeriesFile, "db0", tsi1.WithPath(b.TempDir()), tsi1.WithSeriesIDCacheSize(cacheSize))
idx.Index.PartitionN = partitions
if err := idx.Open(); err != nil {
@ -906,8 +916,8 @@ func BenchmarkIndex_ConcurrentWriteQuery(b *testing.B) {
}
// Re-open everything
idx := &Index{SeriesFile: NewSeriesFile()}
idx.Index = tsi1.NewIndex(idx.SeriesFile.SeriesFile, "db0", tsi1.WithPath(MustTempDir()), tsi1.WithSeriesIDCacheSize(cacheSize))
idx := &Index{SeriesFile: NewSeriesFile(b)}
idx.Index = tsi1.NewIndex(idx.SeriesFile.SeriesFile, "db0", tsi1.WithPath(b.TempDir()), tsi1.WithSeriesIDCacheSize(cacheSize))
idx.Index.PartitionN = partitions
if err := idx.Open(); err != nil {

View File

@ -23,7 +23,7 @@ import (
// Ensure log file can append series.
func TestLogFile_AddSeriesList(t *testing.T) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(t)
defer sfile.Close()
f := MustOpenLogFile(sfile.SeriesFile)
@ -114,7 +114,7 @@ func TestLogFile_AddSeriesList(t *testing.T) {
}
func TestLogFile_SeriesStoredInOrder(t *testing.T) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(t)
defer sfile.Close()
f := MustOpenLogFile(sfile.SeriesFile)
@ -171,7 +171,7 @@ func TestLogFile_SeriesStoredInOrder(t *testing.T) {
// Ensure log file can delete an existing measurement.
func TestLogFile_DeleteMeasurement(t *testing.T) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(t)
defer sfile.Close()
f := MustOpenLogFile(sfile.SeriesFile)
@ -210,7 +210,7 @@ func TestLogFile_DeleteMeasurement(t *testing.T) {
// Ensure log file can recover correctly.
func TestLogFile_Open(t *testing.T) {
t.Run("Truncate", func(t *testing.T) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(t)
defer sfile.Close()
seriesSet := tsdb.NewSeriesIDSet()
@ -270,7 +270,7 @@ func TestLogFile_Open(t *testing.T) {
})
t.Run("ChecksumMismatch", func(t *testing.T) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(t)
defer sfile.Close()
seriesSet := tsdb.NewSeriesIDSet()
@ -313,7 +313,7 @@ func TestLogFile_Open(t *testing.T) {
}
func TestLogFile_MeasurementHasSeries(t *testing.T) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(t)
defer sfile.Close()
f := MustOpenLogFile(sfile.SeriesFile)
@ -447,7 +447,7 @@ func GenerateLogFile(sfile *tsdb.SeriesFile, measurementN, tagN, valueN int) (*L
}
func benchmarkLogFile_AddSeries(b *testing.B, measurementN, seriesKeyN, seriesValueN int) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(b)
defer sfile.Close()
b.StopTimer()
@ -505,7 +505,7 @@ func BenchmarkLogFile_WriteTo(b *testing.B) {
for _, seriesN := range []int{1000, 10000, 100000, 1000000} {
name := fmt.Sprintf("series=%d", seriesN)
b.Run(name, func(b *testing.B) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(b)
defer sfile.Close()
f := MustOpenLogFile(sfile.SeriesFile)
@ -549,7 +549,7 @@ func BenchmarkLogFile_WriteTo(b *testing.B) {
func benchmarkLogFile_MeasurementHasSeries(b *testing.B, seriesKeyN, seriesValueN int) {
b.StopTimer()
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(b)
defer sfile.Close()
f := MustOpenLogFile(sfile.SeriesFile)

View File

@ -370,9 +370,12 @@ func (p *Partition) Close() error {
p.mu.Lock()
defer p.mu.Unlock()
var err error
if p.fileSet == nil {
return nil
}
// Close log files.
var err error
for _, f := range p.fileSet.files {
if localErr := f.Close(); localErr != nil {
err = localErr
@ -508,7 +511,7 @@ func (p *Partition) prependActiveLogFile() (rErr error) {
// Prepend and generate new fileset but do not yet update the partition
newFileSet := p.fileSet.PrependLogFile(f)
errors2.Capture(&rErr, func() error {
defer errors2.Capture(&rErr, func() error {
if rErr != nil {
// close the new file.
f.Close()

View File

@ -13,11 +13,11 @@ import (
)
func TestPartition_Open(t *testing.T) {
sfile := MustOpenSeriesFile()
sfile := MustOpenSeriesFile(t)
defer sfile.Close()
// Opening a fresh index should set the MANIFEST version to current version.
p := NewPartition(sfile.SeriesFile)
p := NewPartition(t, sfile.SeriesFile)
t.Run("open new index", func(t *testing.T) {
if err := p.Open(); err != nil {
t.Fatal(err)
@ -43,7 +43,7 @@ func TestPartition_Open(t *testing.T) {
incompatibleVersions := []int{-1, 0, 2}
for _, v := range incompatibleVersions {
t.Run(fmt.Sprintf("incompatible index version: %d", v), func(t *testing.T) {
p = NewPartition(sfile.SeriesFile)
p = NewPartition(t, sfile.SeriesFile)
// Manually create a MANIFEST file for an incompatible index version.
mpath := filepath.Join(p.Path(), tsi1.ManifestFileName)
m := tsi1.NewManifest(mpath)
@ -73,10 +73,12 @@ func TestPartition_Open(t *testing.T) {
func TestPartition_Manifest(t *testing.T) {
t.Run("current MANIFEST", func(t *testing.T) {
sfile := MustOpenSeriesFile()
defer sfile.Close()
sfile := MustOpenSeriesFile(t)
t.Cleanup(func() { sfile.Close() })
p := MustOpenPartition(t, sfile.SeriesFile)
t.Cleanup(func() { p.Close() })
p := MustOpenPartition(sfile.SeriesFile)
if got, exp := p.Manifest().Version, tsi1.Version; got != exp {
t.Fatalf("got MANIFEST version %d, expected %d", got, exp)
}
@ -97,15 +99,15 @@ func TestPartition_Manifest_Write_Fail(t *testing.T) {
func TestPartition_PrependLogFile_Write_Fail(t *testing.T) {
t.Run("write MANIFEST", func(t *testing.T) {
sfile := MustOpenSeriesFile()
defer sfile.Close()
sfile := MustOpenSeriesFile(t)
t.Cleanup(func() { sfile.Close() })
p := MustOpenPartition(sfile.SeriesFile)
defer func() {
p := MustOpenPartition(t, sfile.SeriesFile)
t.Cleanup(func() {
if err := p.Close(); err != nil {
t.Fatalf("error closing partition: %v", err)
}
}()
})
p.Partition.MaxLogFileSize = -1
fileN := p.FileN()
p.CheckLogFile()
@ -123,15 +125,15 @@ func TestPartition_PrependLogFile_Write_Fail(t *testing.T) {
func TestPartition_Compact_Write_Fail(t *testing.T) {
t.Run("write MANIFEST", func(t *testing.T) {
sfile := MustOpenSeriesFile()
defer sfile.Close()
sfile := MustOpenSeriesFile(t)
t.Cleanup(func() { sfile.Close() })
p := MustOpenPartition(sfile.SeriesFile)
defer func() {
p := MustOpenPartition(t, sfile.SeriesFile)
t.Cleanup(func() {
if err := p.Close(); err != nil {
t.Fatalf("error closing partition: %v", err)
}
}()
})
p.Partition.MaxLogFileSize = -1
fileN := p.FileN()
p.Compact()
@ -153,13 +155,13 @@ type Partition struct {
}
// NewPartition returns a new instance of Partition at a temporary path.
func NewPartition(sfile *tsdb.SeriesFile) *Partition {
return &Partition{Partition: tsi1.NewPartition(sfile, MustTempPartitionDir())}
// NewPartition returns a new instance of Partition at a temporary path.
// The partition directory is created under tb.TempDir() and is removed
// automatically when the test completes.
func NewPartition(tb testing.TB, sfile *tsdb.SeriesFile) *Partition {
	dir := MustTempPartitionDir(tb)
	return &Partition{Partition: tsi1.NewPartition(sfile, dir)}
}
// MustOpenPartition returns a new, open index. Panic on error.
func MustOpenPartition(sfile *tsdb.SeriesFile) *Partition {
p := NewPartition(sfile)
func MustOpenPartition(tb testing.TB, sfile *tsdb.SeriesFile) *Partition {
p := NewPartition(tb, sfile)
if err := p.Open(); err != nil {
panic(err)
}
@ -168,7 +170,6 @@ func MustOpenPartition(sfile *tsdb.SeriesFile) *Partition {
// Close closes and removes the index directory.
func (p *Partition) Close() error {
// Remove the partition's on-disk directory after the underlying partition
// has been closed: the deferred call runs after the return expression below
// is evaluated. Any RemoveAll error is intentionally ignored (best-effort
// test cleanup).
defer os.RemoveAll(p.Path())
return p.Partition.Close()
}

View File

@ -10,7 +10,7 @@ import (
)
func TestSQLIndexExporter_ExportIndex(t *testing.T) {
idx := MustOpenIndex(1)
idx := MustOpenIndex(t, 1)
defer idx.Close()
// Add series to index.

View File

@ -255,19 +255,10 @@ func (itr *SeriesIDIterator) Next() (elem tsdb.SeriesIDElem) {
return elem
}
// MustTempDir returns a temporary directory. Panic on error.
func MustTempDir() string {
path, err := os.MkdirTemp("", "tsi-")
if err != nil {
panic(err)
}
return path
}
// MustTempDir returns a temporary directory for a partition. Panic on error.
func MustTempPartitionDir() string {
path := MustTempDir()
path = filepath.Join(path, "0")
// MustTempPartitionDir returns a temporary directory for a partition. Panic on
// error.
func MustTempPartitionDir(tb testing.TB) string {
path := filepath.Join(tb.TempDir(), "0")
if err := os.Mkdir(path, 0777); err != nil {
panic(err)
}
@ -287,17 +278,13 @@ type SeriesFile struct {
}
// NewSeriesFile returns a new instance of SeriesFile with a temporary file path.
func NewSeriesFile() *SeriesFile {
dir, err := os.MkdirTemp("", "tsdb-series-file-")
if err != nil {
panic(err)
}
return &SeriesFile{SeriesFile: tsdb.NewSeriesFile(dir)}
// NewSeriesFile returns a new instance of SeriesFile backed by a fresh
// temporary directory that is removed automatically when the test completes.
func NewSeriesFile(tb testing.TB) *SeriesFile {
	dir := tb.TempDir()
	return &SeriesFile{SeriesFile: tsdb.NewSeriesFile(dir)}
}
// MustOpenSeriesFile returns a new, open instance of SeriesFile. Panic on error.
func MustOpenSeriesFile() *SeriesFile {
f := NewSeriesFile()
func MustOpenSeriesFile(tb testing.TB) *SeriesFile {
f := NewSeriesFile(tb)
if err := f.Open(); err != nil {
panic(err)
}
@ -306,7 +293,6 @@ func MustOpenSeriesFile() *SeriesFile {
// Close closes the log file and removes it from disk.
func (f *SeriesFile) Close() error {
defer os.RemoveAll(f.Path())
return f.SeriesFile.Close()
}

View File

@ -369,10 +369,7 @@ func MustNewIndex(tb testing.TB, index string, eopts ...EngineOption) *Index {
opt(&opts)
}
rootPath, err := os.MkdirTemp("", "influxdb-tsdb")
if err != nil {
panic(err)
}
rootPath := tb.TempDir()
seriesPath, err := os.MkdirTemp(rootPath, tsdb.SeriesFileDirectory)
if err != nil {

View File

@ -53,7 +53,7 @@ func TestParseSeriesKeyInto(t *testing.T) {
// Ensure that broken series files are closed
func TestSeriesFile_Open_WhenFileCorrupt_ShouldReturnErr(t *testing.T) {
f := NewBrokenSeriesFile([]byte{0, 0, 0, 0, 0})
f := NewBrokenSeriesFile(t, []byte{0, 0, 0, 0, 0})
defer f.Close()
f.Logger = zaptest.NewLogger(t)
@ -319,16 +319,20 @@ type SeriesFile struct {
}
// NewSeriesFile returns a new instance of SeriesFile with a temporary file path.
func NewSeriesFile() *SeriesFile {
dir, err := os.MkdirTemp("", "tsdb-series-file-")
if err != nil {
panic(err)
}
return &SeriesFile{SeriesFile: tsdb.NewSeriesFile(dir)}
// NewSeriesFile returns a new instance of SeriesFile with a temporary file
// path. The file is registered with tb.Cleanup so it is closed when the test
// (and its subtests) finish, which lets the TempDir cleanup remove the
// directory afterwards.
func NewSeriesFile(tb testing.TB) *SeriesFile {
	sf := &SeriesFile{SeriesFile: tsdb.NewSeriesFile(tb.TempDir())}
	// Best-effort close on test teardown; the Close error is ignored here,
	// matching the original helper's behavior.
	tb.Cleanup(func() { sf.Close() })
	return sf
}
func NewBrokenSeriesFile(content []byte) *SeriesFile {
sFile := NewSeriesFile()
func NewBrokenSeriesFile(tb testing.TB, content []byte) *SeriesFile {
sFile := NewSeriesFile(tb)
fPath := sFile.Path()
sFile.Open()
sFile.SeriesFile.Close()
@ -348,7 +352,7 @@ func NewBrokenSeriesFile(content []byte) *SeriesFile {
func MustOpenSeriesFile(tb testing.TB) *SeriesFile {
tb.Helper()
f := NewSeriesFile()
f := NewSeriesFile(tb)
f.Logger = zaptest.NewLogger(tb)
if err := f.Open(); err != nil {
panic(err)
@ -356,12 +360,6 @@ func MustOpenSeriesFile(tb testing.TB) *SeriesFile {
return f
}
// Close closes the log file and removes it from disk.
func (f *SeriesFile) Close() error {
defer os.RemoveAll(f.Path())
return f.SeriesFile.Close()
}
// Reopen close & reopens the series file.
func (f *SeriesFile) Reopen() error {
if err := f.SeriesFile.Close(); err != nil {

View File

@ -10,8 +10,7 @@ import (
)
func TestSeriesIndex_Count(t *testing.T) {
dir, cleanup := MustTempDir()
defer cleanup()
dir := t.TempDir()
idx := tsdb.NewSeriesIndex(filepath.Join(dir, "index"))
if err := idx.Open(); err != nil {
@ -30,8 +29,7 @@ func TestSeriesIndex_Count(t *testing.T) {
}
func TestSeriesIndex_Delete(t *testing.T) {
dir, cleanup := MustTempDir()
defer cleanup()
dir := t.TempDir()
idx := tsdb.NewSeriesIndex(filepath.Join(dir, "index"))
if err := idx.Open(); err != nil {
@ -53,8 +51,7 @@ func TestSeriesIndex_Delete(t *testing.T) {
}
func TestSeriesIndex_FindIDBySeriesKey(t *testing.T) {
dir, cleanup := MustTempDir()
defer cleanup()
dir := t.TempDir()
idx := tsdb.NewSeriesIndex(filepath.Join(dir, "index"))
if err := idx.Open(); err != nil {
@ -86,8 +83,7 @@ func TestSeriesIndex_FindIDBySeriesKey(t *testing.T) {
}
func TestSeriesIndex_FindOffsetByID(t *testing.T) {
dir, cleanup := MustTempDir()
defer cleanup()
dir := t.TempDir()
idx := tsdb.NewSeriesIndex(filepath.Join(dir, "index"))
if err := idx.Open(); err != nil {

View File

@ -11,8 +11,7 @@ import (
)
func TestSeriesSegment(t *testing.T) {
dir, cleanup := MustTempDir()
defer cleanup()
dir := t.TempDir()
// Create a new initial segment (4mb) and initialize for writing.
segment, err := tsdb.CreateSeriesSegment(0, filepath.Join(dir, "0000"))
@ -69,8 +68,7 @@ func TestSeriesSegment(t *testing.T) {
}
func TestSeriesSegment_AppendSeriesIDs(t *testing.T) {
dir, cleanup := MustTempDir()
defer cleanup()
dir := t.TempDir()
segment, err := tsdb.CreateSeriesSegment(0, filepath.Join(dir, "0000"))
if err != nil {
@ -97,8 +95,7 @@ func TestSeriesSegment_AppendSeriesIDs(t *testing.T) {
}
func TestSeriesSegment_MaxSeriesID(t *testing.T) {
dir, cleanup := MustTempDir()
defer cleanup()
dir := t.TempDir()
segment, err := tsdb.CreateSeriesSegment(0, filepath.Join(dir, "0000"))
if err != nil {
@ -142,8 +139,7 @@ func TestSeriesSegmentHeader(t *testing.T) {
}
func TestSeriesSegment_PartialWrite(t *testing.T) {
dir, cleanup := MustTempDir()
defer cleanup()
dir := t.TempDir()
// Create a new initial segment (4mb) and initialize for writing.
segment, err := tsdb.CreateSeriesSegment(0, filepath.Join(dir, "0000"))

View File

@ -3,7 +3,6 @@ package tsdb
import (
"context"
"fmt"
"os"
"path/filepath"
"regexp"
"sort"
@ -367,10 +366,7 @@ func NewTempShard(tb testing.TB, index string) *TempShard {
tb.Helper()
// Create temporary path for data and WAL.
dir, err := os.MkdirTemp("", "influxdb-tsdb-")
if err != nil {
panic(err)
}
dir := tb.TempDir()
// Create series file.
sfile := NewSeriesFile(filepath.Join(dir, "db0", SeriesFileDirectory))
@ -398,7 +394,6 @@ func NewTempShard(tb testing.TB, index string) *TempShard {
// Close closes the shard and removes all underlying data.
func (sh *TempShard) Close() error {
defer os.RemoveAll(sh.path)
sh.sfile.Close()
return sh.Shard.Close()
}

View File

@ -5,9 +5,6 @@ import (
"context"
"errors"
"fmt"
"github.com/davecgh/go-spew/spew"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"math"
"os"
"path/filepath"
@ -20,6 +17,9 @@ import (
"testing"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/influxdata/influxdb/v2/influxql/query"
"github.com/influxdata/influxdb/v2/internal"
"github.com/influxdata/influxdb/v2/models"
@ -33,8 +33,7 @@ import (
)
func TestShardWriteAndIndex(t *testing.T) {
tmpDir, _ := os.MkdirTemp("", "shard_test")
defer os.RemoveAll(tmpDir)
tmpDir := t.TempDir()
tmpShard := filepath.Join(tmpDir, "shard")
tmpWal := filepath.Join(tmpDir, "wal")
@ -99,11 +98,12 @@ func TestShardWriteAndIndex(t *testing.T) {
if err != nil {
t.Fatalf(err.Error())
}
sh.Close()
}
func TestShardRebuildIndex(t *testing.T) {
tmpDir, _ := os.MkdirTemp("", "shard_test")
defer os.RemoveAll(tmpDir)
tmpDir := t.TempDir()
tmpShard := filepath.Join(tmpDir, "shard")
tmpWal := filepath.Join(tmpDir, "wal")
@ -177,11 +177,12 @@ func TestShardRebuildIndex(t *testing.T) {
if err != nil {
t.Fatalf(err.Error())
}
sh.Close()
}
func TestShard_Open_CorruptFieldsIndex(t *testing.T) {
tmpDir, _ := os.MkdirTemp("", "shard_test")
defer os.RemoveAll(tmpDir)
tmpDir := t.TempDir()
tmpShard := filepath.Join(tmpDir, "shard")
tmpWal := filepath.Join(tmpDir, "wal")
@ -192,6 +193,7 @@ func TestShard_Open_CorruptFieldsIndex(t *testing.T) {
opts.Config.WALDir = filepath.Join(tmpDir, "wal")
sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts)
t.Cleanup(func() { sh.Close() })
// Calling WritePoints when the engine is not open will return
// ErrEngineClosed.
@ -230,8 +232,7 @@ func TestShard_Open_CorruptFieldsIndex(t *testing.T) {
}
func TestWriteTimeTag(t *testing.T) {
tmpDir, _ := os.MkdirTemp("", "shard_test")
defer os.RemoveAll(tmpDir)
tmpDir := t.TempDir()
tmpShard := filepath.Join(tmpDir, "shard")
tmpWal := filepath.Join(tmpDir, "wal")
@ -280,8 +281,7 @@ func TestWriteTimeTag(t *testing.T) {
}
func TestWriteTimeField(t *testing.T) {
tmpDir, _ := os.MkdirTemp("", "shard_test")
defer os.RemoveAll(tmpDir)
tmpDir := t.TempDir()
tmpShard := filepath.Join(tmpDir, "shard")
tmpWal := filepath.Join(tmpDir, "wal")
@ -315,8 +315,7 @@ func TestWriteTimeField(t *testing.T) {
}
func TestShardWriteAddNewField(t *testing.T) {
tmpDir, _ := os.MkdirTemp("", "shard_test")
defer os.RemoveAll(tmpDir)
tmpDir := t.TempDir()
tmpShard := filepath.Join(tmpDir, "shard")
tmpWal := filepath.Join(tmpDir, "wal")
@ -367,8 +366,7 @@ func TestShard_WritePoints_FieldConflictConcurrent(t *testing.T) {
if testing.Short() || runtime.GOOS == "windows" {
t.Skip("Skipping on short and windows")
}
tmpDir, _ := os.MkdirTemp("", "shard_test")
defer os.RemoveAll(tmpDir)
tmpDir := t.TempDir()
tmpShard := filepath.Join(tmpDir, "shard")
tmpWal := filepath.Join(tmpDir, "wal")
@ -455,8 +453,7 @@ func TestShard_WritePoints_FieldConflictConcurrentQuery(t *testing.T) {
if testing.Short() {
t.Skip()
}
tmpDir, _ := os.MkdirTemp("", "shard_test")
defer os.RemoveAll(tmpDir)
tmpDir := t.TempDir()
tmpShard := filepath.Join(tmpDir, "shard")
tmpWal := filepath.Join(tmpDir, "wal")
@ -605,8 +602,7 @@ func TestShard_WritePoints_FieldConflictConcurrentQuery(t *testing.T) {
// Ensures that when a shard is closed, it removes any series meta-data
// from the index.
func TestShard_Close_RemoveIndex(t *testing.T) {
tmpDir, _ := os.MkdirTemp("", "shard_test")
defer os.RemoveAll(tmpDir)
tmpDir := t.TempDir()
tmpShard := filepath.Join(tmpDir, "shard")
tmpWal := filepath.Join(tmpDir, "wal")
@ -620,6 +616,7 @@ func TestShard_Close_RemoveIndex(t *testing.T) {
if err := sh.Open(context.Background()); err != nil {
t.Fatalf("error opening shard: %s", err.Error())
}
t.Cleanup(func() { sh.Close() })
pt := models.MustNewPoint(
"cpu",
@ -730,11 +727,9 @@ cpu,host=serverB,region=uswest value=25 0
// Ensure a shard can create iterators for its underlying data.
func TestShard_CreateIterator_Descending(t *testing.T) {
var sh *Shard
var itr query.Iterator
test := func(t *testing.T, index string) {
sh = NewShard(t, index)
sh := NewShard(t, index)
defer sh.Close()
// Calling CreateIterator when the engine is not open will return
// ErrEngineClosed.
@ -757,7 +752,7 @@ cpu,host=serverB,region=uswest value=25 0
// Create iterator.
var err error
m = &influxql.Measurement{Name: "cpu"}
itr, err = sh.CreateIterator(context.Background(), m, query.IteratorOptions{
itr, err := sh.CreateIterator(context.Background(), m, query.IteratorOptions{
Expr: influxql.MustParseExpr(`value`),
Aux: []influxql.VarRef{{Val: "val2"}},
Dimensions: []string{"host"},
@ -768,6 +763,7 @@ cpu,host=serverB,region=uswest value=25 0
if err != nil {
t.Fatal(err)
}
defer itr.Close()
fitr := itr.(query.FloatIterator)
// Read values from iterator.
@ -810,8 +806,6 @@ cpu,host=serverB,region=uswest value=25 0
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) { test(t, index) })
sh.Close()
itr.Close()
}
}
@ -962,13 +956,12 @@ cpu,secret=foo value=100 0
}
func TestShard_Disabled_WriteQuery(t *testing.T) {
var sh *Shard
test := func(t *testing.T, index string) {
sh = NewShard(t, index)
sh := NewShard(t, index)
if err := sh.Open(context.Background()); err != nil {
t.Fatal(err)
}
defer sh.Close()
sh.SetEnabled(false)
@ -980,19 +973,13 @@ func TestShard_Disabled_WriteQuery(t *testing.T) {
)
err := sh.WritePoints(context.Background(), []models.Point{pt})
if err == nil {
t.Fatalf("expected shard disabled error")
}
if err != tsdb.ErrShardDisabled {
t.Fatalf(err.Error())
if !errors.Is(err, tsdb.ErrShardDisabled) {
t.Fatalf("expected shard disabled error: %v", err.Error())
}
m := &influxql.Measurement{Name: "cpu"}
_, got := sh.CreateIterator(context.Background(), m, query.IteratorOptions{})
if err == nil {
t.Fatalf("expected shard disabled error")
}
if exp := tsdb.ErrShardDisabled; got != exp {
t.Fatalf("got %v, expected %v", got, exp)
_, err = sh.CreateIterator(context.Background(), m, query.IteratorOptions{})
if exp := tsdb.ErrShardDisabled; !errors.Is(err, exp) {
t.Fatalf("got %v, expected %v", err, exp)
}
sh.SetEnabled(true)
@ -1002,21 +989,21 @@ func TestShard_Disabled_WriteQuery(t *testing.T) {
t.Fatalf("unexpected error: %v", err)
}
m = &influxql.Measurement{Name: "cpu"}
if _, err = sh.CreateIterator(context.Background(), m, query.IteratorOptions{}); err != nil {
t.Fatalf("unexpected error: %v", got)
itr, err := sh.CreateIterator(context.Background(), m, query.IteratorOptions{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
assert2.NoError(t, itr.Close())
}
for _, index := range tsdb.RegisteredIndexes() {
t.Run(index, func(t *testing.T) { test(t, index) })
sh.Close()
}
}
func TestShard_Closed_Functions(t *testing.T) {
var sh *Shard
test := func(t *testing.T, index string) {
sh = NewShard(t, index)
sh := NewShard(t, index)
if err := sh.Open(context.Background()); err != nil {
t.Fatal(err)
}
@ -1523,8 +1510,7 @@ func TestMeasurementFieldSet_SaveLoad(t *testing.T) {
const measurement = "cpu"
const fieldName = "value"
dir, cleanup := MustTempDir()
defer cleanup()
dir := t.TempDir()
path := filepath.Join(dir, "fields.idx")
mf, err := tsdb.NewMeasurementFieldSet(path, nil)
@ -1572,8 +1558,7 @@ func TestMeasurementFieldSet_SaveLoad(t *testing.T) {
}
func TestMeasurementFieldSet_Corrupt(t *testing.T) {
dir, cleanup := MustTempDir()
defer cleanup()
dir := t.TempDir()
path := filepath.Join(dir, "fields.idx")
func() {
@ -1621,8 +1606,7 @@ func TestMeasurementFieldSet_Corrupt(t *testing.T) {
}
func TestMeasurementFieldSet_CorruptChangeFile(t *testing.T) {
dir, cleanup := MustTempDir()
defer cleanup()
dir := t.TempDir()
testFields := []struct {
Measurement string
@ -1707,8 +1691,7 @@ func TestMeasurementFieldSet_DeleteEmpty(t *testing.T) {
const measurement = "cpu"
const fieldName = "value"
dir, cleanup := MustTempDir()
defer cleanup()
dir := t.TempDir()
path := filepath.Join(dir, "fields.idx")
mf, err := tsdb.NewMeasurementFieldSet(path, nil)
@ -1777,8 +1760,7 @@ func checkMeasurementFieldSetClose(t *testing.T, fs *tsdb.MeasurementFieldSet) {
}
func TestMeasurementFieldSet_InvalidFormat(t *testing.T) {
dir, cleanup := MustTempDir()
defer cleanup()
dir := t.TempDir()
path := filepath.Join(dir, "fields.idx")
@ -1795,8 +1777,7 @@ func TestMeasurementFieldSet_InvalidFormat(t *testing.T) {
func TestMeasurementFieldSet_ConcurrentSave(t *testing.T) {
var iterations int
dir, cleanup := MustTempDir()
defer cleanup()
dir := t.TempDir()
if testing.Short() {
iterations = 50
@ -2075,7 +2056,7 @@ func benchmarkWritePoints(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) {
// Run the benchmark loop.
for n := 0; n < b.N; n++ {
shard, tmpDir, err := openShard(sfile)
shard, err := openShard(b, sfile)
if err != nil {
shard.Close()
b.Fatal(err)
@ -2087,7 +2068,6 @@ func benchmarkWritePoints(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) {
b.StopTimer()
shard.Close()
os.RemoveAll(tmpDir)
}
}
@ -2111,7 +2091,7 @@ func benchmarkWritePointsExistingSeries(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt
sfile := MustOpenSeriesFile(b)
defer sfile.Close()
shard, tmpDir, err := openShard(sfile)
shard, err := openShard(b, sfile)
defer func() {
_ = shard.Close()
}()
@ -2136,7 +2116,6 @@ func benchmarkWritePointsExistingSeries(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt
// Call the function being benchmarked.
chunkedWrite(shard, points)
}
os.RemoveAll(tmpDir)
}
func benchmarkWritePointsExistingSeriesFields(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) {
@ -2159,7 +2138,7 @@ func benchmarkWritePointsExistingSeriesFields(b *testing.B, mCnt, tkCnt, tvCnt,
_ = sfile.Close()
}()
shard, tmpDir, err := openShard(sfile)
shard, err := openShard(b, sfile)
defer func() {
_ = shard.Close()
}()
@ -2184,7 +2163,6 @@ func benchmarkWritePointsExistingSeriesFields(b *testing.B, mCnt, tkCnt, tvCnt,
// Call the function being benchmarked.
chunkedWrite(shard, points)
}
os.RemoveAll(tmpDir)
}
func benchmarkWritePointsExistingSeriesEqualBatches(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) {
@ -2202,7 +2180,7 @@ func benchmarkWritePointsExistingSeriesEqualBatches(b *testing.B, mCnt, tkCnt, t
sfile := MustOpenSeriesFile(b)
defer sfile.Close()
shard, tmpDir, err := openShard(sfile)
shard, err := openShard(b, sfile)
defer func() {
_ = shard.Close()
}()
@ -2242,18 +2220,17 @@ func benchmarkWritePointsExistingSeriesEqualBatches(b *testing.B, mCnt, tkCnt, t
start = end
end += chunkSz
}
os.RemoveAll(tmpDir)
}
func openShard(sfile *SeriesFile) (*tsdb.Shard, string, error) {
tmpDir, _ := os.MkdirTemp("", "shard_test")
func openShard(tb testing.TB, sfile *SeriesFile) (*tsdb.Shard, error) {
tmpDir := tb.TempDir()
tmpShard := filepath.Join(tmpDir, "shard")
tmpWal := filepath.Join(tmpDir, "wal")
opts := tsdb.NewEngineOptions()
opts.Config.WALDir = tmpWal
shard := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts)
err := shard.Open(context.Background())
return shard, tmpDir, err
return shard, err
}
func BenchmarkCreateIterator(b *testing.B) {
@ -2384,10 +2361,7 @@ func NewShards(tb testing.TB, index string, n int) Shards {
tb.Helper()
// Create temporary path for data and WAL.
dir, err := os.MkdirTemp("", "influxdb-tsdb-")
if err != nil {
panic(err)
}
dir := tb.TempDir()
sfile := MustOpenSeriesFile(tb)
@ -2482,14 +2456,6 @@ func (sh *Shard) MustWritePointsString(s string) {
}
}
func MustTempDir() (string, func()) {
dir, err := os.MkdirTemp("", "shard-test")
if err != nil {
panic(fmt.Sprintf("failed to create temp dir: %v", err))
}
return dir, func() { os.RemoveAll(dir) }
}
type seriesIterator struct {
keys [][]byte
}

View File

@ -154,7 +154,7 @@ func TestStore_BadShard(t *testing.T) {
sh := tsdb.NewTempShard(t, idx)
err := s.OpenShard(context.Background(), sh.Shard, false)
require.NoError(t, err, "opening temp shard")
defer require.NoError(t, sh.Close(), "closing temporary shard")
require.NoError(t, sh.Close(), "closing temporary shard")
s.SetShardOpenErrorForTest(sh.ID(), errors.New(errStr))
err2 := s.OpenShard(context.Background(), sh.Shard, false)
@ -164,6 +164,7 @@ func TestStore_BadShard(t *testing.T) {
// This should succeed with the force (and because opening an open shard automatically succeeds)
require.NoError(t, s.OpenShard(context.Background(), sh.Shard, true), "forced re-opening previously failing shard")
require.NoError(t, sh.Close())
}()
}
}
@ -484,9 +485,11 @@ func TestStore_Open_InvalidDatabaseFile(t *testing.T) {
defer s.Close()
// Create a file instead of a directory for a database.
if _, err := os.Create(filepath.Join(s.Path(), "db0")); err != nil {
f, err := os.Create(filepath.Join(s.Path(), "db0"))
if err != nil {
t.Fatal(err)
}
require.NoError(t, f.Close())
// Store should ignore database since it's a file.
if err := s.Open(context.Background()); err != nil {
@ -511,9 +514,13 @@ func TestStore_Open_InvalidRetentionPolicy(t *testing.T) {
// Create an RP file instead of a directory.
if err := os.MkdirAll(filepath.Join(s.Path(), "db0"), 0777); err != nil {
t.Fatal(err)
} else if _, err := os.Create(filepath.Join(s.Path(), "db0", "rp0")); err != nil {
}
f, err := os.Create(filepath.Join(s.Path(), "db0", "rp0"))
if err != nil {
t.Fatal(err)
}
require.NoError(t, f.Close())
// Store should ignore retention policy since it's a file, and there should
// be no indices created.
@ -540,9 +547,13 @@ func TestStore_Open_InvalidShard(t *testing.T) {
// Create a non-numeric shard file.
if err := os.MkdirAll(filepath.Join(s.Path(), "db0", "rp0"), 0777); err != nil {
t.Fatal(err)
} else if _, err := os.Create(filepath.Join(s.Path(), "db0", "rp0", "bad_shard")); err != nil {
}
f, err := os.Create(filepath.Join(s.Path(), "db0", "rp0", "bad_shard"))
if err != nil {
t.Fatal(err)
}
require.NoError(t, f.Close())
// Store should ignore shard since it does not have a numeric name.
if err := s.Open(context.Background()); err != nil {
@ -2390,10 +2401,7 @@ type Store struct {
func NewStore(tb testing.TB, index string) *Store {
tb.Helper()
path, err := os.MkdirTemp("", "influxdb-tsdb-")
if err != nil {
panic(err)
}
path := tb.TempDir()
s := &Store{Store: tsdb.NewStore(path), index: index}
s.EngineOptions.IndexVersion = index
@ -2436,7 +2444,6 @@ func (s *Store) Reopen(tb testing.TB) error {
// Close closes the store and removes the underlying data.
func (s *Store) Close() error {
defer os.RemoveAll(s.Path())
return s.Store.Close()
}