Fix bad commit: remove unresolved merge-conflict markers left by a previous merge

pull/10616/head
Lyon Hill 2018-08-01 10:43:31 -06:00
parent 6bba50a2a1
commit 3ba8784fed
2 changed files with 10 additions and 99 deletions

View File

@ -19,40 +19,6 @@ func TestScheduler_EveryValidation(t *testing.T) {
d := mock.NewDesiredState() d := mock.NewDesiredState()
e := mock.NewExecutor() e := mock.NewExecutor()
o := backend.NewScheduler(d, e, backend.NopLogWriter{}, 5) o := backend.NewScheduler(d, e, backend.NopLogWriter{}, 5)
<<<<<<< HEAD
tid := platform.ID{1}
=======
<<<<<<< HEAD
tid := platform.ID(1)
>>>>>>> feat(task): update the scheduler and logwriter interface
badScripts := []string{
`option task = {
name: "name",
every: 1ms,
}
from(bucket:"b") |> toHTTP(url: "http://example.com")`,
`option task = {
name: "name",
every: -1h,
}
from(bucket:"b") |> toHTTP(url: "http://example.com")`,
`option task = {
name: "name",
every: 1500ms,
}
from(bucket:"b") |> toHTTP(url: "http://example.com")`,
`option task = {
name: "name",
every: 12.32s,
}
from(bucket:"b") |> toHTTP(url: "http://example.com")`,
}
for _, badScript := range badScripts {
if err := o.ClaimTask(tid, badScript, 3, 99); err == nil {
t.Fatal("no error returned for :", badScript)
=======
task := &backend.StoreTask{ task := &backend.StoreTask{
ID: platform.ID{1}, ID: platform.ID{1},
} }
@ -75,7 +41,6 @@ from(bucket:"b") |> toHTTP(url: "http://example.com")`,
for _, badOption := range badOptions { for _, badOption := range badOptions {
if err := o.ClaimTask(task, 3, &badOption); err == nil { if err := o.ClaimTask(task, 3, &badOption); err == nil {
t.Fatal("no error returned for :", badOption) t.Fatal("no error returned for :", badOption)
>>>>>>> feat(task): update the scheduler and logwriter interface
} }
} }
} }
@ -85,20 +50,11 @@ func TestScheduler_StartScriptOnClaim(t *testing.T) {
e := mock.NewExecutor() e := mock.NewExecutor()
o := backend.NewScheduler(d, e, backend.NopLogWriter{}, 5) o := backend.NewScheduler(d, e, backend.NopLogWriter{}, 5)
<<<<<<< HEAD
tid := platform.ID{1}
=======
<<<<<<< HEAD
tid := platform.ID(1)
>>>>>>> feat(task): update the scheduler and logwriter interface
if err := o.ClaimTask(tid, scriptEveryMinute, 3, 99); err != nil {
=======
task := &backend.StoreTask{ task := &backend.StoreTask{
ID: platform.ID{1}, ID: platform.ID{1},
} }
opts := &options.Options{Every: time.Minute} opts := &options.Options{Every: time.Minute}
if err := o.ClaimTask(task, 3, opts); err != nil { if err := o.ClaimTask(task, 3, opts); err != nil {
>>>>>>> feat(task): update the scheduler and logwriter interface
t.Fatal(err) t.Fatal(err)
} }
@ -108,20 +64,11 @@ func TestScheduler_StartScriptOnClaim(t *testing.T) {
} }
// For every second, can queue for timestamps 4 and 5. // For every second, can queue for timestamps 4 and 5.
<<<<<<< HEAD
tid = platform.ID{2}
=======
<<<<<<< HEAD
tid = platform.ID(2)
>>>>>>> feat(task): update the scheduler and logwriter interface
if err := o.ClaimTask(tid, scriptEverySecond, 3, 5); err != nil {
=======
task = &backend.StoreTask{ task = &backend.StoreTask{
ID: platform.ID{2}, ID: platform.ID{2},
} }
opts = &options.Options{Every: time.Second, Concurrency: 99} opts = &options.Options{Every: time.Second, Concurrency: 99}
if err := o.ClaimTask(task, 3, opts); err != nil { if err := o.ClaimTask(task, 3, opts); err != nil {
>>>>>>> feat(task): update the scheduler and logwriter interface
t.Fatal(err) t.Fatal(err)
} }
@ -135,21 +82,12 @@ func TestScheduler_CreateRunOnTick(t *testing.T) {
e := mock.NewExecutor() e := mock.NewExecutor()
o := backend.NewScheduler(d, e, backend.NopLogWriter{}, 5) o := backend.NewScheduler(d, e, backend.NopLogWriter{}, 5)
<<<<<<< HEAD
tid := platform.ID{1}
=======
<<<<<<< HEAD
tid := platform.ID(1)
>>>>>>> feat(task): update the scheduler and logwriter interface
if err := o.ClaimTask(tid, scriptEverySecond, 5, 2); err != nil {
=======
task := &backend.StoreTask{ task := &backend.StoreTask{
ID: platform.ID{1}, ID: platform.ID{1},
} }
opts := &options.Options{Every: time.Second, Concurrency: 2} opts := &options.Options{Every: time.Second, Concurrency: 2}
if err := o.ClaimTask(task, 5, opts); err != nil { if err := o.ClaimTask(task, 5, opts); err != nil {
>>>>>>> feat(task): update the scheduler and logwriter interface
t.Fatal(err) t.Fatal(err)
} }
@ -187,21 +125,12 @@ func TestScheduler_Release(t *testing.T) {
e := mock.NewExecutor() e := mock.NewExecutor()
o := backend.NewScheduler(d, e, backend.NopLogWriter{}, 5) o := backend.NewScheduler(d, e, backend.NopLogWriter{}, 5)
<<<<<<< HEAD
tid := platform.ID{1}
=======
<<<<<<< HEAD
tid := platform.ID(1)
>>>>>>> feat(task): update the scheduler and logwriter interface
if err := o.ClaimTask(tid, scriptEverySecond, 5, 2); err != nil {
=======
task := &backend.StoreTask{ task := &backend.StoreTask{
ID: platform.ID{1}, ID: platform.ID{1},
} }
opts := &options.Options{Every: time.Second, Concurrency: 99} opts := &options.Options{Every: time.Second, Concurrency: 99}
if err := o.ClaimTask(task, 5, opts); err != nil { if err := o.ClaimTask(task, 5, opts); err != nil {
>>>>>>> feat(task): update the scheduler and logwriter interface
t.Fatal(err) t.Fatal(err)
} }
@ -227,21 +156,12 @@ func TestScheduler_RunLog(t *testing.T) {
s := backend.NewScheduler(d, e, rl, 5) s := backend.NewScheduler(d, e, rl, 5)
// Claim a task that starts later. // Claim a task that starts later.
<<<<<<< HEAD
tid := platform.ID{1}
=======
<<<<<<< HEAD
tid := platform.ID(1)
>>>>>>> feat(task): update the scheduler and logwriter interface
if err := s.ClaimTask(tid, scriptEverySecond, 5, 2); err != nil {
=======
task := &backend.StoreTask{ task := &backend.StoreTask{
ID: platform.ID{1}, ID: platform.ID{1},
} }
opts := &options.Options{Every: time.Second, Concurrency: 99} opts := &options.Options{Every: time.Second, Concurrency: 99}
if err := s.ClaimTask(task, 5, opts); err != nil { if err := s.ClaimTask(task, 5, opts); err != nil {
>>>>>>> feat(task): update the scheduler and logwriter interface
t.Fatal(err) t.Fatal(err)
} }
@ -371,21 +291,12 @@ func TestScheduler_Metrics(t *testing.T) {
reg.MustRegister(s.(prom.PrometheusCollector).PrometheusCollectors()...) reg.MustRegister(s.(prom.PrometheusCollector).PrometheusCollectors()...)
// Claim a task that starts later. // Claim a task that starts later.
<<<<<<< HEAD
tid := platform.ID{1}
=======
<<<<<<< HEAD
tid := platform.ID(1)
>>>>>>> feat(task): update the scheduler and logwriter interface
if err := s.ClaimTask(tid, scriptEverySecond, 5, 2); err != nil {
=======
task := &backend.StoreTask{ task := &backend.StoreTask{
ID: platform.ID{1}, ID: platform.ID{1},
} }
opts := &options.Options{Every: time.Second, Concurrency: 99} opts := &options.Options{Every: time.Second, Concurrency: 99}
if err := s.ClaimTask(task, 5, opts); err != nil { if err := s.ClaimTask(task, 5, opts); err != nil {
>>>>>>> feat(task): update the scheduler and logwriter interface
t.Fatal(err) t.Fatal(err)
} }

View File

@ -41,8 +41,8 @@ func updateRunState(t *testing.T, crf CreateRunStoreFunc, drf DestroyRunStoreFun
defer drf(t, writer, reader) defer drf(t, writer, reader)
task := &backend.StoreTask{ task := &backend.StoreTask{
ID: platformtesting.MustIDFromString("ab01ab01ab01ab01"), ID: platform.ID([]byte("ab01ab01ab01ab01")),
Org: platform.ID([]byte("org")), Org: platform.ID([]byte("ab01ab01ab01ab05")),
} }
queuedAt := time.Unix(1, 0) queuedAt := time.Unix(1, 0)
run := platform.Run{ run := platform.Run{
@ -103,8 +103,8 @@ func runLogTest(t *testing.T, crf CreateRunStoreFunc, drf DestroyRunStoreFunc) {
defer drf(t, writer, reader) defer drf(t, writer, reader)
task := &backend.StoreTask{ task := &backend.StoreTask{
ID: platformtesting.MustIDFromString("ab01ab01ab01ab01"), ID: platform.ID([]byte("ab01ab01ab01ab01")),
Org: platform.ID([]byte("org")), Org: platform.ID([]byte("ab01ab01ab01ab05")),
} }
run := platform.Run{ run := platform.Run{
@ -152,8 +152,8 @@ func listRunsTest(t *testing.T, crf CreateRunStoreFunc, drf DestroyRunStoreFunc)
defer drf(t, writer, reader) defer drf(t, writer, reader)
task := &backend.StoreTask{ task := &backend.StoreTask{
ID: platformtesting.MustIDFromString("ab01ab01ab01ab01"), ID: platform.ID([]byte("ab01ab01ab01ab01")),
Org: platform.ID([]byte("org")), Org: platform.ID([]byte("ab01ab01ab01ab05")),
} }
if _, err := reader.ListRuns(context.Background(), platform.RunFilter{Task: &task.ID}); err == nil { if _, err := reader.ListRuns(context.Background(), platform.RunFilter{Task: &task.ID}); err == nil {
@ -251,8 +251,8 @@ func findRunByIDTest(t *testing.T, crf CreateRunStoreFunc, drf DestroyRunStoreFu
} }
task := &backend.StoreTask{ task := &backend.StoreTask{
ID: platformtesting.MustIDFromString("ab01ab01ab01ab01"), ID: platform.ID([]byte("ab01ab01ab01ab01")),
Org: platform.ID([]byte("org")), Org: platform.ID([]byte("ab01ab01ab01ab05")),
} }
run := platform.Run{ run := platform.Run{
ID: platform.ID([]byte("run")), ID: platform.ID([]byte("run")),
@ -290,8 +290,8 @@ func listLogsTest(t *testing.T, crf CreateRunStoreFunc, drf DestroyRunStoreFunc)
defer drf(t, writer, reader) defer drf(t, writer, reader)
task := &backend.StoreTask{ task := &backend.StoreTask{
ID: platformtesting.MustIDFromString("ab01ab01ab01ab01"), ID: platform.ID([]byte("ab01ab01ab01ab01")),
Org: platform.ID([]byte("org")), Org: platform.ID([]byte("ab01ab01ab01ab05")),
} }
if _, err := reader.ListLogs(context.Background(), platform.LogFilter{}); err == nil { if _, err := reader.ListLogs(context.Background(), platform.LogFilter{}); err == nil {