refactor(tasks): use go Time for Task CreatedAt, UpdatedAt, LatestCompleted, Offset (#15672)
parent f6dbfec346
commit f0ecc0e89d

crud_log.go (34 changed lines)
@@ -1,8 +1,6 @@
 package influxdb
 
 import (
-    "encoding/json"
-    "errors"
     "time"
 )
 
@@ -41,35 +39,3 @@ type RealTimeGenerator struct{}
 func (g RealTimeGenerator) Now() time.Time {
     return time.Now()
 }
-
-// Duration is based on time.Duration to embed in any struct.
-type Duration struct {
-    time.Duration
-}
-
-// MarshalJSON implements json.Marshaler interface.
-func (d Duration) MarshalJSON() ([]byte, error) {
-    return json.Marshal(d.String())
-}
-
-// UnmarshalJSON implements json.Unmarshaler interface.
-func (d *Duration) UnmarshalJSON(b []byte) error {
-    var v interface{}
-    if err := json.Unmarshal(b, &v); err != nil {
-        return err
-    }
-    switch value := v.(type) {
-    case float64:
-        d.Duration = time.Duration(value)
-        return nil
-    case string:
-        var err error
-        d.Duration, err = time.ParseDuration(value)
-        if err != nil {
-            return err
-        }
-        return nil
-    default:
-        return errors.New("invalid duration")
-    }
-}
@@ -0,0 +1,39 @@
+package influxdb
+
+import (
+    "encoding/json"
+    "errors"
+    "time"
+)
+
+// Duration is based on time.Duration to embed in any struct.
+type Duration struct {
+    time.Duration
+}
+
+// MarshalJSON implements json.Marshaler interface.
+func (d Duration) MarshalJSON() ([]byte, error) {
+    return json.Marshal(d.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler interface.
+func (d *Duration) UnmarshalJSON(b []byte) error {
+    var v interface{}
+    if err := json.Unmarshal(b, &v); err != nil {
+        return err
+    }
+    switch value := v.(type) {
+    case float64:
+        d.Duration = time.Duration(value)
+        return nil
+    case string:
+        var err error
+        d.Duration, err = time.ParseDuration(value)
+        if err != nil {
+            return err
+        }
+        return nil
+    default:
+        return errors.New("invalid duration")
+    }
+}
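As a quick illustration of the wrapper added above, the sketch below round-trips a value through JSON. The type is copied into a standalone program so the snippet compiles on its own; the behavior shown (marshal to a duration string, unmarshal from either a string or a numeric nanosecond count) follows directly from the code in the hunk.

package main

import (
    "encoding/json"
    "errors"
    "fmt"
    "time"
)

// Duration mirrors the wrapper introduced in this commit: it embeds
// time.Duration, serializes as a duration string ("5s"), and accepts either
// a string or a float64 (nanoseconds) when decoding.
type Duration struct {
    time.Duration
}

func (d Duration) MarshalJSON() ([]byte, error) {
    return json.Marshal(d.String())
}

func (d *Duration) UnmarshalJSON(b []byte) error {
    var v interface{}
    if err := json.Unmarshal(b, &v); err != nil {
        return err
    }
    switch value := v.(type) {
    case float64:
        d.Duration = time.Duration(value)
        return nil
    case string:
        var err error
        d.Duration, err = time.ParseDuration(value)
        return err
    default:
        return errors.New("invalid duration")
    }
}

func main() {
    out, _ := json.Marshal(Duration{5 * time.Second})
    fmt.Println(string(out)) // "5s"

    var d Duration
    _ = json.Unmarshal([]byte(`"1h30m"`), &d) // string form
    fmt.Println(d.Duration)                   // 1h30m0s

    _ = json.Unmarshal([]byte(`5000000000`), &d) // numeric nanoseconds
    fmt.Println(d.Duration)                      // 5s
}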
@@ -180,6 +180,23 @@ type taskResponse struct {
 
 // NewFrontEndTask converts a internal task type to a task that we want to display to users
 func NewFrontEndTask(t influxdb.Task) Task {
+    latestCompleted := ""
+    if !t.LatestCompleted.IsZero() {
+        latestCompleted = t.LatestCompleted.Format(time.RFC3339)
+    }
+    createdAt := ""
+    if !t.CreatedAt.IsZero() {
+        createdAt = t.CreatedAt.Format(time.RFC3339)
+    }
+    updatedAt := ""
+    if !t.UpdatedAt.IsZero() {
+        updatedAt = t.UpdatedAt.Format(time.RFC3339)
+    }
+    offset := ""
+    if t.Offset != 0*time.Second {
+        offset = customParseDuration(t.Offset)
+    }
+
     return Task{
         ID:             t.ID,
         OrganizationID: t.OrganizationID,

@@ -191,16 +208,52 @@ func NewFrontEndTask(t influxdb.Task) Task {
         Flux:            t.Flux,
         Every:           t.Every,
         Cron:            t.Cron,
-        Offset:          t.Offset,
-        LatestCompleted: t.LatestCompleted,
+        Offset:          offset,
+        LatestCompleted: latestCompleted,
         LastRunStatus:   t.LastRunStatus,
         LastRunError:    t.LastRunError,
-        CreatedAt:       t.CreatedAt,
-        UpdatedAt:       t.UpdatedAt,
+        CreatedAt:       createdAt,
+        UpdatedAt:       updatedAt,
         Metadata:        t.Metadata,
     }
 }
 
+func customParseDuration(d time.Duration) string {
+    str := ""
+    if d < 0 {
+        str = "-"
+        d = d * -1
+    }
+
+    // parse hours
+    hours := d / time.Hour
+    if hours != 0 {
+        str = fmt.Sprintf("%s%dh", str, hours)
+    }
+    if d%time.Hour == 0 {
+        return str
+    }
+    // parse minutes
+    d = d - (time.Duration(hours) * time.Hour)
+
+    min := d / time.Minute
+    if min != 0 {
+        str = fmt.Sprintf("%s%dm", str, min)
+    }
+    if d%time.Minute == 0 {
+        return str
+    }
+
+    // parse seconds
+    d = d - time.Duration(min)*time.Minute
+    sec := d / time.Second
+
+    if sec != 0 {
+        str = fmt.Sprintf("%s%ds", str, sec)
+    }
+    return str
+}
+
 func newTaskResponse(t influxdb.Task, labels []*influxdb.Label) taskResponse {
     response := taskResponse{
         Links: map[string]string{
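For reference, here is what the customParseDuration helper added above produces for a few representative offsets. The function body is copied from the hunk so the example compiles on its own; the expected outputs in the comments were worked out by hand from that code.

package main

import (
    "fmt"
    "time"
)

// customParseDuration (copied from the hunk above) renders a time.Duration
// as the "1h2m3s" style string the HTTP layer returns to users.
func customParseDuration(d time.Duration) string {
    str := ""
    if d < 0 {
        str = "-"
        d = d * -1
    }

    // parse hours
    hours := d / time.Hour
    if hours != 0 {
        str = fmt.Sprintf("%s%dh", str, hours)
    }
    if d%time.Hour == 0 {
        return str
    }
    // parse minutes
    d = d - (time.Duration(hours) * time.Hour)

    min := d / time.Minute
    if min != 0 {
        str = fmt.Sprintf("%s%dm", str, min)
    }
    if d%time.Minute == 0 {
        return str
    }

    // parse seconds
    d = d - time.Duration(min)*time.Minute
    sec := d / time.Second

    if sec != 0 {
        str = fmt.Sprintf("%s%ds", str, sec)
    }
    return str
}

func main() {
    fmt.Println(customParseDuration(5 * time.Second))         // 5s
    fmt.Println(customParseDuration(90 * time.Minute))        // 1h30m
    fmt.Println(customParseDuration(3 * time.Hour))           // 3h
    fmt.Println(customParseDuration(-2 * time.Minute))        // -2m
    fmt.Println(customParseDuration(time.Hour + time.Second)) // 1h1s
}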
kv/task.go (108 changed lines)
@@ -35,6 +35,52 @@ var (
 var _ influxdb.TaskService = (*Service)(nil)
 var _ backend.TaskControlService = (*Service)(nil)
 
+type kvTask struct {
+    ID              influxdb.ID            `json:"id"`
+    Type            string                 `json:"type,omitempty"`
+    OrganizationID  influxdb.ID            `json:"orgID"`
+    Organization    string                 `json:"org"`
+    OwnerID         influxdb.ID            `json:"ownerID"`
+    Name            string                 `json:"name"`
+    Description     string                 `json:"description,omitempty"`
+    Status          string                 `json:"status"`
+    Flux            string                 `json:"flux"`
+    Every           string                 `json:"every,omitempty"`
+    Cron            string                 `json:"cron,omitempty"`
+    LastRunStatus   string                 `json:"lastRunStatus,omitempty"`
+    LastRunError    string                 `json:"lastRunError,omitempty"`
+    Offset          influxdb.Duration      `json:"offset,omitempty"`
+    LatestCompleted time.Time              `json:"latestCompleted,omitempty"`
+    LatestScheduled time.Time              `json:"latestScheduled,omitempty"`
+    CreatedAt       time.Time              `json:"createdAt,omitempty"`
+    UpdatedAt       time.Time              `json:"updatedAt,omitempty"`
+    Metadata        map[string]interface{} `json:"metadata,omitempty"`
+}
+
+func kvToInfluxTask(k *kvTask) *influxdb.Task {
+    return &influxdb.Task{
+        ID:              k.ID,
+        Type:            k.Type,
+        OrganizationID:  k.OrganizationID,
+        Organization:    k.Organization,
+        OwnerID:         k.OwnerID,
+        Name:            k.Name,
+        Description:     k.Description,
+        Status:          k.Status,
+        Flux:            k.Flux,
+        Every:           k.Every,
+        Cron:            k.Cron,
+        LastRunStatus:   k.LastRunStatus,
+        LastRunError:    k.LastRunError,
+        Offset:          k.Offset.Duration,
+        LatestCompleted: k.LatestCompleted,
+        LatestScheduled: k.LatestScheduled,
+        CreatedAt:       k.CreatedAt,
+        UpdatedAt:       k.UpdatedAt,
+        Metadata:        k.Metadata,
+    }
+}
+
 func (s *Service) initializeTasks(ctx context.Context, tx Tx) error {
     if _, err := tx.Bucket(taskBucket); err != nil {
         return err

@@ -113,12 +159,14 @@ func (s *Service) findTaskByID(ctx context.Context, tx Tx, id influxdb.ID) (*inf
     if err != nil {
         return nil, err
     }
-    t := &influxdb.Task{}
-    if err := json.Unmarshal(v, t); err != nil {
+    kvTask := &kvTask{}
+    if err := json.Unmarshal(v, kvTask); err != nil {
         return nil, influxdb.ErrInternalTaskServiceError(err)
     }
 
-    if t.LatestCompleted == "" {
+    t := kvToInfluxTask(kvTask)
+
+    if t.LatestCompleted.IsZero() {
         t.LatestCompleted = t.CreatedAt
     }
 
@@ -470,11 +518,13 @@ func (s *Service) findAllTasks(ctx context.Context, tx Tx, filter influxdb.TaskF
     matchFn := newTaskMatchFn(filter, nil)
 
     for k != nil {
-        t := &influxdb.Task{}
-        if err := json.Unmarshal(v, t); err != nil {
+        kvTask := &kvTask{}
+        if err := json.Unmarshal(v, kvTask); err != nil {
             return nil, 0, influxdb.ErrInternalTaskServiceError(err)
         }
 
+        t := kvToInfluxTask(kvTask)
+
         if matchFn == nil || matchFn(t) {
             ts = append(ts, t)
 
@@ -553,7 +603,7 @@ func (s *Service) createTask(ctx context.Context, tx Tx, tc influxdb.TaskCreate)
         tc.Status = string(backend.TaskActive)
     }
 
-    createdAt := time.Now().UTC().Format(time.RFC3339)
+    createdAt := time.Now().Truncate(time.Second).UTC()
     task := &influxdb.Task{
         ID:   s.IDGenerator.ID(),
         Type: tc.Type,

@@ -570,8 +620,14 @@ func (s *Service) createTask(ctx context.Context, tx Tx, tc influxdb.TaskCreate)
         CreatedAt:       createdAt,
         LatestCompleted: createdAt,
     }
 
     if opt.Offset != nil {
-        task.Offset = opt.Offset.String()
+        off, err := time.ParseDuration(opt.Offset.String())
+        if err != nil {
+            return nil, influxdb.ErrTaskTimeParse(err)
+        }
+        task.Offset = off
+
     }
 
     taskBucket, err := tx.Bucket(taskBucket)

@@ -666,7 +722,7 @@ func (s *Service) updateTask(ctx context.Context, tx Tx, id influxdb.ID, upd inf
         return nil, err
     }
 
-    updatedAt := time.Now().UTC().Format(time.RFC3339)
+    updatedAt := time.Now().UTC()
 
     // update the flux script
     if !upd.Options.IsZero() || upd.Flux != nil {

@@ -682,11 +738,15 @@ func (s *Service) updateTask(ctx context.Context, tx Tx, id influxdb.ID, upd inf
         task.Name = options.Name
         task.Every = options.Every.String()
         task.Cron = options.Cron
-        if options.Offset == nil {
-            task.Offset = ""
-        } else {
-            task.Offset = options.Offset.String()
+
+        var off time.Duration
+        if options.Offset != nil {
+            off, err = time.ParseDuration(options.Offset.String())
+            if err != nil {
+                return nil, influxdb.ErrTaskTimeParse(err)
+            }
         }
+        task.Offset = off
         task.UpdatedAt = updatedAt
     }
 
@@ -709,8 +769,8 @@ func (s *Service) updateTask(ctx context.Context, tx Tx, id influxdb.ID, upd inf
 
     if upd.LatestCompleted != nil {
         // make sure we only update latest completed one way
-        tlc, _ := time.Parse(time.RFC3339, task.LatestCompleted)
-        ulc, _ := time.Parse(time.RFC3339, *upd.LatestCompleted)
+        tlc := task.LatestCompleted
+        ulc := *upd.LatestCompleted
 
         if !ulc.IsZero() && ulc.After(tlc) {
             task.LatestCompleted = *upd.LatestCompleted

@@ -1298,7 +1358,7 @@ func (s *Service) createNextRun(ctx context.Context, tx Tx, taskID influxdb.ID,
 
     nextScheduled := sch.Next(time.Unix(scheduledFor, 0)).UTC()
     offset := &options.Duration{}
-    if err := offset.Parse(task.Offset); err != nil {
+    if err := offset.Parse(task.Offset.String()); err != nil {
         return backend.RunCreation{}, influxdb.ErrTaskTimeParse(err)
     }
     nextDueAt, err := offset.Add(nextScheduled)

@@ -1558,9 +1618,9 @@ func (s *Service) finishRun(ctx context.Context, tx Tx, taskID, runID influxdb.I
     }
 
     // tell task to update latest completed
-    scheduledStr := r.ScheduledFor.Format(time.RFC3339)
+    scheduled := r.ScheduledFor
     _, err = s.updateTask(ctx, tx, taskID, influxdb.TaskUpdate{
-        LatestCompleted: &scheduledStr,
+        LatestCompleted: &scheduled,
         LastRunStatus:   &r.Status,
         LastRunError: func() *string {
             if r.Status == "failed" {

@@ -1659,7 +1719,7 @@ func (s *Service) nextDueRun(ctx context.Context, tx Tx, taskID influxdb.ID) (in
 
     nextScheduled := sch.Next(latestCompleted).UTC()
     offset := &options.Duration{}
-    if err := offset.Parse(task.Offset); err != nil {
+    if err := offset.Parse(task.Offset.String()); err != nil {
         return 0, 0, influxdb.ErrTaskTimeParse(err)
     }
     dueAt, err := offset.Add(nextScheduled)

@@ -1777,16 +1837,10 @@ func (s *Service) findLatestScheduledTimeForTask(ctx context.Context, tx Tx, tas
         err error
     )
 
-    if task.LatestCompleted == "" {
-        latestCompleted, err = time.Parse(time.RFC3339, task.CreatedAt)
-        if err != nil {
-            return time.Time{}, influxdb.ErrTaskTimeParse(err)
-        }
+    if task.LatestCompleted.IsZero() {
+        latestCompleted = task.CreatedAt
     } else {
-        latestCompleted, err = time.Parse(time.RFC3339, task.LatestCompleted)
-        if err != nil {
-            return time.Time{}, influxdb.ErrTaskTimeParse(err)
-        }
+        latestCompleted = task.LatestCompleted
     }
 
     // find out if we have a currently running schedule that is after the latest completed

@@ -159,7 +159,7 @@ func TestNextRunDue(t *testing.T) {
 
     // +20 to account for the 20 second offset in the flux script
     oldNextDue := run.Created.Now
-    if task.Offset != "" {
+    if task.Offset != 0 {
         oldNextDue += 20
     }
     if oldNextDue != nd {
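A small sketch of how the new kv-layer record serializes. The Duration and kvTask types below are trimmed stand-ins (only the fields relevant to encoding are kept, and Duration carries just its MarshalJSON half), not the repository's definitions; the point is that Offset is stored as a duration string while the timestamps are stored as RFC 3339 times.

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// Duration is a stand-in for influxdb.Duration from this commit: it embeds
// time.Duration and marshals as a duration string such as "5s".
type Duration struct {
    time.Duration
}

func (d Duration) MarshalJSON() ([]byte, error) {
    return json.Marshal(d.String())
}

// kvTask is a trimmed stand-in for the kv-layer task record added above.
type kvTask struct {
    Name            string    `json:"name"`
    Offset          Duration  `json:"offset,omitempty"`
    LatestCompleted time.Time `json:"latestCompleted,omitempty"`
    CreatedAt       time.Time `json:"createdAt,omitempty"`
}

func main() {
    createdAt := time.Date(2019, 11, 1, 12, 0, 0, 0, time.UTC)
    t := kvTask{
        Name:            "example",
        Offset:          Duration{5 * time.Second},
        LatestCompleted: createdAt,
        CreatedAt:       createdAt,
    }
    b, _ := json.MarshalIndent(t, "", "  ")
    fmt.Println(string(b))
    // Offset serializes as "5s"; the time.Time fields serialize as RFC 3339
    // timestamps such as "2019-11-01T12:00:00Z".
}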
task.go (27 changed lines)
@@ -42,13 +42,13 @@ type Task struct {
     Flux            string                 `json:"flux"`
     Every           string                 `json:"every,omitempty"`
     Cron            string                 `json:"cron,omitempty"`
-    Offset          string                 `json:"offset,omitempty"`
-    LatestCompleted string                 `json:"latestCompleted,omitempty"`
+    Offset          time.Duration          `json:"offset,omitempty"`
+    LatestCompleted time.Time              `json:"latestCompleted,omitempty"`
     LatestScheduled time.Time              `json:"latestScheduled,omitempty"`
     LastRunStatus   string                 `json:"lastRunStatus,omitempty"`
     LastRunError    string                 `json:"lastRunError,omitempty"`
-    CreatedAt       string                 `json:"createdAt,omitempty"`
-    UpdatedAt       string                 `json:"updatedAt,omitempty"`
+    CreatedAt       time.Time              `json:"createdAt,omitempty"`
+    UpdatedAt       time.Time              `json:"updatedAt,omitempty"`
     Metadata        map[string]interface{} `json:"metadata,omitempty"`
 }
 
@@ -67,23 +67,6 @@ func (t *Task) EffectiveCron() string {
     return ""
 }
 
-// LatestCompletedTime gives the time.Time that the task was last queued to be run in RFC3339 format.
-func (t *Task) LatestCompletedTime() (time.Time, error) {
-    tm := t.LatestCompleted
-    if tm == "" {
-        tm = t.CreatedAt
-    }
-    return time.Parse(time.RFC3339, tm)
-}
-
-// OffsetDuration gives the time.Duration of the Task's Offset property, which represents a delay before execution
-func (t *Task) OffsetDuration() (time.Duration, error) {
-    if t.Offset == "" {
-        return time.Duration(0), nil
-    }
-    return time.ParseDuration(t.Offset)
-}
-
 // Run is a record createId when a run of a task is scheduled.
 type Run struct {
     ID ID `json:"id,omitempty"`

@@ -177,7 +160,7 @@ type TaskUpdate struct {
     Description *string `json:"description,omitempty"`
 
     // LatestCompleted us to set latest completed on startup to skip task catchup
-    LatestCompleted *string    `json:"-"`
+    LatestCompleted *time.Time `json:"-"`
     LatestScheduled *time.Time `json:"-"`
     LastRunStatus   *string    `json:"-"`
     LastRunError    *string    `json:"-"`
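With the model fields switched from strings to time.Time and time.Duration, "unset" becomes the zero value rather than "". A minimal sketch of the pattern the rest of this diff keeps applying; the trimmed task type and helper below are illustrative, not the influxdb package's definitions.

package main

import (
    "fmt"
    "time"
)

// task is a trimmed stand-in for influxdb.Task after this commit: timestamps
// are time.Time and Offset is a time.Duration instead of strings.
type task struct {
    Offset          time.Duration
    CreatedAt       time.Time
    LatestCompleted time.Time
}

// latestCompletedOrCreated mirrors the fallback used throughout the diff:
// an unset time is detected with IsZero instead of comparing against "".
func latestCompletedOrCreated(t task) time.Time {
    if t.LatestCompleted.IsZero() {
        return t.CreatedAt
    }
    return t.LatestCompleted
}

func main() {
    created := time.Date(2019, 11, 1, 0, 0, 0, 0, time.UTC)
    t := task{CreatedAt: created}

    fmt.Println(latestCompletedOrCreated(t)) // falls back to CreatedAt

    // An unset offset is the zero duration rather than an empty string.
    if t.Offset != 0 {
        fmt.Println("offset:", t.Offset)
    } else {
        fmt.Println("no offset")
    }
}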
@@ -33,7 +33,7 @@ func NotifyCoordinatorOfExisting(ctx context.Context, ts TaskService, coord Coor
         return err
     }
 
-    latestCompleted := now().Format(time.RFC3339)
+    latestCompleted := now()
     for len(tasks) > 0 {
         for _, task := range tasks {
             if task.Status != string(TaskActive) {

@@ -74,7 +74,7 @@ func TaskNotifyCoordinatorOfExisting(ctx context.Context, ts TaskService, tcs Ta
         return err
     }
 
-    latestCompleted := now().Format(time.RFC3339)
+    latestCompleted := now()
     for len(tasks) > 0 {
         for _, task := range tasks {
             if task.Status != string(TaskActive) {

@@ -53,8 +53,7 @@ func (t SchedulableTask) Schedule() scheduler.Schedule {
 
 // Offset returns a time.Duration for the Task's offset property
 func (t SchedulableTask) Offset() time.Duration {
-    offset, _ := t.OffsetDuration()
-    return offset
+    return t.Task.Offset
 }
 
 // LastScheduled parses the task's LatestCompleted value as a Time object

@@ -62,13 +61,11 @@ func (t SchedulableTask) LastScheduled() time.Time {
     if !t.LatestScheduled.IsZero() {
         return t.LatestScheduled
     }
-    if t.LatestCompleted != "" {
-        latestCompleted, _ := t.LatestCompletedTime()
-        return latestCompleted
+    if !t.LatestCompleted.IsZero() {
+        return t.LatestCompleted
     }
 
-    createdAt, _ := time.Parse(time.RFC3339, t.CreatedAt)
-    return createdAt
+    return t.CreatedAt
 }
 
 func WithLimitOpt(i int) CoordinatorOption {

@@ -79,13 +76,7 @@ func WithLimitOpt(i int) CoordinatorOption {
 
 // NewSchedulableTask transforms an influxdb task to a schedulable task type
 func NewSchedulableTask(task *influxdb.Task) (SchedulableTask, error) {
-    if offset, err := task.OffsetDuration(); offset != time.Duration(0) && err != nil {
-        return SchedulableTask{}, errors.New("could not create schedulable task: offset duration could not be parsed")
-    }
-
-    if _, err := task.LatestCompletedTime(); err != nil {
-        return SchedulableTask{}, errors.New("could not create schedulable task: latest completed time could not be parsed")
-    }
     if task.Cron == "" && task.Every == "" {
         return SchedulableTask{}, errors.New("invalid cron or every")
     }
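The LastScheduled change above reduces to a straight precedence check once the fields are time.Time values. A standalone sketch of that precedence, using a trimmed stand-in type rather than the real SchedulableTask:

package main

import (
    "fmt"
    "time"
)

// schedulableTask is a trimmed stand-in for the coordinator's SchedulableTask
// wrapper; only the three timestamps that LastScheduled consults are kept.
type schedulableTask struct {
    LatestScheduled time.Time
    LatestCompleted time.Time
    CreatedAt       time.Time
}

// LastScheduled mirrors the precedence in the hunk above: prefer the latest
// scheduled time, then the latest completed time, then fall back to CreatedAt.
// No parsing is needed any more because the fields are time.Time values.
func (t schedulableTask) LastScheduled() time.Time {
    if !t.LatestScheduled.IsZero() {
        return t.LatestScheduled
    }
    if !t.LatestCompleted.IsZero() {
        return t.LatestCompleted
    }
    return t.CreatedAt
}

func main() {
    created := time.Date(2019, 11, 1, 0, 0, 0, 0, time.UTC)
    completed := created.Add(10 * time.Minute)

    fmt.Println(schedulableTask{CreatedAt: created}.LastScheduled())                             // CreatedAt
    fmt.Println(schedulableTask{CreatedAt: created, LatestCompleted: completed}.LastScheduled()) // LatestCompleted
}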
@@ -100,7 +100,7 @@ func Test_Coordinator_Scheduler_Methods(t *testing.T) {
         one   = influxdb.ID(1)
         two   = influxdb.ID(2)
         three = influxdb.ID(3)
-        now   = time.Now().Format(time.RFC3339Nano)
+        now   = time.Now().UTC()
 
         taskOne = &influxdb.Task{ID: one, CreatedAt: now, Cron: "* * * * *"}
         taskTwo = &influxdb.Task{ID: two, Status: "active", CreatedAt: now, Cron: "* * * * *"}

@@ -16,8 +16,7 @@ var (
     three = influxdb.ID(3)
     four  = influxdb.ID(4)
 
-    aTime      = time.Now()
-    aTimeStamp = aTime.Format(time.RFC3339)
+    aTime = time.Now().UTC()
 
     taskOne = &influxdb.Task{ID: one}
     taskTwo = &influxdb.Task{ID: two, Status: "active"}

@@ -56,7 +55,7 @@ func Test_NotifyCoordinatorOfCreated(t *testing.T) {
     }
 
     if diff := cmp.Diff([]update{
-        {two, influxdb.TaskUpdate{LatestCompleted: &aTimeStamp}},
+        {two, influxdb.TaskUpdate{LatestCompleted: &aTime}},
     }, tasks.updates); diff != "" {
         t.Errorf("unexpected updates to task service %v", diff)
     }
@@ -81,7 +81,7 @@ func (cs *CoordinatingCheckService) UpdateCheck(ctx context.Context, id influxdb
     // if the update is to activate and the previous task was inactive we should add a "latest completed" update
     // this allows us to see not run the task for inactive time
     if fromTask.Status == string(backend.TaskInactive) && toTask.Status == string(backend.TaskActive) {
-        toTask.LatestCompleted = cs.Now().Format(time.RFC3339)
+        toTask.LatestCompleted = cs.Now()
     }
 
     return to, cs.coordinator.TaskUpdated(ctx, fromTask, toTask)

@@ -112,7 +112,7 @@ func (cs *CoordinatingCheckService) PatchCheck(ctx context.Context, id influxdb.
     // if the update is to activate and the previous task was inactive we should add a "latest completed" update
     // this allows us to see not run the task for inactive time
     if fromTask.Status == string(backend.TaskInactive) && toTask.Status == string(backend.TaskActive) {
-        toTask.LatestCompleted = cs.Now().Format(time.RFC3339)
+        toTask.LatestCompleted = cs.Now()
     }
 
     return to, cs.coordinator.TaskUpdated(ctx, fromTask, toTask)

@@ -216,7 +216,7 @@ func TestCheckUpdateFromInactive(t *testing.T) {
         if task.ID != thecheck.GetTaskID() {
             t.Fatalf("task sent to coordinator doesn't match expected")
         }
-        if task.LatestCompleted != latest.Format(time.RFC3339) {
+        if task.LatestCompleted != latest {
             t.Fatalf("update returned incorrect LatestCompleted, expected %s got %s, or ", latest.Format(time.RFC3339), task.LatestCompleted)
         }
     default:

@@ -233,7 +233,7 @@ func TestCheckUpdateFromInactive(t *testing.T) {
         if task.ID != thecheck.GetTaskID() {
             t.Fatalf("task sent to coordinator doesn't match expected")
         }
-        if task.LatestCompleted != latest.Format(time.RFC3339) {
+        if task.LatestCompleted != latest {
             t.Fatalf("update returned incorrect LatestCompleted, expected %s got %s, or ", latest.Format(time.RFC3339), task.LatestCompleted)
         }
     default:
@@ -76,7 +76,7 @@ func (s *CoordinatingTaskService) UpdateTask(ctx context.Context, id influxdb.ID
     if upd.Status != nil && *upd.Status == string(backend.TaskActive) {
         // confirm that it was inactive and this is an attempt to activate
         if from.Status == string(backend.TaskInactive) {
-            lc := s.now().Format(time.RFC3339)
+            lc := s.now()
             upd.LatestCompleted = &lc
         }
     }

@@ -257,7 +257,7 @@ func TestCoordinatingTaskService_ClaimTaskUpdatesLatestCompleted(t *testing.T) {
 
     select {
     case claimedTask := <-cchan:
-        if claimedTask.LatestCompleted != latest.UTC().Format(time.RFC3339) {
+        if claimedTask.LatestCompleted != latest.UTC() {
             t.Fatal("failed up update latest completed in claimed task")
         }
     case <-time.After(time.Second):

@@ -80,7 +80,7 @@ func (ns *CoordinatingNotificationRuleStore) UpdateNotificationRule(ctx context.
     // if the update is to activate and the previous task was inactive we should add a "latest completed" update
     // this allows us to see not run the task for inactive time
     if fromTask.Status == string(backend.TaskInactive) && toTask.Status == string(backend.TaskActive) {
-        toTask.LatestCompleted = ns.Now().Format(time.RFC3339)
+        toTask.LatestCompleted = ns.Now()
     }
 
     return to, ns.coordinator.TaskUpdated(ctx, fromTask, toTask)

@@ -111,7 +111,7 @@ func (ns *CoordinatingNotificationRuleStore) PatchNotificationRule(ctx context.C
     // if the update is to activate and the previous task was inactive we should add a "latest completed" update
     // this allows us to see not run the task for inactive time
     if fromTask.Status == string(backend.TaskInactive) && toTask.Status == string(backend.TaskActive) {
-        toTask.LatestCompleted = ns.Now().Format(time.RFC3339)
+        toTask.LatestCompleted = ns.Now()
     }
 
     return to, ns.coordinator.TaskUpdated(ctx, fromTask, toTask)

@@ -107,7 +107,7 @@ func TestNotificationRuleUpdateFromInactive(t *testing.T) {
         if task.ID != therule.GetTaskID() {
             t.Fatalf("task sent to coordinator doesn't match expected")
         }
-        if task.LatestCompleted != latest.Format(time.RFC3339) {
+        if task.LatestCompleted != latest {
             t.Fatalf("update returned incorrect LatestCompleted, expected %s got %s, or ", latest.Format(time.RFC3339), task.LatestCompleted)
         }
     default:

@@ -124,7 +124,7 @@ func TestNotificationRuleUpdateFromInactive(t *testing.T) {
         if task.ID != therule.GetTaskID() {
             t.Fatalf("task sent to coordinator doesn't match expected")
         }
-        if task.LatestCompleted != latest.Format(time.RFC3339) {
+        if task.LatestCompleted != latest {
             t.Fatalf("update returned incorrect LatestCompleted, expected %s got %s, or ", latest.Format(time.RFC3339), task.LatestCompleted)
         }
     default:
@@ -10,13 +10,12 @@ import (
 )
 
 var (
     mockTaskID  = influxdb.ID(1)
-    mockTimeNow    = time.Now()
-    mockTimeNowStr = time.Now().Format(time.RFC3339Nano)
+    mockTimeNow = time.Now()
 )
 
 func (m MockTaskService) UpdateTask(_ context.Context, id influxdb.ID, _ influxdb.TaskUpdate) (*influxdb.Task, error) {
-    return &influxdb.Task{ID: id, UpdatedAt: mockTimeNowStr}, nil
+    return &influxdb.Task{ID: id, UpdatedAt: mockTimeNow}, nil
 }
 
 type MockTaskService struct{}
@@ -34,11 +34,13 @@ func TestScheduler_Cancelation(t *testing.T) {
     defer o.Stop()
 
     const orgID = 2
+    latestCompleted, _ := time.Parse(time.RFC3339, "1970-01-01T00:00:04Z")
+
     task := &platform.Task{
         ID:              platform.ID(1),
         OrganizationID:  orgID,
         Every:           "1s",
-        LatestCompleted: "1970-01-01T00:00:04Z",
+        LatestCompleted: latestCompleted,
         Flux:            `option task = {name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
     }
     tcs.SetTask(task)

@@ -78,10 +80,11 @@ func TestScheduler_StartScriptOnClaim(t *testing.T) {
     o.Start(context.Background())
     defer o.Stop()
 
+    latestCompleted, _ := time.Parse(time.RFC3339, "1970-01-01T00:00:03Z")
     task := &platform.Task{
         ID:              platform.ID(1),
         Cron:            "* * * * *",
-        LatestCompleted: "1970-01-01T00:00:03Z",
+        LatestCompleted: latestCompleted,
         Flux:            `option task = {name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
     }
 
@@ -99,7 +102,7 @@ func TestScheduler_StartScriptOnClaim(t *testing.T) {
     task = &platform.Task{
         ID:              platform.ID(2),
         Every:           "1s",
-        LatestCompleted: "1970-01-01T00:00:03Z",
+        LatestCompleted: latestCompleted,
         Flux:            `option task = {concurrency: 99, name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
     }
 
@@ -145,11 +148,12 @@ func TestScheduler_DontRunInactiveTasks(t *testing.T) {
     o := backend.NewScheduler(tcs, e, 5)
     o.Start(context.Background())
     defer o.Stop()
+    latestCompleted, _ := time.Parse(time.RFC3339, "1970-01-01T00:00:05Z")
 
     task := &platform.Task{
         ID:              platform.ID(1),
         Every:           "1s",
-        LatestCompleted: "1970-01-01T00:00:05Z",
+        LatestCompleted: latestCompleted,
         Status:          "inactive",
         Flux:            `option task = {concurrency: 2, name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
     }

@@ -177,11 +181,12 @@ func TestScheduler_CreateNextRunOnTick(t *testing.T) {
     o := backend.NewScheduler(tcs, e, 5)
     o.Start(context.Background())
     defer o.Stop()
+    latestCompleted, _ := time.Parse(time.RFC3339, "1970-01-01T00:00:05Z")
 
     task := &platform.Task{
         ID:              platform.ID(1),
         Every:           "1s",
-        LatestCompleted: "1970-01-01T00:00:05Z",
+        LatestCompleted: latestCompleted,
         Flux:            `option task = {concurrency: 2, name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
     }
 
@@ -252,12 +257,13 @@ func TestScheduler_LogStatisticsOnSuccess(t *testing.T) {
 
     const taskID = 0x12345
     const orgID = 0x54321
+    latestCompleted, _ := time.Parse(time.RFC3339, "1970-01-01T00:00:05Z")
 
     task := &platform.Task{
         ID:              taskID,
         OrganizationID:  orgID,
         Every:           "1s",
-        LatestCompleted: "1970-01-01T00:00:05Z",
+        LatestCompleted: latestCompleted,
         Flux:            `option task = {name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
     }
 
@@ -315,11 +321,12 @@ func TestScheduler_Release(t *testing.T) {
     o := backend.NewScheduler(tcs, e, 5)
     o.Start(context.Background())
     defer o.Stop()
+    latestCompleted, _ := time.Parse(time.RFC3339, "1970-01-01T00:00:05Z")
 
     task := &platform.Task{
         ID:              platform.ID(1),
         Every:           "1s",
-        LatestCompleted: "1970-01-01T00:00:05Z",
+        LatestCompleted: latestCompleted,
         Flux:            `option task = {concurrency: 99, name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
     }
 
@@ -351,11 +358,12 @@ func TestScheduler_UpdateTask(t *testing.T) {
     s := backend.NewScheduler(tcs, e, 3059, backend.WithLogger(zaptest.NewLogger(t)))
     s.Start(context.Background())
     defer s.Stop()
+    latestCompleted, _ := time.Parse(time.RFC3339, "1970-01-01T00:50:00Z")
 
     task := &platform.Task{
         ID:              platform.ID(1),
         Cron:            "* * * * *",
-        LatestCompleted: "1970-01-01T00:50:00Z",
+        LatestCompleted: latestCompleted,
         Flux:            `option task = {name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
     }
 
@@ -402,11 +410,12 @@ func TestScheduler_Queue(t *testing.T) {
     o := backend.NewScheduler(tcs, e, 3059, backend.WithLogger(zaptest.NewLogger(t)))
     o.Start(context.Background())
     defer o.Stop()
+    latestCompleted, _ := time.Parse(time.RFC3339, "1970-01-01T00:50:00Z")
 
     task := &platform.Task{
         ID:              platform.ID(1),
         Cron:            "* * * * *",
-        LatestCompleted: "1970-01-01T00:50:00Z",
+        LatestCompleted: latestCompleted,
         Flux:            `option task = {name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
     }
     t1, _ := time.Parse(time.RFC3339, "1970-01-01T00:02:00Z")

@@ -639,13 +648,14 @@ func TestScheduler_RunStatus(t *testing.T) {
     s := backend.NewScheduler(rl, e, 5, backend.WithLogger(zaptest.NewLogger(t)))
     s.Start(context.Background())
     defer s.Stop()
+    latestCompleted, _ := time.Parse(time.RFC3339, "1970-01-01T00:00:05Z")
 
     // Claim a task that starts later.
     task := &platform.Task{
         ID:              platform.ID(1),
         OrganizationID:  platform.ID(2),
         Every:           "1s",
-        LatestCompleted: "1970-01-01T00:00:05Z",
+        LatestCompleted: latestCompleted,
         Flux:            `option task = {concurrency: 99, name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
     }
 
@@ -739,12 +749,13 @@ func TestScheduler_RunFailureCleanup(t *testing.T) {
     s := backend.NewScheduler(ll, e, 5, backend.WithLogger(zaptest.NewLogger(t)))
     s.Start(context.Background())
     defer s.Stop()
+    latestCompleted, _ := time.Parse(time.RFC3339, "1970-01-01T00:00:05Z")
 
     // Task with concurrency 1 should continue after one run fails.
     task := &platform.Task{
         ID:              platform.ID(1),
         Every:           "1s",
-        LatestCompleted: "1970-01-01T00:00:05Z",
+        LatestCompleted: latestCompleted,
         Flux:            `option task = {name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
     }
 
@@ -829,10 +840,11 @@ func TestScheduler_Metrics(t *testing.T) {
     reg.MustRegister(s.PrometheusCollectors()...)
 
     // Claim a task that starts later.
+    latestCompleted, _ := time.Parse(time.RFC3339, "1970-01-01T00:00:05Z")
     task := &platform.Task{
         ID:              platform.ID(1),
         Every:           "1s",
-        LatestCompleted: "1970-01-01T00:00:05Z",
+        LatestCompleted: latestCompleted,
         Flux:            `option task = {concurrency: 99, name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
     }
 
@@ -1008,12 +1020,12 @@ func TestScheduler_WithTicker(t *testing.T) {
 
     o.Start(ctx)
     defer o.Stop()
-    createdAt := time.Now()
+    createdAt := time.Now().UTC()
     task := &platform.Task{
         ID:              platform.ID(1),
         Every:           "1s",
         Flux:            `option task = {concurrency: 5, name:"x", every:1m} from(bucket:"a") |> to(bucket:"b", org: "o")`,
-        LatestCompleted: createdAt.Format(time.RFC3339Nano),
+        LatestCompleted: createdAt,
     }
 
     tcs.SetTask(task)
@@ -112,11 +112,9 @@ func (t *TaskControlService) createNextRun(task *influxdb.Task, now int64) (back
     if err != nil {
         return backend.RunCreation{}, err
     }
-    latest := int64(0)
-    lt, err := time.Parse(time.RFC3339, task.LatestCompleted)
-    if err == nil {
-        latest = lt.Unix()
-    }
+    latest := task.LatestCompleted.Unix()
     for _, r := range t.runs[task.ID] {
         if r.ScheduledFor.Unix() > latest {
             latest = r.ScheduledFor.Unix()

@@ -126,13 +124,12 @@ func (t *TaskControlService) createNextRun(task *influxdb.Task, now int64) (back
 
     nextScheduled := sch.Next(time.Unix(latest, 0))
     nextScheduledUnix := nextScheduled.Unix()
 
     offset := int64(0)
-    if task.Offset != "" {
-        toff, err := time.ParseDuration(task.Offset)
-        if err == nil {
-            offset = toff.Nanoseconds()
-        }
+    if task.Offset != 0 {
+        offset = task.Offset.Nanoseconds()
     }
 
     if dueAt := nextScheduledUnix + int64(offset); dueAt > now {
         return backend.RunCreation{}, influxdb.ErrRunNotDueYet(dueAt)
     }

@@ -201,19 +198,11 @@ func (d *TaskControlService) FinishRun(_ context.Context, taskID, runID influxdb
     r := d.runs[tid][rid]
     delete(d.runs[tid], rid)
     t := d.tasks[tid]
-    schedFor := r.ScheduledFor.Format(time.RFC3339)
 
-    if t.LatestCompleted != "" {
-        var latest time.Time
-        latest, err := time.Parse(time.RFC3339, t.LatestCompleted)
-        if err != nil {
-            return nil, err
-        }
-
-        if r.ScheduledFor.After(latest) {
-            t.LatestCompleted = schedFor
-        }
+    if r.ScheduledFor.After(t.LatestCompleted) {
+        t.LatestCompleted = r.ScheduledFor
     }
 
     d.finishedRuns[rid] = r
     delete(d.created, tid.String()+rid.String())
     return r, nil

@@ -254,11 +243,8 @@ func (d *TaskControlService) nextDueRun(ctx context.Context, taskID influxdb.ID)
     if err != nil {
         return 0, err
     }
-    latest := int64(0)
-    lt, err := time.Parse(time.RFC3339, task.LatestCompleted)
-    if err == nil {
-        latest = lt.Unix()
-    }
+    latest := task.LatestCompleted.Unix()
 
     for _, r := range d.runs[task.ID] {
         if r.ScheduledFor.Unix() > latest {

@@ -268,12 +254,10 @@ func (d *TaskControlService) nextDueRun(ctx context.Context, taskID influxdb.ID)
 
     nextScheduled := sch.Next(time.Unix(latest, 0))
     nextScheduledUnix := nextScheduled.Unix()
 
     offset := int64(0)
-    if task.Offset != "" {
-        toff, err := time.ParseDuration(task.Offset)
-        if err == nil {
-            offset = toff.Nanoseconds()
-        }
+    if task.Offset != 0 {
+        offset = task.Offset.Nanoseconds()
     }
 
     return nextScheduledUnix + int64(offset), nil
@@ -249,7 +249,7 @@ func testTaskCRUD(t *testing.T, sys *System) {
         OwnerID: tsk.OwnerID,
         Name:    "task #0",
         Cron:    "* * * * *",
-        Offset:  "5s",
+        Offset:  5 * time.Second,
         Status:  string(backend.DefaultTaskStatus),
         Flux:    fmt.Sprintf(scriptFmt, 0),
         Type:    influxdb.TaskSystemType,

@@ -563,7 +563,8 @@ from(bucket: "b")
         if err != nil {
             t.Fatal(err)
         }
-        if fNoOffset.Offset != "" {
+        var zero time.Duration
+        if fNoOffset.Offset != zero {
             t.Fatal("removing offset failed")
         }
     })

@@ -599,19 +600,13 @@ func testUpdate(t *testing.T, sys *System) {
     after := time.Now()
     latestCA := after.Add(time.Second)
 
-    ca, err := time.Parse(time.RFC3339, st.CreatedAt)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ca := st.CreatedAt
 
     if earliestCA.After(ca) || latestCA.Before(ca) {
         t.Fatalf("createdAt not accurate, expected %s to be between %s and %s", ca, earliestCA, latestCA)
     }
 
-    ti, err := time.Parse(time.RFC3339, st.LatestCompleted)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ti := st.LatestCompleted
 
     if now.Sub(ti) > 10*time.Second {
         t.Fatalf("latest completed not accurate, expected: ~%s, got %s", now, ti)

@@ -641,7 +636,7 @@ func testUpdate(t *testing.T, sys *System) {
         t.Fatal(err)
     }
 
-    if st2.LatestCompleted <= st.LatestCompleted {
+    if st2.LatestCompleted.Before(st.LatestCompleted) {
         t.Fatalf("executed task has not updated latest complete: expected %s > %s", st2.LatestCompleted, st.LatestCompleted)
     }
 
@@ -683,7 +678,7 @@ func testUpdate(t *testing.T, sys *System) {
         t.Fatal(err)
     }
 
-    if st3.LatestCompleted <= st2.LatestCompleted {
+    if st3.LatestCompleted.Before(st2.LatestCompleted) {
         t.Fatalf("executed task has not updated latest complete: expected %s > %s", st3.LatestCompleted, st2.LatestCompleted)
     }
 
@@ -706,10 +701,7 @@ func testUpdate(t *testing.T, sys *System) {
     earliestUA := now.Add(-time.Second)
     latestUA := after.Add(time.Second)
 
-    ua, err := time.Parse(time.RFC3339, task.UpdatedAt)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ua := task.UpdatedAt
 
     if earliestUA.After(ua) || latestUA.Before(ua) {
         t.Fatalf("updatedAt not accurate, expected %s to be between %s and %s", ua, earliestUA, latestUA)

@@ -720,10 +712,7 @@ func testUpdate(t *testing.T, sys *System) {
         t.Fatal(err)
     }
 
-    ua, err = time.Parse(time.RFC3339, st.UpdatedAt)
-    if err != nil {
-        t.Fatal(err)
-    }
+    ua = st.UpdatedAt
 
     if earliestUA.After(ua) || latestUA.Before(ua) {
         t.Fatalf("updatedAt not accurate after pulling new task, expected %s to be between %s and %s", ua, earliestUA, latestUA)
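One small aside on the test changes above: once LatestCompleted is a time.Time, ordering is checked with Before/After/Equal instead of comparing RFC 3339 strings. A minimal illustration:

package main

import (
    "fmt"
    "time"
)

func main() {
    earlier := time.Date(2019, 11, 1, 0, 0, 0, 0, time.UTC)
    later := earlier.Add(time.Second)

    fmt.Println(later.Before(earlier)) // false
    fmt.Println(later.After(earlier))  // true
    fmt.Println(later.Equal(earlier))  // false

    // Unlike string comparison, Equal treats the same instant expressed in
    // different locations as equal.
    fmt.Println(later.Equal(later.In(time.FixedZone("UTC+1", 3600)))) // true
}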