Add endpoints for managing sub-DAG runs (#915)

YotaHamada 2025-05-05 22:47:19 -07:00 committed by GitHub
parent 50eaa37a5b
commit 5ec999ef32
150 changed files with 5702 additions and 3520 deletions

View File

@ -1,5 +1,6 @@
{
"editor.formatOnSave": true,
"editor.insertSpaces": true,
"[go]": {
"editor.insertSpaces": true,
"editor.formatOnSave": true,

File diff suppressed because it is too large

View File

@ -52,7 +52,7 @@ paths:
get:
summary: "List all available DAGs"
description: "Retrieves DAGs with optional filtering by name and tags"
operationId: "listAllDAGs"
operationId: "listDAGs"
tags:
- "dags"
parameters:
@ -140,7 +140,7 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/Error"
/dags/{fileId}:
/dags/{fileName}:
get:
summary: "Retrieve comprehensive DAG information"
description: "Fetches detailed information about a specific DAG"
@ -149,7 +149,7 @@ paths:
- "dags"
parameters:
- $ref: "#/components/parameters/RemoteNode"
- $ref: "#/components/parameters/DAGFileId"
- $ref: "#/components/parameters/DAGFileName"
responses:
"200":
description: "A successful response"
@ -185,12 +185,12 @@ paths:
delete:
summary: "Delete an existing DAG"
description: "Permanently removes a DAG definition from the system"
operationId: "deleteDAGByFileId"
operationId: "deleteDAG"
tags:
- "dags"
parameters:
- $ref: "#/components/parameters/RemoteNode"
- $ref: "#/components/parameters/DAGFileId"
- $ref: "#/components/parameters/DAGFileName"
responses:
"204":
description: "DAG successfully deleted"
@ -207,7 +207,7 @@ paths:
schema:
$ref: "#/components/schemas/Error"
/dags/{fileId}/start:
/dags/{fileName}/start:
post:
summary: "Initiate DAG execution"
description: "Starts execution of a DAG with optional parameters"
@ -216,7 +216,7 @@ paths:
- "dags"
parameters:
- $ref: "#/components/parameters/RemoteNode"
- $ref: "#/components/parameters/DAGFileId"
- $ref: "#/components/parameters/DAGFileName"
requestBody:
required: true
content:
@ -230,6 +230,16 @@ paths:
responses:
"200":
description: "A successful response"
content:
application/json:
schema:
type: object
properties:
requestId:
type: string
description: "Request ID of the initiated DAG run"
required:
- requestId
default:
description: "Generic error response"
content:
@ -237,16 +247,16 @@ paths:
schema:
$ref: "#/components/schemas/Error"
/dags/{fileId}/stop:
/dags/{fileName}/stop:
post:
summary: "Terminate running DAG execution"
description: "Forcefully stops a running DAG workflow"
operationId: "terminateDAGExecution"
operationId: "terminateDAGRun"
tags:
- "dags"
parameters:
- $ref: "#/components/parameters/RemoteNode"
- $ref: "#/components/parameters/DAGFileId"
- $ref: "#/components/parameters/DAGFileName"
responses:
"200":
description: "A successful response"
@ -257,16 +267,16 @@ paths:
schema:
$ref: "#/components/schemas/Error"
/dags/{fileId}/retry:
/dags/{fileName}/retry:
post:
summary: "Retry DAG execution"
description: "Reruns a DAG execution"
operationId: "retryDAGExecution"
operationId: "retryDAGRun"
tags:
- "dags"
parameters:
- $ref: "#/components/parameters/RemoteNode"
- $ref: "#/components/parameters/DAGFileId"
- $ref: "#/components/parameters/DAGFileName"
requestBody:
required: true
content:
@ -289,16 +299,16 @@ paths:
schema:
$ref: "#/components/schemas/Error"
/dags/{fileId}/runs:
/dags/{fileName}/runs:
get:
summary: "Retrieve execution history of a DAG"
description: "Fetches execution history of a DAG"
operationId: "getDAGExecutionHistory"
operationId: "getDAGRunHistory"
tags:
- "dags"
parameters:
- $ref: "#/components/parameters/RemoteNode"
- $ref: "#/components/parameters/DAGFileId"
- $ref: "#/components/parameters/DAGFileName"
responses:
"200":
description: "A successful response"
@ -327,7 +337,7 @@ paths:
schema:
$ref: "#/components/schemas/Error"
/dags/{fileId}/runs/{requestId}:
/dags/{fileName}/runs/{requestId}:
get:
summary: "Get detailed status of a specific DAG run"
description: "Retrieves status information about a particular DAG execution"
@ -336,7 +346,7 @@ paths:
operationId: "getDAGRunDetails"
parameters:
- $ref: "#/components/parameters/RemoteNode"
- $ref: "#/components/parameters/DAGFileId"
- $ref: "#/components/parameters/DAGFileName"
- $ref: "#/components/parameters/RequestId"
responses:
"200":
@ -357,16 +367,16 @@ paths:
schema:
$ref: "#/components/schemas/Error"
/dags/{fileId}/spec:
/dags/{fileName}/spec:
get:
summary: "Retrieve DAG definition"
description: "Fetches the YAML definition of a DAG"
operationId: "getDAGDefinition"
summary: "Retrieve DAG specification"
description: "Fetches the specification of a DAG"
operationId: "getDAGSpec"
tags:
- "dags"
parameters:
- $ref: "#/components/parameters/RemoteNode"
- $ref: "#/components/parameters/DAGFileId"
- $ref: "#/components/parameters/DAGFileName"
responses:
"200":
description: "A successful response"
@ -395,14 +405,14 @@ paths:
schema:
$ref: "#/components/schemas/Error"
put:
summary: "Update DAG definition"
description: "Modifies the YAML definition of a DAG"
operationId: "updateDAGDefinition"
summary: "Update DAG spec"
description: "Modifies the specification of a DAG"
operationId: "updateDAGSpec"
tags:
- "dags"
parameters:
- $ref: "#/components/parameters/RemoteNode"
- $ref: "#/components/parameters/DAGFileId"
- $ref: "#/components/parameters/DAGFileName"
requestBody:
required: true
content:
@ -438,7 +448,7 @@ paths:
schema:
$ref: "#/components/schemas/Error"
/dags/{fileId}/suspend:
/dags/{fileName}/suspend:
post:
summary: "Toggle DAG suspension state"
description: "Controls whether the scheduler should execute this DAG according to its defined cron schedule"
@ -447,7 +457,7 @@ paths:
- "dags"
parameters:
- $ref: "#/components/parameters/RemoteNode"
- $ref: "#/components/parameters/DAGFileId"
- $ref: "#/components/parameters/DAGFileName"
requestBody:
required: true
content:
@ -476,7 +486,7 @@ paths:
schema:
$ref: "#/components/schemas/Error"
/dags/{fileId}/rename:
/dags/{fileName}/rename:
post:
summary: "Change DAG file ID"
description: "Changes the file ID of the DAG"
@ -485,7 +495,7 @@ paths:
- "dags"
parameters:
- $ref: "#/components/parameters/RemoteNode"
- $ref: "#/components/parameters/DAGFileId"
- $ref: "#/components/parameters/DAGFileName"
requestBody:
required: true
content:
@ -493,11 +503,11 @@ paths:
schema:
type: object
properties:
newFileId:
newFileName:
type: string
description: "New file ID for the DAG"
description: "New file name for the DAG"
required:
- newFileId
- newFileName
responses:
"200":
description: "A successful response"
@ -522,9 +532,9 @@ paths:
/dags/search:
get:
summary: "Search across all DAG definitions"
summary: "Search DAGs"
description: "Performs a full-text search across all DAG definitions"
operationId: "searchDAGDefinitions"
operationId: "searchDAGs"
tags:
- "dags"
parameters:
@ -587,11 +597,47 @@ paths:
schema:
$ref: "#/components/schemas/Error"
/runs/{dagName}/{requestId}:
get:
summary: "Retrieve detailed status of a DAG run"
description: "Fetches detailed status information about a specific DAG run"
operationId: "getRunDetails"
tags:
- "runs"
parameters:
- $ref: "#/components/parameters/RemoteNode"
- $ref: "#/components/parameters/DAGName"
- $ref: "#/components/parameters/RequestId"
responses:
"200":
description: "A successful response"
content:
application/json:
schema:
type: object
properties:
runDetails:
$ref: "#/components/schemas/RunDetails"
required:
- runDetails
"404":
description: "DAG run not found"
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
default:
description: "Generic error response"
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
/runs/{dagName}/{requestId}/log:
get:
summary: "Retrieve full execution log of a DAG run"
description: "Fetches the execution log for a DAG run"
operationId: "getDAGRunLog"
operationId: "getRunLog"
tags:
- "runs"
parameters:
@ -618,11 +664,11 @@ paths:
schema:
$ref: "#/components/schemas/Error"
/runs/{dagName}/{requestId}/{stepName}/log:
/runs/{dagName}/{requestId}/steps/{stepName}/log:
get:
summary: "Retrieve log for a specific step in a DAG run"
description: "Fetches the log for an individual step in a DAG run"
operationId: "getDAGStepLog"
operationId: "getRunStepLog"
tags:
- "runs"
parameters:
@ -650,11 +696,11 @@ paths:
schema:
$ref: "#/components/schemas/Error"
/runs/{dagName}/{requestId}/{stepName}/status:
/runs/{dagName}/{requestId}/steps/{stepName}/status:
patch:
summary: "Manually update a step's execution status"
description: "Changes the status of a specific step within a DAG run"
operationId: "updateDAGStepStatus"
operationId: "updateRunStepStatus"
tags:
- "runs"
parameters:
@ -695,6 +741,174 @@ paths:
schema:
$ref: "#/components/schemas/Error"
/runs/{dagName}/{requestId}/subs/{subRunRequestId}:
get:
summary: "Retrieve detailed status of a sub run"
description: "Fetches detailed status information about a specific sub-run"
operationId: "getSubRunDetails"
tags:
- "runs"
parameters:
- $ref: "#/components/parameters/RemoteNode"
- $ref: "#/components/parameters/DAGName"
- $ref: "#/components/parameters/RequestId"
- name: "subRunRequestId"
in: "path"
required: true
schema:
type: "string"
description: "ID of the sub run to retrieve details for"
responses:
"200":
description: "A successful response"
content:
application/json:
schema:
type: object
properties:
runDetails:
$ref: "#/components/schemas/RunDetails"
required:
- runDetails
"404":
description: "Sub-run not found"
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
default:
description: "Generic error response"
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
/runs/{dagName}/{requestId}/subs/{subRunRequestId}/log:
get:
summary: "Retrieve log for a specific sub run"
description: "Fetches the log for an individual sub-run"
operationId: "getSubRunLog"
tags:
- "runs"
parameters:
- $ref: "#/components/parameters/RemoteNode"
- $ref: "#/components/parameters/DAGName"
- $ref: "#/components/parameters/RequestId"
- name: "subRunRequestId"
in: "path"
required: true
schema:
type: "string"
description: "ID of the sub run to retrieve the log for"
responses:
"200":
description: "A successful response"
content:
application/json:
schema:
$ref: "#/components/schemas/Log"
"404":
description: "Log file not found"
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
default:
description: "Generic error response"
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
/runs/{dagName}/{requestId}/subs/{subRunRequestId}/steps/{stepName}/log:
get:
summary: "Retrieve log for a specific step in a sub run"
description: "Fetches the log for an individual step in a sub-run"
operationId: "getSubRunStepLog"
tags:
- "runs"
parameters:
- $ref: "#/components/parameters/RemoteNode"
- $ref: "#/components/parameters/DAGName"
- $ref: "#/components/parameters/RequestId"
- name: "subRunRequestId"
in: "path"
required: true
schema:
type: "string"
description: "ID of the sub run to retrieve the log for"
- $ref: "#/components/parameters/StepName"
responses:
"200":
description: "A successful response"
content:
application/json:
schema:
$ref: "#/components/schemas/Log"
"404":
description: "Log file not found"
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
default:
description: "Generic error response"
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
/runs/{dagName}/{requestId}/subs/{subRunRequestId}/steps/{stepName}/status:
patch:
summary: "Manually update a step's execution status in a sub run"
description: "Changes the status of a specific step within a sub-run"
operationId: "updateSubRunStepStatus"
tags:
- "runs"
parameters:
- $ref: "#/components/parameters/RemoteNode"
- $ref: "#/components/parameters/DAGName"
- $ref: "#/components/parameters/RequestId"
- name: "subRunRequestId"
in: "path"
required: true
schema:
type: "string"
description: "ID of the sub run to update the step status for"
- $ref: "#/components/parameters/StepName"
requestBody:
required: true
content:
application/json:
schema:
type: object
properties:
status:
$ref: "#/components/schemas/NodeStatus"
required:
- status
responses:
"200":
description: "A successful response"
"400":
description: "Invalid request"
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
"404":
description: "Run or step not found"
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
default:
description: "Generic error response"
content:
application/json:
schema:
$ref: "#/components/schemas/Error"
components:
securitySchemes:
basicAuth:
@ -727,13 +941,13 @@ components:
maximum: 1000
default: 50
DAGFileId:
name: fileId
DAGFileName:
name: fileName
in: path
description: File Id of the DAG file
description: The name of the DAG file
required: true
schema:
$ref: "#/components/schemas/DAGFileId"
$ref: "#/components/schemas/DAGFileName"
DAGName:
name: dagName
@ -799,12 +1013,12 @@ components:
- "not_running"
- "already_exists"
DAGFileId:
DAGFileName:
type: string
# only allows alphanumeric characters, underscores, and hyphens
format: "regex"
pattern: "^[a-zA-Z0-9_-]+$"
description: "location of the DAG file"
description: "Name of the DAG file"
DAGName:
type: string
@ -871,7 +1085,7 @@ components:
type: object
description: "DAG file with its status information"
properties:
fileId:
fileName:
type: string
description: "File ID of the DAG file"
dag:
@ -887,7 +1101,7 @@ components:
items:
type: string
required:
- fileId
- fileName
- dag
- latestRun
- suspended
@ -955,7 +1169,7 @@ components:
3: "Cancelled"
4: "Success"
StatusText:
StatusLabel:
type: string
description: "Human-readable status description for the DAG run"
enum:
@ -984,7 +1198,7 @@ components:
4: "Success"
5: "Skipped"
NodeStatusText:
NodeStatusLabel:
type: string
description: "Human-readable status description for the node"
enum:
@ -1082,8 +1296,8 @@ components:
$ref: "#/components/schemas/DAGName"
status:
$ref: "#/components/schemas/Status"
statusText:
$ref: "#/components/schemas/StatusText"
statusLabel:
$ref: "#/components/schemas/StatusLabel"
pid:
type: integer
description: "Process ID of the DAG run"
@ -1103,14 +1317,14 @@ components:
- requestId
- name
- status
- statusText
- statusLabel
- startedAt
- finishedAt
- log
RunDetails:
type: object
description: "Detailed status of a DAG run including child nodes"
description: "Detailed status of a DAG run including sub-run nodes"
allOf:
- $ref: "#/components/schemas/RunSummary"
- type: object
@ -1149,14 +1363,19 @@ components:
description: "RFC3339 timestamp when the step finished"
status:
$ref: "#/components/schemas/NodeStatus"
statusText:
$ref: "#/components/schemas/NodeStatusText"
statusLabel:
$ref: "#/components/schemas/NodeStatusLabel"
retryCount:
type: integer
description: "Number of retry attempts made for this step"
doneCount:
type: integer
description: "Number of successful completions for repeating steps"
subRuns:
type: array
description: "List of sub-runs associated with this step"
items:
$ref: "#/components/schemas/SubRun"
error:
type: string
description: "Error message if the step failed"
@ -1166,10 +1385,19 @@ components:
- startedAt
- finishedAt
- status
- statusText
- statusLabel
- retryCount
- doneCount
SubRun:
type: object
description: "Metadata for a sub run"
properties:
requestId:
$ref: "#/components/schemas/RequestId"
required:
- requestId
Step:
type: object
description: "Individual task within a DAG that performs a specific operation"
@ -1208,10 +1436,10 @@ components:
type: string
run:
type: string
description: "The name of the DAG to run as a sub-DAG"
description: "The name of the DAG to run as a sub-run"
params:
type: string
description: "Parameters to pass to the sub DAG in JSON format"
description: "Parameters to pass to the sub-run in JSON format"
depends:
type: array
description: "List of step names that must complete before this step can start"

View File

@ -15,12 +15,12 @@ import (
"syscall"
"time"
"github.com/dagu-org/dagu/internal/client"
"github.com/dagu-org/dagu/internal/dagstore"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/mailer"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/dagu-org/dagu/internal/sock"
)
@ -33,20 +33,20 @@ import (
type Agent struct {
dag *digraph.DAG
dry bool
retryTarget *persistence.Status
dagStore persistence.DAGStore
client client.Client
retryTarget *runstore.Status
dagStore dagstore.Store
client runstore.Client
scheduler *scheduler.Scheduler
graph *scheduler.ExecutionGraph
reporter *reporter
historyStore persistence.HistoryStore
runStore runstore.Store
socketServer *sock.Server
logDir string
logFile string
rootDAG digraph.RootDAG
// requestID is the request ID that uniquely identifies the DAG run.
// The request ID can be used for history lookup, retry, etc.
// The request ID can be used for runstore lookup, retry, etc.
requestID string
finished atomic.Bool
@ -60,12 +60,12 @@ type Agent struct {
// Options is the configuration for the Agent.
type Options struct {
// Dry is a dry-run mode. It does not execute the actual command.
// Dry run does not create history data.
// Dry run does not create runstore data.
Dry bool
// RetryTarget is the target status (history of execution) to retry.
// RetryTarget is the target status (a previously stored run) to retry.
// If it's specified the agent will execute the DAG with the same
// configuration as the specified history.
RetryTarget *persistence.Status
// configuration as the specified run.
RetryTarget *runstore.Status
}
// New creates a new Agent.
@ -74,23 +74,23 @@ func New(
dag *digraph.DAG,
logDir string,
logFile string,
cli client.Client,
dagStore persistence.DAGStore,
historyStore persistence.HistoryStore,
cli runstore.Client,
dagStore dagstore.Store,
runStore runstore.Store,
rootDAG digraph.RootDAG,
opts Options,
) *Agent {
return &Agent{
rootDAG: rootDAG,
requestID: requestID,
dag: dag,
dry: opts.Dry,
retryTarget: opts.RetryTarget,
logDir: logDir,
logFile: logFile,
client: cli,
dagStore: dagStore,
historyStore: historyStore,
rootDAG: rootDAG,
requestID: requestID,
dag: dag,
dry: opts.Dry,
retryTarget: opts.RetryTarget,
logDir: logDir,
logFile: logFile,
client: cli,
dagStore: dagStore,
runStore: runStore,
}
}
@ -104,7 +104,7 @@ func (a *Agent) Run(ctx context.Context) error {
}
// Create a new context for the DAG run with all necessary information
dbClient := newDBClient(a.historyStore, a.dagStore)
dbClient := newDBClient(a.runStore, a.dagStore)
ctx = digraph.NewContext(ctx, a.dag, dbClient, a.rootDAG, a.requestID, a.logFile, a.dag.Params)
// Add structured logging context
@ -132,18 +132,18 @@ func (a *Agent) Run(ctx context.Context) error {
}
// Make a connection to the database.
// It should close the connection to the history database when the DAG
// It should close the connection to the runstore database when the DAG
// execution is finished.
historyRecord, err := a.setupHistoryRecord(ctx)
historyRecord, err := a.setupRunRecord(ctx)
if err != nil {
return fmt.Errorf("failed to setup history record: %w", err)
return fmt.Errorf("failed to setup runstore record: %w", err)
}
if err := historyRecord.Open(ctx); err != nil {
return fmt.Errorf("failed to open history record: %w", err)
return fmt.Errorf("failed to open runstore record: %w", err)
}
defer func() {
if err := historyRecord.Close(ctx); err != nil {
logger.Error(ctx, "Failed to close history store", "err", err)
logger.Error(ctx, "Failed to close runstore store", "err", err)
}
}()
@ -210,7 +210,7 @@ func (a *Agent) Run(ctx context.Context) error {
logger.Debug(ctx, "DAG run started", "reqId", a.requestID, "name", a.dag.Name, "params", a.dag.Params)
lastErr := a.scheduler.Schedule(ctx, a.graph, done)
// Update the finished status to the history database.
// Update the finished status to the runstore database.
finishedStatus := a.Status()
logger.Info(ctx, "DAG run finished", "status", finishedStatus.Status.String())
if err := historyRecord.Write(ctx, a.Status()); err != nil {
@ -237,7 +237,7 @@ func (a *Agent) PrintSummary(ctx context.Context) {
}
// Status collects the current running status of the DAG and returns it.
func (a *Agent) Status() persistence.Status {
func (a *Agent) Status() runstore.Status {
// Lock to avoid race condition.
a.lock.RLock()
defer a.lock.RUnlock()
@ -248,22 +248,22 @@ func (a *Agent) Status() persistence.Status {
schedulerStatus = scheduler.StatusRunning
}
opts := []persistence.StatusOption{
persistence.WithFinishedAt(a.graph.FinishAt()),
persistence.WithNodes(a.graph.NodeData()),
persistence.WithLogFilePath(a.logFile),
persistence.WithOnExitNode(a.scheduler.HandlerNode(digraph.HandlerOnExit)),
persistence.WithOnSuccessNode(a.scheduler.HandlerNode(digraph.HandlerOnSuccess)),
persistence.WithOnFailureNode(a.scheduler.HandlerNode(digraph.HandlerOnFailure)),
persistence.WithOnCancelNode(a.scheduler.HandlerNode(digraph.HandlerOnCancel)),
opts := []runstore.StatusOption{
runstore.WithFinishedAt(a.graph.FinishAt()),
runstore.WithNodes(a.graph.NodeData()),
runstore.WithLogFilePath(a.logFile),
runstore.WithOnExitNode(a.scheduler.HandlerNode(digraph.HandlerOnExit)),
runstore.WithOnSuccessNode(a.scheduler.HandlerNode(digraph.HandlerOnSuccess)),
runstore.WithOnFailureNode(a.scheduler.HandlerNode(digraph.HandlerOnFailure)),
runstore.WithOnCancelNode(a.scheduler.HandlerNode(digraph.HandlerOnCancel)),
}
if a.subExecution.Load() {
opts = append(opts, persistence.WithRootDAG(a.rootDAG))
opts = append(opts, runstore.WithRootDAG(a.rootDAG))
}
// Create the status object to record the current status.
return persistence.NewStatusFactory(a.dag).
return runstore.NewStatusBuilder(a.dag).
Create(
a.requestID,
schedulerStatus,
@ -400,7 +400,7 @@ func (a *Agent) dryRun(ctx context.Context) error {
logger.Info(ctx, "Dry-run started", "reqId", a.requestID, "name", a.dag.Name, "params", a.dag.Params)
dagCtx := digraph.NewContext(context.Background(), a.dag, newDBClient(a.historyStore, a.dagStore), a.rootDAG, a.requestID, a.logFile, a.dag.Params)
dagCtx := digraph.NewContext(context.Background(), a.dag, newDBClient(a.runStore, a.dagStore), a.rootDAG, a.requestID, a.logFile, a.dag.Params)
lastErr := a.scheduler.Schedule(dagCtx, a.graph, done)
a.lastErr = lastErr
@ -489,18 +489,18 @@ func (a *Agent) setupGraphForRetry(ctx context.Context) error {
return nil
}
func (a *Agent) setupHistoryRecord(ctx context.Context) (persistence.Record, error) {
func (a *Agent) setupRunRecord(ctx context.Context) (runstore.Record, error) {
retentionDays := a.dag.HistRetentionDays
if err := a.historyStore.RemoveOld(ctx, a.dag.Name, retentionDays); err != nil {
if err := a.runStore.RemoveOld(ctx, a.dag.Name, retentionDays); err != nil {
logger.Error(ctx, "History data cleanup failed", "err", err)
}
opts := persistence.NewRecordOptions{Retry: a.retryTarget != nil}
opts := runstore.NewRecordOptions{Retry: a.retryTarget != nil}
if a.subExecution.Load() {
opts.Root = &a.rootDAG
}
return a.historyStore.NewRecord(ctx, a.dag, time.Now(), a.requestID, opts)
return a.runStore.NewRecord(ctx, a.dag, time.Now(), a.requestID, opts)
}
// setupSocketServer creates a socket server instance.
@ -528,7 +528,7 @@ func (a *Agent) checkPreconditions(ctx context.Context) error {
}
// If one of the conditions is not met, cancel the execution.
if err := digraph.EvalConditions(ctx, a.dag.Preconditions); err != nil {
logger.Error(ctx, "Preconditions are not met", "err", err)
logger.Info(ctx, "Preconditions are not met", "err", err)
a.scheduler.Cancel(ctx, a.graph)
return err
}
@ -540,12 +540,8 @@ func (a *Agent) checkIsAlreadyRunning(ctx context.Context) error {
if a.subExecution.Load() {
return nil // Skip the check for sub-DAGs
}
status, err := a.client.GetCurrentStatus(ctx, a.dag)
if err != nil {
return err
}
if status.Status != scheduler.StatusNone {
return fmt.Errorf("the DAG is already running. status=%s, socket=%s", status.Status, a.dag.SockAddr(a.requestID))
if a.client.IsRunning(ctx, a.dag, a.requestID) {
return fmt.Errorf("the DAG is already running. requestID=%s, socket=%s", a.requestID, a.dag.SockAddr(a.requestID))
}
return nil
}
@ -602,14 +598,14 @@ func encodeError(w http.ResponseWriter, err error) {
var _ digraph.DBClient = &dbClient{}
type dbClient struct {
dagStore persistence.DAGStore
historyStore persistence.HistoryStore
dagStore dagstore.Store
runStore runstore.Store
}
func newDBClient(h persistence.HistoryStore, d persistence.DAGStore) *dbClient {
func newDBClient(h runstore.Store, d dagstore.Store) *dbClient {
return &dbClient{
historyStore: h,
dagStore: d,
runStore: h,
dagStore: d,
}
}
@ -619,11 +615,11 @@ func (o *dbClient) GetDAG(ctx context.Context, name string) (*digraph.DAG, error
}
func (o *dbClient) GetSubStatus(ctx context.Context, reqID string, rootDAG digraph.RootDAG) (*digraph.Status, error) {
historyRecord, err := o.historyStore.FindBySubRequestID(ctx, reqID, rootDAG)
runRecord, err := o.runStore.FindBySubRunRequestID(ctx, reqID, rootDAG)
if err != nil {
return nil, err
}
status, err := historyRecord.ReadStatus(ctx)
status, err := runRecord.ReadStatus(ctx)
if err != nil {
return nil, err
}
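
As a quick orientation to the renamed persistence API, the following sketch assembles a run status with the new builder. It uses only calls visible in this diff (runstore.NewStatusBuilder, Create, runstore.WithNodes, runstore.WithFinishedAt, and the scheduler.NodeData shape used in the test helpers of this commit); the exact option signatures and the standalone main wrapper are assumptions, not a definitive usage of the package.

// Sketch of building a run status with runstore.NewStatusBuilder (replaces
// persistence.NewStatusFactory); details beyond the calls shown in this diff are assumed.
package main

import (
    "fmt"
    "time"

    "github.com/dagu-org/dagu/internal/digraph"
    "github.com/dagu-org/dagu/internal/digraph/scheduler"
    "github.com/dagu-org/dagu/internal/runstore"
)

func main() {
    dag := &digraph.DAG{Name: "example"}

    // Node data mirroring the shape used in this commit's test helpers.
    nodes := []scheduler.NodeData{{State: scheduler.NodeState{Status: scheduler.NodeStatusSuccess}}}

    // Create takes the request ID, scheduler status, PID, start time, and options.
    status := runstore.NewStatusBuilder(dag).Create(
        "request-id", scheduler.StatusSuccess, 0, time.Now(),
        runstore.WithNodes(nodes),
        runstore.WithFinishedAt(time.Now()),
    )
    fmt.Println(status.RequestID, status.Status.String())
}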

View File

@ -6,7 +6,7 @@ import (
"testing"
"github.com/dagu-org/dagu/internal/agent"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/dagu-org/dagu/internal/test"
"github.com/dagu-org/dagu/internal/digraph"
@ -35,18 +35,18 @@ func TestAgent_Run(t *testing.T) {
dag := th.DAG(t, "agent/delete_old_history.yaml")
dagAgent := dag.Agent()
// Create a history file by running a DAG
// Create a runstore file by running a DAG
dagAgent.RunSuccess(t)
dag.AssertHistoryCount(t, 1)
// Set the retention days to 0 (delete all history files except the latest one)
// Set the retention days to 0 (delete all runstore files except the latest one)
dag.HistRetentionDays = 0
// Run the DAG again
dagAgent = dag.Agent()
dagAgent.RunSuccess(t)
// Check if only the latest history file exists
// Check if only the latest runstore file exists
dag.AssertHistoryCount(t, 1)
})
t.Run("AlreadyRunning", func(t *testing.T) {
@ -110,9 +110,11 @@ func TestAgent_Run(t *testing.T) {
th := test.Setup(t)
dag := th.DAG(t, "agent/sleep.yaml")
dagAgent := dag.Agent()
done := make(chan struct{})
go func() {
dagAgent.RunCancel(t)
close(done)
}()
// wait for the DAG to start
@ -121,6 +123,8 @@ func TestAgent_Run(t *testing.T) {
// send a signal to cancel the DAG
dagAgent.Abort()
<-done
// wait for the DAG to be canceled
dag.AssertLatestStatus(t, scheduler.StatusCancel)
})
@ -215,7 +219,7 @@ func TestAgent_HandleHTTP(t *testing.T) {
require.Equal(t, http.StatusOK, mockResponseWriter.status)
// Check if the status is returned correctly
status, err := persistence.StatusFromJSON(mockResponseWriter.body)
status, err := runstore.StatusFromJSON(mockResponseWriter.body)
require.NoError(t, err)
require.Equal(t, scheduler.StatusRunning, status.Status)

View File

@ -9,7 +9,7 @@ import (
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/jedib0t/go-pretty/v6/table"
)
@ -31,7 +31,7 @@ func newReporter(f SenderFn) *reporter {
// reportStep is a function that reports the status of a step.
func (r *reporter) reportStep(
ctx context.Context, dag *digraph.DAG, status persistence.Status, node *scheduler.Node,
ctx context.Context, dag *digraph.DAG, status runstore.Status, node *scheduler.Node,
) error {
nodeStatus := node.State().Status
if nodeStatus != scheduler.NodeStatusNone {
@ -49,7 +49,7 @@ func (r *reporter) reportStep(
}
// report is a function that reports the status of the scheduler.
func (r *reporter) getSummary(_ context.Context, status persistence.Status, err error) string {
func (r *reporter) getSummary(_ context.Context, status runstore.Status, err error) string {
var buf bytes.Buffer
_, _ = buf.Write([]byte("\n"))
_, _ = buf.Write([]byte("Summary ->\n"))
@ -61,7 +61,7 @@ func (r *reporter) getSummary(_ context.Context, status persistence.Status, err
}
// send is a function that sends a report mail.
func (r *reporter) send(ctx context.Context, dag *digraph.DAG, status persistence.Status, err error) error {
func (r *reporter) send(ctx context.Context, dag *digraph.DAG, status runstore.Status, err error) error {
if err != nil || status.Status == scheduler.StatusError {
if dag.MailOn != nil && dag.MailOn.Failure && dag.ErrorMail != nil {
fromAddress := dag.ErrorMail.From
@ -94,7 +94,7 @@ var dagHeader = table.Row{
"Error",
}
func renderDAGSummary(status persistence.Status, err error) string {
func renderDAGSummary(status runstore.Status, err error) string {
dataRow := table.Row{
status.RequestID,
status.Name,
@ -125,7 +125,7 @@ var stepHeader = table.Row{
"Error",
}
func renderStepSummary(nodes []*persistence.Node) string {
func renderStepSummary(nodes []*runstore.Node) string {
stepTable := table.NewWriter()
stepTable.AppendHeader(stepHeader)
@ -136,7 +136,7 @@ func renderStepSummary(nodes []*persistence.Node) string {
n.Step.Name,
n.StartedAt,
n.FinishedAt,
n.StatusText,
n.Status.String(),
}
if n.Step.Args != nil {
dataRow = append(dataRow, strings.Join(n.Step.Args, " "))
@ -150,7 +150,7 @@ func renderStepSummary(nodes []*persistence.Node) string {
return stepTable.Render()
}
func renderHTML(nodes []*persistence.Node) string {
func renderHTML(nodes []*runstore.Node) string {
var buffer bytes.Buffer
addValFunc := func(val string) {
_, _ = buffer.WriteString(
@ -198,7 +198,7 @@ func renderHTML(nodes []*persistence.Node) string {
}
func addAttachments(
trigger bool, nodes []*persistence.Node,
trigger bool, nodes []*runstore.Node,
) (attachments []string) {
if trigger {
for _, n := range nodes {

View File

@ -9,14 +9,14 @@ import (
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/dagu-org/dagu/internal/stringutil"
"github.com/stretchr/testify/require"
)
func TestReporter(t *testing.T) {
for scenario, fn := range map[string]func(
t *testing.T, rp *reporter, mock *mockSender, dag *digraph.DAG, nodes []*persistence.Node,
t *testing.T, rp *reporter, mock *mockSender, dag *digraph.DAG, nodes []*runstore.Node,
){
"create error mail": testErrorMail,
"no error mail": testNoErrorMail,
@ -49,7 +49,7 @@ func TestReporter(t *testing.T) {
},
}
nodes := []*persistence.Node{
nodes := []*runstore.Node{
{
Step: digraph.Step{
Name: "test-step",
@ -70,11 +70,11 @@ func TestReporter(t *testing.T) {
}
}
func testErrorMail(t *testing.T, rp *reporter, mock *mockSender, dag *digraph.DAG, nodes []*persistence.Node) {
func testErrorMail(t *testing.T, rp *reporter, mock *mockSender, dag *digraph.DAG, nodes []*runstore.Node) {
dag.MailOn.Failure = true
dag.MailOn.Success = false
_ = rp.send(context.Background(), dag, persistence.Status{
_ = rp.send(context.Background(), dag, runstore.Status{
Status: scheduler.StatusError,
Nodes: nodes,
}, fmt.Errorf("Error"))
@ -84,11 +84,11 @@ func testErrorMail(t *testing.T, rp *reporter, mock *mockSender, dag *digraph.DA
require.Equal(t, 1, mock.count)
}
func testNoErrorMail(t *testing.T, rp *reporter, mock *mockSender, dag *digraph.DAG, nodes []*persistence.Node) {
func testNoErrorMail(t *testing.T, rp *reporter, mock *mockSender, dag *digraph.DAG, nodes []*runstore.Node) {
dag.MailOn.Failure = false
dag.MailOn.Success = true
err := rp.send(context.Background(), dag, persistence.Status{
err := rp.send(context.Background(), dag, runstore.Status{
Status: scheduler.StatusError,
Nodes: nodes,
}, nil)
@ -96,11 +96,11 @@ func testNoErrorMail(t *testing.T, rp *reporter, mock *mockSender, dag *digraph.
require.Equal(t, 0, mock.count)
}
func testSuccessMail(t *testing.T, rp *reporter, mock *mockSender, dag *digraph.DAG, nodes []*persistence.Node) {
func testSuccessMail(t *testing.T, rp *reporter, mock *mockSender, dag *digraph.DAG, nodes []*runstore.Node) {
dag.MailOn.Failure = true
dag.MailOn.Success = true
err := rp.send(context.Background(), dag, persistence.Status{
err := rp.send(context.Background(), dag, runstore.Status{
Status: scheduler.StatusSuccess,
Nodes: nodes,
}, nil)
@ -111,14 +111,14 @@ func testSuccessMail(t *testing.T, rp *reporter, mock *mockSender, dag *digraph.
require.Equal(t, 1, mock.count)
}
func testRenderSummary(t *testing.T, _ *reporter, _ *mockSender, dag *digraph.DAG, _ []*persistence.Node) {
status := persistence.NewStatusFactory(dag).Create("request-id", scheduler.StatusError, 0, time.Now())
func testRenderSummary(t *testing.T, _ *reporter, _ *mockSender, dag *digraph.DAG, _ []*runstore.Node) {
status := runstore.NewStatusBuilder(dag).Create("request-id", scheduler.StatusError, 0, time.Now())
summary := renderDAGSummary(status, errors.New("test error"))
require.Contains(t, summary, "test error")
require.Contains(t, summary, dag.Name)
}
func testRenderTable(t *testing.T, _ *reporter, _ *mockSender, _ *digraph.DAG, nodes []*persistence.Node) {
func testRenderTable(t *testing.T, _ *reporter, _ *mockSender, _ *digraph.DAG, nodes []*runstore.Node) {
summary := renderStepSummary(nodes)
require.Contains(t, summary, nodes[0].Step.Name)
require.Contains(t, summary, nodes[0].Step.Args[0])

View File

@ -1,385 +0,0 @@
package client
import (
"context"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"syscall"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/fileutil"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/sock"
)
// New creates a new Client instance.
// The Client is used to interact with the DAG.
func New(
dagStore persistence.DAGStore,
historyStore persistence.HistoryStore,
flagStore persistence.FlagStore,
executable string,
workDir string,
) Client {
return &client{
dagStore: dagStore,
historyStore: historyStore,
flagStore: flagStore,
executable: executable,
workDir: workDir,
}
}
var _ Client = (*client)(nil)
type client struct {
dagStore persistence.DAGStore
historyStore persistence.HistoryStore
flagStore persistence.FlagStore
executable string
workDir string
}
var (
dagTemplate = []byte(`steps:
- name: step1
command: echo hello
`)
)
func (e *client) LoadYAML(ctx context.Context, spec []byte, opts ...digraph.LoadOption) (*digraph.DAG, error) {
return e.dagStore.LoadSpec(ctx, spec, opts...)
}
func (e *client) GetDAGSpec(ctx context.Context, id string) (string, error) {
return e.dagStore.GetSpec(ctx, id)
}
func (e *client) CreateDAG(ctx context.Context, name string) (string, error) {
id, err := e.dagStore.Create(ctx, name, dagTemplate)
if err != nil {
return "", fmt.Errorf("failed to create DAG: %w", err)
}
return id, nil
}
func (e *client) GrepDAG(ctx context.Context, pattern string) (
[]*persistence.GrepResult, []string, error,
) {
return e.dagStore.Grep(ctx, pattern)
}
func (e *client) MoveDAG(ctx context.Context, oldLoc, newLoc string) error {
oldDAG, err := e.dagStore.GetMetadata(ctx, oldLoc)
if err != nil {
return fmt.Errorf("failed to get metadata for %s: %w", oldLoc, err)
}
if err := e.dagStore.Rename(ctx, oldLoc, newLoc); err != nil {
return err
}
newDAG, err := e.dagStore.GetMetadata(ctx, newLoc)
if err != nil {
return fmt.Errorf("failed to get metadata for %s: %w", newLoc, err)
}
if err := e.historyStore.Rename(ctx, oldDAG.Name, newDAG.Name); err != nil {
return fmt.Errorf("failed to rename history for %s: %w", oldLoc, err)
}
return nil
}
func (e *client) StopDAG(ctx context.Context, dag *digraph.DAG) error {
logger.Info(ctx, "Stopping", "name", dag.Name)
addr := dag.SockAddr("") // FIXME: Should handle the case of dynamic DAG
if !fileutil.FileExists(addr) {
logger.Info(ctx, "The DAG is not running", "name", dag.Name)
return nil
}
client := sock.NewClient(addr)
_, err := client.Request("POST", "/stop")
return err
}
func (e *client) StartDAG(_ context.Context, dag *digraph.DAG, opts StartOptions) error {
args := []string{"start"}
if opts.Params != "" {
args = append(args, "-p")
args = append(args, fmt.Sprintf(`"%s"`, escapeArg(opts.Params)))
}
if opts.Quiet {
args = append(args, "-q")
}
args = append(args, dag.Location)
// nolint:gosec
cmd := exec.Command(e.executable, args...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true, Pgid: 0}
cmd.Dir = e.workDir
cmd.Env = os.Environ()
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Start()
}
func (e *client) RestartDAG(_ context.Context, dag *digraph.DAG, opts RestartOptions) error {
args := []string{"restart"}
if opts.Quiet {
args = append(args, "-q")
}
args = append(args, dag.Location)
// nolint:gosec
cmd := exec.Command(e.executable, args...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true, Pgid: 0}
cmd.Dir = e.workDir
cmd.Env = os.Environ()
return cmd.Start()
}
func (e *client) RetryDAG(_ context.Context, dag *digraph.DAG, requestID string) error {
args := []string{"retry"}
args = append(args, fmt.Sprintf("--request-id=%s", requestID))
args = append(args, dag.Location)
// nolint:gosec
cmd := exec.Command(e.executable, args...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true, Pgid: 0}
cmd.Dir = e.workDir
cmd.Env = os.Environ()
return cmd.Start()
}
func (*client) GetCurrentStatus(_ context.Context, dag *digraph.DAG) (*persistence.Status, error) {
// FIXME: Should handle the case of dynamic DAG
client := sock.NewClient(dag.SockAddr(""))
ret, err := client.Request("GET", "/status")
if err != nil {
if errors.Is(err, sock.ErrTimeout) {
return nil, err
}
// The DAG is not running so return the default status
status := persistence.NewStatusFactory(dag).Default()
return &status, nil
}
return persistence.StatusFromJSON(ret)
}
func (e *client) GetStatus(ctx context.Context, name string, requestID string) (*persistence.Status, error) {
record, err := e.historyStore.FindByRequestID(ctx, name, requestID)
if err != nil {
return nil, fmt.Errorf("failed to find status by request id: %w", err)
}
latestStatus, err := record.ReadStatus(ctx)
if err != nil {
return nil, fmt.Errorf("failed to read status: %w", err)
}
return latestStatus, nil
}
func (e *client) GetStatusByRequestID(ctx context.Context, dag *digraph.DAG, requestID string) (
*persistence.Status, error,
) {
record, err := e.historyStore.FindByRequestID(ctx, dag.Name, requestID)
if err != nil {
return nil, fmt.Errorf("failed to find status by request id: %w", err)
}
latestStatus, err := record.ReadStatus(ctx)
if err != nil {
return nil, fmt.Errorf("failed to read status: %w", err)
}
// If the DAG is running, set the currentStatus to error if the request ID does not match
// Because the DAG run must be stopped
// TODO: Handle different request IDs for the same DAG
currentStatus, _ := e.GetCurrentStatus(ctx, dag)
if currentStatus != nil && currentStatus.RequestID != requestID {
latestStatus.SetStatusToErrorIfRunning()
}
return latestStatus, err
}
func (*client) currentStatus(_ context.Context, dag *digraph.DAG) (*persistence.Status, error) {
// FIXME: Should handle the case of dynamic DAG
client := sock.NewClient(dag.SockAddr(""))
statusJSON, err := client.Request("GET", "/status")
if err != nil {
return nil, fmt.Errorf("failed to get status: %w", err)
}
return persistence.StatusFromJSON(statusJSON)
}
func (e *client) GetLatestStatus(ctx context.Context, dag *digraph.DAG) (persistence.Status, error) {
currStatus, _ := e.currentStatus(ctx, dag)
if currStatus != nil {
return *currStatus, nil
}
var latestStatus *persistence.Status
record, err := e.historyStore.Latest(ctx, dag.Name)
if err != nil {
goto handleError
}
latestStatus, err = record.ReadStatus(ctx)
if err != nil {
goto handleError
}
latestStatus.SetStatusToErrorIfRunning()
return *latestStatus, nil
handleError:
if errors.Is(err, persistence.ErrNoStatusData) {
// No status for today
return persistence.NewStatusFactory(dag).Default(), nil
}
return persistence.NewStatusFactory(dag).Default(), err
}
func (e *client) GetRecentHistory(ctx context.Context, name string, n int) []persistence.Run {
records := e.historyStore.Recent(ctx, name, n)
var runs []persistence.Run
for _, record := range records {
if run, err := record.ReadRun(ctx); err == nil {
runs = append(runs, *run)
}
}
return runs
}
func (e *client) UpdateStatus(ctx context.Context, name string, status persistence.Status) error {
return e.historyStore.Update(ctx, name, status.RequestID, status)
}
func (e *client) UpdateDAG(ctx context.Context, id string, spec string) error {
return e.dagStore.UpdateSpec(ctx, id, []byte(spec))
}
func (e *client) DeleteDAG(ctx context.Context, name string) error {
return e.dagStore.Delete(ctx, name)
}
func (e *client) ListStatus(ctx context.Context, opts ...ListStatusOption) (*persistence.PaginatedResult[DAGStatus], []string, error) {
var options GetAllStatusOptions
for _, opt := range opts {
opt(&options)
}
if options.Limit == nil {
options.Limit = new(int)
*options.Limit = 100
}
if options.Page == nil {
options.Page = new(int)
*options.Page = 1
}
pg := persistence.NewPaginator(*options.Page, *options.Limit)
dags, errList, err := e.dagStore.List(ctx, persistence.ListOptions{
Paginator: &pg,
Name: fromPtr(options.Name),
Tag: fromPtr(options.Tag),
})
if err != nil {
return nil, errList, err
}
var items []DAGStatus
for _, d := range dags.Items {
status, err := e.readStatus(ctx, d)
if err != nil {
errList = append(errList, err.Error())
}
items = append(items, status)
}
r := persistence.NewPaginatedResult(items, dags.TotalCount, pg)
return &r, errList, nil
}
func (e *client) getDAG(ctx context.Context, loc string) (*digraph.DAG, error) {
dagDetail, err := e.dagStore.GetDetails(ctx, loc)
return e.emptyDAGIfNil(dagDetail, loc), err
}
func (e *client) GetDAGStatus(ctx context.Context, loc string) (DAGStatus, error) {
dag, err := e.getDAG(ctx, loc)
if dag == nil {
// TODO: fix not to use location
dag = &digraph.DAG{Name: loc, Location: loc}
}
if err == nil {
// check the dag is correct in terms of graph
_, err = scheduler.NewExecutionGraph(dag.Steps...)
}
latestStatus, _ := e.GetLatestStatus(ctx, dag)
return newDAGStatus(
dag, latestStatus, e.IsSuspended(ctx, loc), err,
), err
}
func (e *client) ToggleSuspend(_ context.Context, loc string, suspend bool) error {
return e.flagStore.ToggleSuspend(loc, suspend)
}
func (e *client) readStatus(ctx context.Context, dag *digraph.DAG) (DAGStatus, error) {
latestStatus, err := e.GetLatestStatus(ctx, dag)
id := strings.TrimSuffix(
filepath.Base(dag.Location),
filepath.Ext(dag.Location),
)
return newDAGStatus(
dag, latestStatus, e.IsSuspended(ctx, id), err,
), err
}
func (*client) emptyDAGIfNil(dag *digraph.DAG, dagLocation string) *digraph.DAG {
if dag != nil {
return dag
}
return &digraph.DAG{Location: dagLocation}
}
func (e *client) IsSuspended(_ context.Context, id string) bool {
return e.flagStore.IsSuspended(id)
}
func escapeArg(input string) string {
escaped := strings.Builder{}
for _, char := range input {
switch char {
case '\r':
_, _ = escaped.WriteString("\\r")
case '\n':
_, _ = escaped.WriteString("\\n")
default:
_, _ = escaped.WriteRune(char)
}
}
return escaped.String()
}
func (e *client) GetTagList(ctx context.Context) ([]string, []string, error) {
return e.dagStore.TagList(ctx)
}
func fromPtr[T any](p *T) T {
var zero T
if p == nil {
return zero
}
return *p
}

View File

@ -1,360 +0,0 @@
package client_test
import (
"encoding/json"
"fmt"
"net/http"
"path/filepath"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"github.com/dagu-org/dagu/internal/client"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/sock"
"github.com/dagu-org/dagu/internal/test"
)
func TestClient_GetStatus(t *testing.T) {
t.Parallel()
th := test.Setup(t)
t.Run("Valid", func(t *testing.T) {
dag := th.DAG(t, filepath.Join("client", "valid.yaml"))
ctx := th.Context
requestID := uuid.Must(uuid.NewV7()).String()
socketServer, _ := sock.NewServer(
dag.SockAddr(requestID),
func(w http.ResponseWriter, _ *http.Request) {
status := persistence.NewStatusFactory(dag.DAG).Create(
requestID, scheduler.StatusRunning, 0, time.Now(),
)
w.WriteHeader(http.StatusOK)
jsonData, err := json.Marshal(status)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
_, _ = w.Write(jsonData)
},
)
go func() {
_ = socketServer.Serve(ctx, nil)
_ = socketServer.Shutdown(ctx)
}()
dag.AssertCurrentStatus(t, scheduler.StatusRunning)
_ = socketServer.Shutdown(ctx)
dag.AssertCurrentStatus(t, scheduler.StatusNone)
})
t.Run("InvalidDAGName", func(t *testing.T) {
ctx := th.Context
cli := th.Client
dagStatus, err := cli.GetDAGStatus(ctx, "invalid-dag-name")
require.Error(t, err)
require.NotNil(t, dagStatus)
// Check the status contains error.
require.Error(t, dagStatus.Error)
})
t.Run("UpdateStatus", func(t *testing.T) {
dag := th.DAG(t, filepath.Join("client", "update_status.yaml"))
requestID := uuid.Must(uuid.NewV7()).String()
now := time.Now()
ctx := th.Context
cli := th.Client
// Open the history store and write a status before updating it.
record, err := th.HistoryStore.NewRecord(ctx, dag.DAG, now, requestID, persistence.NewRecordOptions{})
require.NoError(t, err)
err = record.Open(ctx)
require.NoError(t, err)
status := testNewStatus(dag.DAG, requestID, scheduler.StatusSuccess, scheduler.NodeStatusSuccess)
err = record.Write(ctx, status)
require.NoError(t, err)
_ = record.Close(ctx)
// Get the status and check if it is the same as the one we wrote.
statusToCheck, err := cli.GetStatusByRequestID(ctx, dag.DAG, requestID)
require.NoError(t, err)
require.Equal(t, scheduler.NodeStatusSuccess, statusToCheck.Nodes[0].Status)
// Update the status.
newStatus := scheduler.NodeStatusError
status.Nodes[0].Status = newStatus
err = cli.UpdateStatus(ctx, dag.Name, status)
require.NoError(t, err)
statusByRequestID, err := cli.GetStatusByRequestID(ctx, dag.DAG, requestID)
require.NoError(t, err)
require.Equal(t, 1, len(status.Nodes))
require.Equal(t, newStatus, statusByRequestID.Nodes[0].Status)
})
t.Run("InvalidUpdateStatusWithInvalidReqID", func(t *testing.T) {
dag := th.DAG(t, filepath.Join("client", "invalid_reqid.yaml"))
ctx := th.Context
cli := th.Client
// update with invalid request id
status := testNewStatus(dag.DAG, "unknown-req-id", scheduler.StatusError, scheduler.NodeStatusError)
// Check if the update fails.
err := cli.UpdateStatus(ctx, dag.Name, status)
require.Error(t, err)
})
}
func TestClient_RunDAG(t *testing.T) {
th := test.Setup(t)
t.Run("RunDAG", func(t *testing.T) {
dag := th.DAG(t, filepath.Join("client", "run_dag.yaml"))
dagStatus, err := th.Client.GetDAGStatus(th.Context, dag.Location)
require.NoError(t, err)
err = th.Client.StartDAG(th.Context, dagStatus.DAG, client.StartOptions{})
require.NoError(t, err)
dag.AssertLatestStatus(t, scheduler.StatusSuccess)
status, err := th.Client.GetLatestStatus(th.Context, dagStatus.DAG)
require.NoError(t, err)
require.Equal(t, scheduler.StatusSuccess.String(), status.Status.String())
})
t.Run("Stop", func(t *testing.T) {
dag := th.DAG(t, filepath.Join("client", "stop.yaml"))
ctx := th.Context
err := th.Client.StartDAG(ctx, dag.DAG, client.StartOptions{})
require.NoError(t, err)
dag.AssertLatestStatus(t, scheduler.StatusRunning)
err = th.Client.StopDAG(ctx, dag.DAG)
require.NoError(t, err)
dag.AssertLatestStatus(t, scheduler.StatusCancel)
})
t.Run("Restart", func(t *testing.T) {
dag := th.DAG(t, filepath.Join("client", "restart.yaml"))
ctx := th.Context
err := th.Client.RestartDAG(ctx, dag.DAG, client.RestartOptions{})
require.NoError(t, err)
dag.AssertLatestStatus(t, scheduler.StatusSuccess)
})
t.Run("Retry", func(t *testing.T) {
dag := th.DAG(t, filepath.Join("client", "retry.yaml"))
ctx := th.Context
cli := th.Client
err := cli.StartDAG(ctx, dag.DAG, client.StartOptions{Params: "x y z"})
require.NoError(t, err)
// Wait for the DAG to finish
dag.AssertLatestStatus(t, scheduler.StatusSuccess)
// Retry the DAG with the same params.
status, err := cli.GetLatestStatus(ctx, dag.DAG)
require.NoError(t, err)
previousRequestID := status.RequestID
previousParams := status.Params
time.Sleep(1 * time.Second)
err = cli.RetryDAG(ctx, dag.DAG, previousRequestID)
require.NoError(t, err)
// Wait for the DAG to finish
dag.AssertLatestStatus(t, scheduler.StatusSuccess)
status, err = cli.GetLatestStatus(ctx, dag.DAG)
require.NoError(t, err)
// Check if the params are the same as the previous run.
require.Equal(t, previousRequestID, status.RequestID)
require.Equal(t, previousParams, status.Params)
})
}
func TestClient_UpdateDAG(t *testing.T) {
t.Parallel()
th := test.Setup(t)
t.Run("Update", func(t *testing.T) {
ctx := th.Context
cli := th.Client
// valid DAG
validDAG := `name: test DAG
steps:
- name: "1"
command: "true"
`
// Update Error: the DAG does not exist
err := cli.UpdateDAG(ctx, "non-existing-dag", validDAG)
require.Error(t, err)
// create a new DAG file
id, err := cli.CreateDAG(ctx, "new-dag-file")
require.NoError(t, err)
// Update the DAG
err = cli.UpdateDAG(ctx, id, validDAG)
require.NoError(t, err)
// Check the content of the DAG file
spec, err := cli.GetDAGSpec(ctx, id)
require.NoError(t, err)
require.Equal(t, validDAG, spec)
})
t.Run("Remove", func(t *testing.T) {
ctx := th.Context
cli := th.Client
spec := `name: test DAG
steps:
- name: "1"
command: "true"
`
id, err := cli.CreateDAG(ctx, "test")
require.NoError(t, err)
err = cli.UpdateDAG(ctx, id, spec)
require.NoError(t, err)
// check file
newSpec, err := cli.GetDAGSpec(ctx, id)
require.NoError(t, err)
require.Equal(t, spec, newSpec)
// delete
err = cli.DeleteDAG(ctx, id)
require.NoError(t, err)
})
t.Run("Create", func(t *testing.T) {
ctx := th.Context
cli := th.Client
id, err := cli.CreateDAG(ctx, "test-dag")
require.NoError(t, err)
// Check if the new DAG is actually created.
filePath := filepath.Join(th.Config.Paths.DAGsDir, id+".yaml")
dag, err := digraph.Load(ctx, filePath)
require.NoError(t, err)
require.Equal(t, "test-dag", dag.Name)
})
t.Run("Rename", func(t *testing.T) {
ctx := th.Context
cli := th.Client
// Create a DAG to rename.
id, err := cli.CreateDAG(ctx, "old_name")
require.NoError(t, err)
_, err = cli.GetDAGStatus(ctx, filepath.Join(th.Config.Paths.DAGsDir, id+".yaml"))
require.NoError(t, err)
// Rename the file.
err = cli.MoveDAG(ctx, id, id+"_renamed")
// Check if the file is renamed.
require.NoError(t, err)
require.FileExists(t, filepath.Join(th.Config.Paths.DAGsDir, id+"_renamed.yaml"))
})
}
func TestClient_ReadHistory(t *testing.T) {
t.Parallel()
th := test.Setup(t)
t.Run("TestClient_Empty", func(t *testing.T) {
ctx := th.Context
cli := th.Client
dag := th.DAG(t, filepath.Join("client", "empty_status.yaml"))
_, err := cli.GetDAGStatus(ctx, dag.Location)
require.NoError(t, err)
})
t.Run("TestClient_All", func(t *testing.T) {
ctx := th.Context
cli := th.Client
// Create a DAG
_, err := cli.CreateDAG(ctx, "test-dag1")
require.NoError(t, err)
_, err = cli.CreateDAG(ctx, "test-dag2")
require.NoError(t, err)
// Get all statuses.
result, errList, err := cli.ListStatus(ctx)
require.NoError(t, err)
require.Empty(t, errList)
require.Equal(t, 2, len(result.Items))
})
}
func testNewStatus(dag *digraph.DAG, requestID string, status scheduler.Status, nodeStatus scheduler.NodeStatus) persistence.Status {
nodes := []scheduler.NodeData{{State: scheduler.NodeState{Status: nodeStatus}}}
tm := time.Now()
startedAt := &tm
return persistence.NewStatusFactory(dag).Create(
requestID, status, 0, *startedAt, persistence.WithNodes(nodes),
)
}
func TestClient_GetTagList(t *testing.T) {
th := test.Setup(t)
ctx := th.Context
cli := th.Client
// Create DAG List
for i := 0; i < 40; i++ {
spec := ""
id, err := cli.CreateDAG(ctx, "1test-dag-pagination"+fmt.Sprintf("%d", i))
require.NoError(t, err)
if i%2 == 0 {
spec = "tags: tag1,tag2\nsteps:\n - name: step1\n command: echo hello\n"
} else {
spec = "tags: tag2,tag3\nsteps:\n - name: step1\n command: echo hello\n"
}
if err = cli.UpdateDAG(ctx, id, spec); err != nil {
t.Fatal(err)
}
}
tags, errs, err := cli.GetTagList(ctx)
require.NoError(t, err)
require.Equal(t, 0, len(errs))
require.Equal(t, 3, len(tags))
mapTags := make(map[string]bool)
for _, tag := range tags {
mapTags[tag] = true
}
require.True(t, mapTags["tag1"])
require.True(t, mapTags["tag2"])
require.True(t, mapTags["tag3"])
}

View File

@ -1,119 +0,0 @@
package client
import (
"context"
"path/filepath"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/persistence"
)
// FIXME: Separate DAG client interface and Runs client interface
type Client interface {
CreateDAG(ctx context.Context, loc string) (string, error)
GetDAGSpec(ctx context.Context, loc string) (string, error)
GrepDAG(ctx context.Context, pattern string) ([]*persistence.GrepResult, []string, error)
MoveDAG(ctx context.Context, oldLoc, newLoc string) error
StopDAG(ctx context.Context, dag *digraph.DAG) error
StartDAG(ctx context.Context, dag *digraph.DAG, opts StartOptions) error
RestartDAG(ctx context.Context, dag *digraph.DAG, opts RestartOptions) error
RetryDAG(ctx context.Context, dag *digraph.DAG, requestID string) error
GetCurrentStatus(ctx context.Context, dag *digraph.DAG) (*persistence.Status, error)
GetStatusByRequestID(ctx context.Context, dag *digraph.DAG, requestID string) (*persistence.Status, error)
GetLatestStatus(ctx context.Context, dag *digraph.DAG) (persistence.Status, error)
GetRecentHistory(ctx context.Context, name string, n int) []persistence.Run
GetStatus(ctx context.Context, name string, requestID string) (*persistence.Status, error)
UpdateStatus(ctx context.Context, name string, status persistence.Status) error
LoadYAML(ctx context.Context, spec []byte, opts ...digraph.LoadOption) (*digraph.DAG, error)
UpdateDAG(ctx context.Context, loc string, spec string) error
DeleteDAG(ctx context.Context, loc string) error
ListStatus(ctx context.Context, opts ...ListStatusOption) (*persistence.PaginatedResult[DAGStatus], []string, error)
GetDAGStatus(ctx context.Context, loc string) (DAGStatus, error)
IsSuspended(ctx context.Context, loc string) bool
ToggleSuspend(ctx context.Context, loc string, suspend bool) error
GetTagList(ctx context.Context) ([]string, []string, error)
}
type GetAllStatusOptions struct {
// Number of items to return per page
Limit *int
// Page number (for pagination)
Page *int
// Filter DAGs by matching name
Name *string
// Filter DAGs by matching tag
Tag *string
}
type ListStatusOption func(*GetAllStatusOptions)
func WithLimit(limit int) ListStatusOption {
return func(opt *GetAllStatusOptions) {
opt.Limit = &limit
}
}
func WithPage(page int) ListStatusOption {
return func(opt *GetAllStatusOptions) {
opt.Page = &page
}
}
func WithName(name string) ListStatusOption {
return func(opt *GetAllStatusOptions) {
opt.Name = &name
}
}
func WithTag(tag string) ListStatusOption {
return func(opt *GetAllStatusOptions) {
opt.Tag = &tag
}
}
type StartOptions struct {
Params string
Quiet bool
}
type RestartOptions struct {
Quiet bool
}
type DAGStatus struct {
File string
Dir string
DAG *digraph.DAG
Status persistence.Status
Suspended bool
Error error
}
// ErrorAsString converts the error to a string if it exists, otherwise returns an empty string.
func (s DAGStatus) ErrorAsString() string {
if s.Error == nil {
return ""
}
return s.Error.Error()
}
func newDAGStatus(
dag *digraph.DAG, status persistence.Status, suspended bool, err error,
) DAGStatus {
var (
file string
dir string
)
if dag.Location != "" {
file = dag.Location
dir = filepath.Dir(dag.Location)
}
return DAGStatus{
File: file,
Dir: dir,
DAG: dag,
Status: status,
Suspended: suspended,
Error: err,
}
}

View File

@ -9,18 +9,16 @@ import (
"syscall"
"time"
"github.com/dagu-org/dagu/internal/client"
"github.com/dagu-org/dagu/internal/cmdutil"
"github.com/dagu-org/dagu/internal/config"
"github.com/dagu-org/dagu/internal/dagstore"
"github.com/dagu-org/dagu/internal/dagstore/filestore"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/fileutil"
"github.com/dagu-org/dagu/internal/frontend"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/persistence/filecache"
"github.com/dagu-org/dagu/internal/persistence/jsondb"
"github.com/dagu-org/dagu/internal/persistence/local"
"github.com/dagu-org/dagu/internal/persistence/local/storage"
"github.com/dagu-org/dagu/internal/runstore"
runfs "github.com/dagu-org/dagu/internal/runstore/filestore"
"github.com/dagu-org/dagu/internal/scheduler"
"github.com/dagu-org/dagu/internal/stringutil"
"github.com/google/uuid"
@ -124,71 +122,89 @@ func (c *Context) init(cmd *cobra.Command) error {
}
// Client initializes a Client using the provided options. If not supplied,
// it creates default DAGStore and HistoryStore instances.
func (s *Context) Client(opts ...clientOption) (client.Client, error) {
// it creates a default RunStore instance.
func (s *Context) Client(opts ...clientOption) (runstore.Client, error) {
options := &clientOptions{}
for _, opt := range opts {
opt(options)
}
runStore := options.runStore
if runStore == nil {
runStore = s.runStore()
}
return runstore.NewClient(
runStore,
s.cfg.Paths.Executable,
s.cfg.Global.WorkDir,
), nil
}
// DAGClient initializes a DAGClient using the provided options.
func (s *Context) DAGClient(runClient runstore.Client, opts ...dagClientOption) (dagstore.Client, error) {
options := &dagClientOptions{}
for _, opt := range opts {
opt(options)
}
dagStore := options.dagStore
if dagStore == nil {
var err error
dagStore, err = s.dagStore()
dagStore, err = s.dagStore(nil)
if err != nil {
return nil, fmt.Errorf("failed to initialize DAG store: %w", err)
return dagstore.Client{}, fmt.Errorf("failed to initialize DAG store: %w", err)
}
}
historyStore := options.historyStore
if historyStore == nil {
historyStore = s.historyStore()
}
// Create a flag store based on the suspend flags directory.
flagStore := local.NewFlagStore(storage.NewStorage(
s.cfg.Paths.SuspendFlagsDir,
))
return client.New(
return dagstore.NewClient(
runClient,
dagStore,
historyStore,
flagStore,
s.cfg.Paths.Executable,
s.cfg.Global.WorkDir,
), nil
}
// server creates and returns a new web UI server.
// It initializes in-memory caches for DAGs and history, and uses them in the client.
// It initializes in-memory caches for DAG definitions and run statuses, and wires them into the clients.
func (ctx *Context) server() (*frontend.Server, error) {
dagCache := filecache.New[*digraph.DAG](0, time.Hour*12)
dagCache := fileutil.NewCache[*digraph.DAG](0, time.Hour*12)
dagCache.StartEviction(ctx)
dagStore := ctx.dagStoreWithCache(dagCache)
historyCache := filecache.New[*persistence.Status](0, time.Hour*12)
historyCache.StartEviction(ctx)
historyStore := ctx.historyStoreWithCache(historyCache)
statusCache := fileutil.NewCache[*runstore.Status](0, time.Hour*12)
statusCache.StartEviction(ctx)
runStore := ctx.runStoreWithCache(statusCache)
cli, err := ctx.Client(withDAGStore(dagStore), withHistoryStore(historyStore))
runCli, err := ctx.Client(withRunStore(runStore))
if err != nil {
return nil, fmt.Errorf("failed to initialize client: %w", err)
}
return frontend.NewServer(ctx.cfg, cli), nil
dagCli, err := ctx.DAGClient(runCli, withDAGStore(dagStore))
if err != nil {
return nil, fmt.Errorf("failed to initialize DAG client: %w", err)
}
return frontend.NewServer(ctx.cfg, dagCli, runCli), nil
}
// scheduler creates a new scheduler instance using the default client.
// It builds a DAG job manager to handle scheduled executions.
func (s *Context) scheduler() (*scheduler.Scheduler, error) {
cli, err := s.Client()
runCli, err := s.Client()
if err != nil {
return nil, fmt.Errorf("failed to initialize client: %w", err)
}
manager := scheduler.NewDAGJobManager(s.cfg.Paths.DAGsDir, cli, s.cfg.Paths.Executable, s.cfg.Global.WorkDir)
dagCli, err := s.DAGClient(runCli)
if err != nil {
return nil, fmt.Errorf("failed to initialize DAG client: %w", err)
}
manager := scheduler.NewDAGJobManager(s.cfg.Paths.DAGsDir, dagCli, runCli, s.cfg.Paths.Executable, s.cfg.Global.WorkDir)
return scheduler.New(s.cfg, manager), nil
}
// dagStore returns a new DAGStore instance. It ensures that the directory exists
// (creating it if necessary) before returning the store.
func (s *Context) dagStore() (persistence.DAGStore, error) {
func (s *Context) dagStore(searchPaths []string) (dagstore.Store, error) {
baseDir := s.cfg.Paths.DAGsDir
_, err := os.Stat(baseDir)
if os.IsNotExist(err) {
@ -197,27 +213,31 @@ func (s *Context) dagStore() (persistence.DAGStore, error) {
}
}
return local.NewDAGStore(s.cfg.Paths.DAGsDir), nil
// Create a DAG store that keeps suspend flags under the configured flags directory.
return filestore.New(
s.cfg.Paths.DAGsDir,
filestore.WithFlagsBaseDir(s.cfg.Paths.SuspendFlagsDir),
filestore.WithSearchPaths(searchPaths)), nil
}
// dagStoreWithCache returns a DAGStore instance that uses an in-memory file cache.
func (s *Context) dagStoreWithCache(cache *filecache.Cache[*digraph.DAG]) persistence.DAGStore {
return local.NewDAGStore(s.cfg.Paths.DAGsDir, local.WithFileCache(cache))
func (s *Context) dagStoreWithCache(cache *fileutil.Cache[*digraph.DAG]) dagstore.Store {
return filestore.New(s.cfg.Paths.DAGsDir, filestore.WithFlagsBaseDir(s.cfg.Paths.SuspendFlagsDir), filestore.WithFileCache(cache))
}
// historyStore returns a new HistoryStore instance using JSON database storage.
// runStore returns a new RunStore instance using JSON database storage.
// It applies the "latestStatusToday" setting from the server configuration.
func (s *Context) historyStore() persistence.HistoryStore {
return jsondb.New(s.cfg.Paths.DataDir, jsondb.WithLatestStatusToday(
func (s *Context) runStore() runstore.Store {
return runfs.New(s.cfg.Paths.DataDir, runfs.WithLatestStatusToday(
s.cfg.Server.LatestStatusToday,
))
}
// historyStoreWithCache returns a HistoryStore that uses an in-memory cache.
func (s *Context) historyStoreWithCache(cache *filecache.Cache[*persistence.Status]) persistence.HistoryStore {
return jsondb.New(s.cfg.Paths.DataDir,
jsondb.WithLatestStatusToday(s.cfg.Server.LatestStatusToday),
jsondb.WithFileCache(cache),
// runStoreWithCache returns a RunStore that uses an in-memory cache.
func (s *Context) runStoreWithCache(cache *fileutil.Cache[*runstore.Status]) runstore.Store {
return runfs.New(s.cfg.Paths.DataDir,
runfs.WithLatestStatusToday(s.cfg.Server.LatestStatusToday),
runfs.WithFileCache(cache),
)
}
@ -311,21 +331,28 @@ type clientOption func(*clientOptions)
// clientOptions holds optional dependencies for constructing a client.
type clientOptions struct {
dagStore persistence.DAGStore
historyStore persistence.HistoryStore
runStore runstore.Store
}
// withDAGStore returns a clientOption that sets a custom DAGStore.
func withDAGStore(dagStore persistence.DAGStore) clientOption {
// withRunStore returns a clientOption that sets a custom RunStore.
func withRunStore(runStore runstore.Store) clientOption {
return func(o *clientOptions) {
o.dagStore = dagStore
o.runStore = runStore
}
}
// withHistoryStore returns a clientOption that sets a custom HistoryStore.
func withHistoryStore(historyStore persistence.HistoryStore) clientOption {
return func(o *clientOptions) {
o.historyStore = historyStore
// dagClientOption defines functional options for configuring the DAG client.
type dagClientOption func(*dagClientOptions)
// dagClientOptions holds optional dependencies for constructing a DAG client.
type dagClientOptions struct {
dagStore dagstore.Store
}
// withDAGStore returns a dagClientOption that sets a custom DAGStore.
func withDAGStore(dagStore dagstore.Store) dagClientOption {
return func(o *dagClientOptions) {
o.dagStore = dagStore
}
}
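A condensed sketch of the new two-client wiring a command would use; the enclosing function and its error-handling style are assumptions, mirroring the server() and scheduler() code above:

runCli, err := ctx.Client() // runstore.Client backed by the default RunStore
if err != nil {
	return fmt.Errorf("failed to initialize client: %w", err)
}
dagCli, err := ctx.DAGClient(runCli) // dagstore.Client layered on the run client
if err != nil {
	return fmt.Errorf("failed to initialize DAG client: %w", err)
}
_ = dagCli // used by the command body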

View File

@ -68,7 +68,7 @@ func runDry(ctx *Context, args []string) error {
ctx.LogToFile(logFile)
dagStore, err := ctx.dagStore()
dagStore, err := ctx.dagStore([]string{filepath.Dir(dag.Location)})
if err != nil {
return fmt.Errorf("failed to initialize DAG store: %w", err)
}
@ -87,7 +87,7 @@ func runDry(ctx *Context, args []string) error {
logFile.Name(),
cli,
dagStore,
ctx.historyStore(),
ctx.runStore(),
rootDAG,
agent.Options{Dry: true},
)

View File

@ -69,6 +69,27 @@ var (
usage: "Unique request ID for a DAG run",
}
// Unique request ID used for stopping a DAG run.
requestIDFlagStop = commandLineFlag{
name: "request-id",
shorthand: "r",
usage: "Request ID for stopping a DAG run",
}
// Unique request ID used for restarting a DAG run.
requestIDFlagRestart = commandLineFlag{
name: "request-id",
shorthand: "r",
usage: "Request ID for restarting a DAG run",
}
// Unique request ID used for checking the status of a DAG run.
requestIDFlagStatus = commandLineFlag{
name: "request-id",
shorthand: "r",
usage: "Request ID for checking the status of a DAG run",
}
// rootRequestIDFlag reads the root request ID for starting a sub-DAG run
rootRequestIDFlag = commandLineFlag{
name: "root-request-id",

View File

@ -8,94 +8,109 @@ import (
"time"
"github.com/dagu-org/dagu/internal/agent"
"github.com/dagu-org/dagu/internal/client"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/spf13/cobra"
)
func CmdRestart() *cobra.Command {
return NewCommand(
&cobra.Command{
Use: "restart [flags] /path/to/spec.yaml",
Use: "restart --request-id=abc123 dagName",
Short: "Restart a running DAG",
Long: `Stop the currently running DAG and immediately restart it with the same configuration.
Long: `Stop the currently running DAG and immediately restart it with the same configuration but with a new request ID.
Flags:
--request-id string (Optional) Unique identifier for tracking the restart execution.
--request-id string (optional) Unique identifier for tracking the restart execution.
Example:
dagu restart my_dag.yaml --request-id=abc123
dagu restart --request-id=abc123 dagName
This command gracefully stops the active DAG run before reinitiating it.
This command gracefully stops the active DAG run before restarting it.
If the request ID is not provided, the command finds the currently running DAG by name.
`,
Args: cobra.ExactArgs(1),
}, restartFlags, runRestart,
)
}
var restartFlags = []commandLineFlag{}
var restartFlags = []commandLineFlag{
requestIDFlagRestart,
}
func runRestart(ctx *Context, args []string) error {
specFilePath := args[0]
dag, err := digraph.Load(ctx, specFilePath, digraph.WithBaseConfig(ctx.cfg.Paths.BaseConfig))
requestID, err := ctx.Flags().GetString("request-id")
if err != nil {
logger.Error(ctx, "Failed to load DAG", "path", specFilePath, "err", err)
return fmt.Errorf("failed to load DAG from %s: %w", specFilePath, err)
return fmt.Errorf("failed to get request ID: %w", err)
}
if err := handleRestartProcess(ctx, dag, specFilePath); err != nil {
logger.Error(ctx, "Failed to restart process", "path", specFilePath, "err", err)
dagName := args[0]
var record runstore.Record
if requestID != "" {
// Retrieve the previous run's runstore record for the specified request ID.
r, err := ctx.runStore().FindByRequestID(ctx, dagName, requestID)
if err != nil {
logger.Error(ctx, "Failed to retrieve historical run", "requestID", requestID, "err", err)
return fmt.Errorf("failed to retrieve historical run for request ID %s: %w", requestID, err)
}
record = r
} else {
r, err := ctx.runStore().Latest(ctx, dagName)
if err != nil {
logger.Error(ctx, "Failed to retrieve latest runstore record", "dagName", dagName, "err", err)
return fmt.Errorf("failed to retrieve latest runstore record for DAG %s: %w", dagName, err)
}
record = r
}
status, err := record.ReadStatus(ctx)
if err != nil {
logger.Error(ctx, "Failed to read status", "err", err)
return fmt.Errorf("failed to read status: %w", err)
}
if status.Status != scheduler.StatusRunning {
logger.Error(ctx, "DAG is not running", "dagName", dagName)
}
dag, err := record.ReadDAG(ctx)
if err != nil {
logger.Error(ctx, "Failed to read DAG from runstore record", "err", err)
return fmt.Errorf("failed to read DAG from runstore record: %w", err)
}
if err := handleRestartProcess(ctx, dag, requestID); err != nil {
logger.Error(ctx, "Failed to restart DAG", "dagName", dag.Name, "err", err)
return fmt.Errorf("restart process failed for DAG %s: %w", dag.Name, err)
}
return nil
}
func handleRestartProcess(ctx *Context, dag *digraph.DAG, specFilePath string) error {
func handleRestartProcess(ctx *Context, dag *digraph.DAG, requestID string) error {
cli, err := ctx.Client()
if err != nil {
return fmt.Errorf("failed to initialize client: %w", err)
}
// Stop if running
if err := stopDAGIfRunning(ctx, cli, dag); err != nil {
if err := stopDAGIfRunning(ctx, cli, dag, requestID); err != nil {
return fmt.Errorf("failed to stop DAG: %w", err)
}
// Wait before restart if configured
waitForRestart(ctx, dag.RestartWait)
// Get previous parameters
status, err := getPreviousRunStatus(ctx, cli, dag)
if err != nil {
return fmt.Errorf("failed to get previous run parameters: %w", err)
}
loadOpts := []digraph.LoadOption{
digraph.WithBaseConfig(ctx.cfg.Paths.BaseConfig),
digraph.WithDAGsDir(ctx.cfg.Paths.DAGsDir),
}
if status.Params != "" {
// backward compatibility
loadOpts = append(loadOpts, digraph.WithParams(status.Params))
} else {
loadOpts = append(loadOpts, digraph.WithParams(status.ParamsList))
}
// Reload DAG with parameters
dag, err = digraph.Load(ctx, specFilePath, loadOpts...)
if err != nil {
return fmt.Errorf("failed to reload DAG with params: %w", err)
if dag.RestartWait > 0 {
logger.Info(ctx, "Waiting for restart", "duration", dag.RestartWait)
time.Sleep(dag.RestartWait)
}
// Execute the exact same DAG with the same parameters but a new request ID
return executeDAG(ctx, cli, dag)
}
func executeDAG(ctx *Context, cli client.Client, dag *digraph.DAG) error {
func executeDAG(ctx *Context, cli runstore.Client, dag *digraph.DAG) error {
requestID, err := generateRequestID()
if err != nil {
return fmt.Errorf("failed to generate request ID: %w", err)
@ -113,7 +128,7 @@ func executeDAG(ctx *Context, cli client.Client, dag *digraph.DAG) error {
logger.Info(ctx, "DAG restart initiated", "DAG", dag.Name, "requestID", requestID, "logFile", logFile.Name())
dagStore, err := ctx.dagStore()
dagStore, err := ctx.dagStore([]string{filepath.Dir(dag.Location)})
if err != nil {
logger.Error(ctx, "Failed to initialize DAG store", "err", err)
return fmt.Errorf("failed to initialize DAG store: %w", err)
@ -128,7 +143,7 @@ func executeDAG(ctx *Context, cli client.Client, dag *digraph.DAG) error {
logFile.Name(),
cli,
dagStore,
ctx.historyStore(),
ctx.runStore(),
rootDAG,
agent.Options{Dry: false})
@ -145,25 +160,25 @@ func executeDAG(ctx *Context, cli client.Client, dag *digraph.DAG) error {
return nil
}
func stopDAGIfRunning(ctx context.Context, cli client.Client, dag *digraph.DAG) error {
status, err := cli.GetCurrentStatus(ctx, dag)
func stopDAGIfRunning(ctx context.Context, cli runstore.Client, dag *digraph.DAG, requestID string) error {
status, err := cli.GetRealtimeStatus(ctx, dag, requestID)
if err != nil {
return fmt.Errorf("failed to get current status: %w", err)
}
if status.Status == scheduler.StatusRunning {
logger.Infof(ctx, "Stopping: %s", dag.Name)
if err := stopRunningDAG(ctx, cli, dag); err != nil {
if err := stopRunningDAG(ctx, cli, dag, requestID); err != nil {
return fmt.Errorf("failed to stop running DAG: %w", err)
}
}
return nil
}
func stopRunningDAG(ctx context.Context, cli client.Client, dag *digraph.DAG) error {
func stopRunningDAG(ctx context.Context, cli runstore.Client, dag *digraph.DAG, requestID string) error {
const stopPollInterval = 100 * time.Millisecond
for {
status, err := cli.GetCurrentStatus(ctx, dag)
status, err := cli.GetRealtimeStatus(ctx, dag, requestID)
if err != nil {
return fmt.Errorf("failed to get current status: %w", err)
}
@ -172,25 +187,10 @@ func stopRunningDAG(ctx context.Context, cli client.Client, dag *digraph.DAG) er
return nil
}
if err := cli.StopDAG(ctx, dag); err != nil {
if err := cli.Stop(ctx, dag, requestID); err != nil {
return fmt.Errorf("failed to stop DAG: %w", err)
}
time.Sleep(stopPollInterval)
}
}
func waitForRestart(ctx context.Context, restartWait time.Duration) {
if restartWait > 0 {
logger.Info(ctx, "Waiting for restart", "duration", restartWait)
time.Sleep(restartWait)
}
}
func getPreviousRunStatus(ctx context.Context, cli client.Client, dag *digraph.DAG) (persistence.Status, error) {
status, err := cli.GetLatestStatus(ctx, dag)
if err != nil {
return persistence.Status{}, fmt.Errorf("failed to get latest status: %w", err)
}
return status, nil
}

View File

@ -57,10 +57,10 @@ func TestRestartCommand(t *testing.T) {
time.Sleep(time.Millisecond * 300) // Wait for the history to be updated.
recentHistory := client.GetRecentHistory(th.Context, loaded.Name, 2)
recentHistory := client.ListRecentHistory(th.Context, loaded.Name, 2)
require.Len(t, recentHistory, 2)
require.Equal(t, recentHistory[0].Status.Params, recentHistory[1].Status.Params)
require.Equal(t, recentHistory[0].Params, recentHistory[1].Params)
<-done
})

View File

@ -8,7 +8,7 @@ import (
"github.com/dagu-org/dagu/internal/agent"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/spf13/cobra"
)
@ -39,46 +39,31 @@ func runRetry(ctx *Context, args []string) error {
dagName := args[0]
// Retrieve the previous run's history record for the specified request ID.
historyRecord, err := ctx.historyStore().FindByRequestID(ctx, dagName, requestID)
// Retrieve the previous run's runstore record for the specified request ID.
runRecord, err := ctx.runStore().FindByRequestID(ctx, dagName, requestID)
if err != nil {
logger.Error(ctx, "Failed to retrieve historical run", "requestID", requestID, "err", err)
return fmt.Errorf("failed to retrieve historical run for request ID %s: %w", requestID, err)
}
// Read the detailed status of the previous run.
run, err := historyRecord.ReadRun(ctx)
// Read the detailed status of the previous run.
status, err := runRecord.ReadStatus(ctx)
if err != nil {
logger.Error(ctx, "Failed to read status", "err", err)
return fmt.Errorf("failed to read status: %w", err)
}
loadOpts := []digraph.LoadOption{
digraph.WithBaseConfig(ctx.cfg.Paths.BaseConfig),
digraph.WithDAGsDir(ctx.cfg.Paths.DAGsDir),
}
if run.Status.Params != "" {
// If the 'Params' field is not empty, use it instead of 'ParamsList' for backward compatibility.
loadOpts = append(loadOpts, digraph.WithParams(run.Status.Params))
} else {
loadOpts = append(loadOpts, digraph.WithParams(run.Status.ParamsList))
}
// Load the DAG from the local file.
// TODO: Read the DAG from the history record instead of the local file.
dag, err := digraph.Load(ctx, dagName, loadOpts...)
// Get the DAG instance from the runstore record.
dag, err := runRecord.ReadDAG(ctx)
if err != nil {
logger.Error(ctx, "Failed to load DAG specification", "path", dagName, "err", err)
// nolint : staticcheck
return fmt.Errorf("failed to load DAG specification from %s with params %s: %w",
dagName, run.Status.Params, err)
logger.Error(ctx, "Failed to read DAG from runstore record", "err", err)
}
// The retry command is currently only supported for root DAGs.
// Therefore we use the request ID as the root DAG request ID here.
rootDAG := digraph.NewRootDAG(dag.Name, run.Status.RequestID)
rootDAG := digraph.NewRootDAG(dag.Name, status.RequestID)
if err := executeRetry(ctx, dag, run, rootDAG); err != nil {
if err := executeRetry(ctx, dag, status, rootDAG); err != nil {
logger.Error(ctx, "Failed to execute retry", "path", dagName, "err", err)
return fmt.Errorf("failed to execute retry: %w", err)
}
@ -86,11 +71,11 @@ func runRetry(ctx *Context, args []string) error {
return nil
}
func executeRetry(ctx *Context, dag *digraph.DAG, run *persistence.Run, rootDAG digraph.RootDAG) error {
logger.Debug(ctx, "Executing retry", "dagName", dag.Name, "requestID", run.Status.RequestID)
func executeRetry(ctx *Context, dag *digraph.DAG, status *runstore.Status, rootDAG digraph.RootDAG) error {
logger.Debug(ctx, "Executing retry", "dagName", dag.Name, "requestID", status.RequestID)
// We use the same log file for the retry as the original run.
logFile, err := OpenOrCreateLogFile(run.Status.Log)
logFile, err := OpenOrCreateLogFile(status.Log)
if err != nil {
return fmt.Errorf("failed to open log file: %w", err)
}
@ -98,12 +83,12 @@ func executeRetry(ctx *Context, dag *digraph.DAG, run *persistence.Run, rootDAG
_ = logFile.Close()
}()
logger.Info(ctx, "DAG retry initiated", "DAG", dag.Name, "requestID", run.Status.RequestID, "logFile", logFile.Name())
logger.Info(ctx, "DAG retry initiated", "DAG", dag.Name, "requestID", status.RequestID, "logFile", logFile.Name())
// Update the context with the log file
ctx.LogToFile(logFile)
dagStore, err := ctx.dagStore()
dagStore, err := ctx.dagStore([]string{filepath.Dir(dag.Location)})
if err != nil {
logger.Error(ctx, "Failed to initialize DAG store", "err", err)
return fmt.Errorf("failed to initialize DAG store: %w", err)
@ -116,15 +101,15 @@ func executeRetry(ctx *Context, dag *digraph.DAG, run *persistence.Run, rootDAG
}
agentInstance := agent.New(
run.Status.RequestID,
status.RequestID,
dag,
filepath.Dir(logFile.Name()),
logFile.Name(),
cli,
dagStore,
ctx.historyStore(),
ctx.runStore(),
rootDAG,
agent.Options{RetryTarget: &run.Status},
agent.Options{RetryTarget: status},
)
listenSignals(ctx, agentInstance)
@ -134,7 +119,7 @@ func executeRetry(ctx *Context, dag *digraph.DAG, run *persistence.Run, rootDAG
os.Exit(1)
} else {
agentInstance.PrintSummary(ctx)
return fmt.Errorf("failed to execute DAG %s (requestID: %s): %w", dag.Name, run.Status.RequestID, err)
return fmt.Errorf("failed to execute DAG %s (requestID: %s): %w", dag.Name, status.RequestID, err)
}
}

View File

@ -22,9 +22,9 @@ func TestRetryCommand(t *testing.T) {
th.RunCommand(t, cmd.CmdStart(), test.CmdTest{Args: args})
// Find the request ID.
cli := th.Client
cli := th.DAGClient
ctx := context.Background()
status, err := cli.GetDAGStatus(ctx, dagFile.Location)
status, err := cli.Status(ctx, dagFile.Location)
require.NoError(t, err)
require.Equal(t, status.Status.Status, scheduler.StatusSuccess)
require.NotNil(t, status.Status)

View File

@ -9,7 +9,7 @@ import (
"github.com/dagu-org/dagu/internal/agent"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/spf13/cobra"
)
@ -104,9 +104,9 @@ func runStart(ctx *Context, args []string) error {
// same request ID, ensuring idempotency across the DAG run from the root DAG.
if rootDAG.RequestID != requestID {
logger.Debug(ctx, "Checking for previous sub-DAG run with the request ID", "requestID", requestID)
var run *persistence.Run
record, err := ctx.historyStore().FindBySubRequestID(ctx, requestID, rootDAG)
if errors.Is(err, persistence.ErrRequestIDNotFound) {
var status *runstore.Status
record, err := ctx.runStore().FindBySubRunRequestID(ctx, requestID, rootDAG)
if errors.Is(err, runstore.ErrRequestIDNotFound) {
// If the request ID is not found, proceed with execution
goto EXEC
}
@ -114,12 +114,12 @@ func runStart(ctx *Context, args []string) error {
logger.Error(ctx, "Failed to retrieve historical run", "requestID", requestID, "err", err)
return fmt.Errorf("failed to retrieve historical run for request ID %s: %w", requestID, err)
}
run, err = record.ReadRun(ctx)
status, err = record.ReadStatus(ctx)
if err != nil {
logger.Error(ctx, "Failed to read previous run status", "requestID", requestID, "err", err)
return fmt.Errorf("failed to read previous run status for request ID %s: %w", requestID, err)
}
return executeRetry(ctx, dag, run, rootDAG)
return executeRetry(ctx, dag, status, rootDAG)
}
EXEC:
@ -144,7 +144,7 @@ func executeDag(ctx *Context, dag *digraph.DAG, requestID string, rootDAG digrap
logger.Debug(ctx, "DAG run initiated", "DAG", dag.Name, "requestID", requestID, "logFile", logFile.Name())
dagStore, err := ctx.dagStore()
dagStore, err := ctx.dagStore([]string{filepath.Dir(dag.Location)})
if err != nil {
logger.Error(ctx, "Failed to initialize DAG store", "err", err)
return fmt.Errorf("failed to initialize DAG store: %w", err)
@ -164,7 +164,7 @@ func executeDag(ctx *Context, dag *digraph.DAG, requestID string, rootDAG digrap
logFile.Name(),
cli,
dagStore,
ctx.historyStore(),
ctx.runStore(),
rootDAG,
opts,
)

View File

@ -3,18 +3,21 @@ package cmd
import (
"fmt"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/spf13/cobra"
)
func CmdStatus() *cobra.Command {
return NewCommand(
&cobra.Command{
Use: "status [flags] /path/to/spec.yaml",
Use: "status --request-id=abc123 dagName",
Short: "Display the current status of a DAG",
Long: `Show real-time status information for a specified DAG run.
Flags:
--request-id string (optional) Unique identifier for tracking the execution.
Example:
dagu status my_dag.yaml
`,
@ -23,13 +26,39 @@ Example:
)
}
var statusFlags = []commandLineFlag{}
var statusFlags = []commandLineFlag{
requestIDFlagStatus,
}
func runStatus(ctx *Context, args []string) error {
dag, err := digraph.Load(ctx, args[0], digraph.WithBaseConfig(ctx.cfg.Paths.BaseConfig))
requestID, err := ctx.Flags().GetString("request-id")
if err != nil {
logger.Error(ctx, "Failed to load DAG", "path", args[0], "err", err)
return fmt.Errorf("failed to load DAG from %s: %w", args[0], err)
return fmt.Errorf("failed to get request ID: %w", err)
}
dagName := args[0]
var record runstore.Record
if requestID != "" {
// Retrieve the previous run's runstore record for the specified request ID.
r, err := ctx.runStore().FindByRequestID(ctx, dagName, requestID)
if err != nil {
logger.Error(ctx, "Failed to retrieve historical run", "requestID", requestID, "err", err)
return fmt.Errorf("failed to retrieve historical run for request ID %s: %w", requestID, err)
}
record = r
} else {
r, err := ctx.runStore().Latest(ctx, dagName)
if err != nil {
logger.Error(ctx, "Failed to retrieve latest runstore record", "dagName", dagName, "err", err)
return fmt.Errorf("failed to retrieve latest runstore record for DAG %s: %w", dagName, err)
}
record = r
}
dag, err := record.ReadDAG(ctx)
if err != nil {
logger.Error(ctx, "Failed to read DAG from record", "dagName", dagName, "err", err)
}
cli, err := ctx.Client()
@ -38,7 +67,7 @@ func runStatus(ctx *Context, args []string) error {
return fmt.Errorf("failed to initialize client: %w", err)
}
status, err := cli.GetCurrentStatus(ctx, dag)
status, err := cli.GetRealtimeStatus(ctx, dag, requestID)
if err != nil {
logger.Error(ctx, "Failed to retrieve current status", "dag", dag.Name, "err", err)
return fmt.Errorf("failed to retrieve current status: %w", err)

View File

@ -25,11 +25,11 @@ func TestStatusCommand(t *testing.T) {
}()
require.Eventually(t, func() bool {
historyRecords := th.HistoryStore.Recent(th.Context, dagFile.Location, 1)
if len(historyRecords) < 1 {
runRecords := th.RunStore.Recent(th.Context, dagFile.Location, 1)
if len(runRecords) < 1 {
return false
}
status, err := historyRecords[0].ReadStatus(th.Context)
status, err := runRecords[0].ReadStatus(th.Context)
if err != nil {
return false
}

View File

@ -11,27 +11,58 @@ import (
func CmdStop() *cobra.Command {
return NewCommand(
&cobra.Command{
Use: "stop [flags] /path/to/spec.yaml",
Use: "stop --request-id=abc123 dagName",
Short: "Stop a running DAG",
Long: `Gracefully terminate an active DAG run.
Flags:
--request-id string (optional) Unique identifier of the DAG run to stop.
This command stops all running tasks of the specified DAG, ensuring resources are properly released.
If the request ID is not provided, the command finds the currently running DAG by name.
Example:
dagu stop my_dag.yaml
dagu stop --request-id=abc123 dagName
`,
Args: cobra.ExactArgs(1),
}, stopFlags, runStop,
)
}
var stopFlags = []commandLineFlag{}
var stopFlags = []commandLineFlag{
requestIDFlagStop,
}
func runStop(ctx *Context, args []string) error {
dag, err := digraph.Load(ctx, args[0], digraph.WithBaseConfig(ctx.cfg.Paths.BaseConfig))
requestID, err := ctx.Flags().GetString("request-id")
if err != nil {
logger.Error(ctx, "Failed to load DAG", "err", err)
return fmt.Errorf("failed to load DAG from %s: %w", args[0], err)
return fmt.Errorf("failed to get request ID: %w", err)
}
dagName := args[0]
var dag *digraph.DAG
if requestID != "" {
// Retrieve the previous run's history record for the specified request ID.
runRecord, err := ctx.runStore().FindByRequestID(ctx, dagName, requestID)
if err != nil {
logger.Error(ctx, "Failed to retrieve historical run", "requestID", requestID, "err", err)
return fmt.Errorf("failed to retrieve historical run for request ID %s: %w", requestID, err)
}
d, err := runRecord.ReadDAG(ctx)
if err != nil {
logger.Error(ctx, "Failed to read DAG from history record", "err", err)
return fmt.Errorf("failed to read DAG from history record: %w", err)
}
dag = d
} else {
d, err := digraph.Load(ctx, args[0], digraph.WithBaseConfig(ctx.cfg.Paths.BaseConfig))
if err != nil {
logger.Error(ctx, "Failed to load DAG", "err", err)
return fmt.Errorf("failed to load DAG from %s: %w", args[0], err)
}
dag = d
}
logger.Info(ctx, "DAG is stopping", "dag", dag.Name)
@ -42,7 +73,7 @@ func runStop(ctx *Context, args []string) error {
return fmt.Errorf("failed to initialize client: %w", err)
}
if err := cli.StopDAG(ctx, dag); err != nil {
if err := cli.Stop(ctx, dag, requestID); err != nil {
logger.Error(ctx, "Failed to stop DAG", "dag", dag.Name, "err", err)
return fmt.Errorf("failed to stop DAG: %w", err)
}

View File

@ -7,6 +7,7 @@ import (
"github.com/dagu-org/dagu/internal/cmd"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/test"
"github.com/google/uuid"
)
func TestStopCommand(t *testing.T) {
@ -37,4 +38,32 @@ func TestStopCommand(t *testing.T) {
dagFile.AssertLatestStatus(t, scheduler.StatusCancel)
<-done
})
t.Run("StopDAGWithRequestID", func(t *testing.T) {
th := test.SetupCommand(t)
dagFile := th.DAG(t, "cmd/stop.yaml")
done := make(chan struct{})
reqId := uuid.Must(uuid.NewV7()).String()
go func() {
// Start the DAG to stop.
args := []string{"start", "--request-id=" + reqId, dagFile.Location}
th.RunCommand(t, cmd.CmdStart(), test.CmdTest{Args: args})
close(done)
}()
time.Sleep(time.Millisecond * 100)
// Wait for the DAG to be running.
dagFile.AssertLatestStatus(t, scheduler.StatusRunning)
// Stop the DAG.
th.RunCommand(t, cmd.CmdStop(), test.CmdTest{
Args: []string{"stop", dagFile.Location, "--request-id=" + reqId},
ExpectedOut: []string{"DAG stopped"}})
// Check the DAG is stopped.
dagFile.AssertLatestStatus(t, scheduler.StatusCancel)
<-done
})
}

258
internal/dagstore/client.go Normal file
View File

@ -0,0 +1,258 @@
package dagstore
import (
"context"
"fmt"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/runstore"
)
// NewClient creates a new DAG client.
// It takes a run client for interacting with run history and a DAG store for
// managing DAG specifications and metadata.
func NewClient(
runCli runstore.Client,
dagStore Store,
) Client {
return Client{
runClient: runCli,
dagStore: dagStore,
}
}
// Client provides operations for managing DAGs in the DAG store.
// It wraps the underlying DAG store and run client to provide a unified interface
// for DAG operations.
type Client struct {
runClient runstore.Client // Client for interacting with run history
dagStore Store // Store for DAG specifications and metadata
}
var (
// dagTemplate is the default template used when creating a new DAG.
// It contains a minimal DAG with a single step that echoes "hello".
dagTemplate = []byte(`steps:
  - name: step1
    command: echo hello
`)
)
// GetSpec retrieves the YAML specification of a DAG by its ID.
// It returns the specification as a string or an error if the DAG cannot be found.
func (cli *Client) GetSpec(ctx context.Context, id string) (string, error) {
return cli.dagStore.GetSpec(ctx, id)
}
// Create creates a new DAG with the given name using the default template.
// It returns the ID of the newly created DAG or an error if creation fails.
func (cli *Client) Create(ctx context.Context, name string) (string, error) {
id, err := cli.dagStore.Create(ctx, name, dagTemplate)
if err != nil {
return "", fmt.Errorf("failed to create DAG: %w", err)
}
return id, nil
}
// Grep searches for DAGs matching the given pattern.
// It returns a list of grep results, a list of errors encountered during the search,
// and an error if the search operation fails.
func (cli *Client) Grep(ctx context.Context, pattern string) (
[]*GrepResult, []string, error,
) {
return cli.dagStore.Grep(ctx, pattern)
}
// Move relocates a DAG from one location to another.
// It updates both the DAG's location in the store and its run history.
// Returns an error if any part of the move operation fails.
func (cli *Client) Move(ctx context.Context, oldLoc, newLoc string) error {
oldDAG, err := cli.dagStore.GetMetadata(ctx, oldLoc)
if err != nil {
return fmt.Errorf("failed to get metadata for %s: %w", oldLoc, err)
}
if err := cli.dagStore.Rename(ctx, oldLoc, newLoc); err != nil {
return err
}
newDAG, err := cli.dagStore.GetMetadata(ctx, newLoc)
if err != nil {
return fmt.Errorf("failed to get metadata for %s: %w", newLoc, err)
}
if err := cli.runClient.Rename(ctx, oldDAG.Name, newDAG.Name); err != nil {
return fmt.Errorf("failed to rename history for %s: %w", oldLoc, err)
}
return nil
}
// Update modifies the specification of an existing DAG.
// It takes the DAG ID and the new specification as a string.
// Returns an error if the update operation fails.
func (cli *Client) Update(ctx context.Context, id string, spec string) error {
return cli.dagStore.UpdateSpec(ctx, id, []byte(spec))
}
// Delete removes a DAG from the store.
// It takes the name of the DAG to delete.
// Returns an error if the delete operation fails.
func (cli *Client) Delete(ctx context.Context, name string) error {
return cli.dagStore.Delete(ctx, name)
}
// ListDAGOptions defines the options for listing DAGs from the DAG store.
type ListDAGOptions struct {
// Number of items to return per page
Limit *int
// Page number (for pagination)
Page *int
// Filter DAGs by matching name
Name *string
// Filter DAGs by matching tag
Tag *string
}
// List retrieves a paginated list of DAGs with their statuses.
// It accepts optional functional options for configuring the listing operation.
// Returns a paginated result containing DAG statuses, a list of errors encountered
// during the listing, and an error if the listing operation fails.
func (cli *Client) List(ctx context.Context, opts ...ListDAGOption) (*PaginatedResult[Status], []string, error) {
var options ListDAGOptions
for _, opt := range opts {
opt(&options)
}
if options.Limit == nil {
options.Limit = new(int)
*options.Limit = 100
}
if options.Page == nil {
options.Page = new(int)
*options.Page = 1
}
pg := NewPaginator(*options.Page, *options.Limit)
dags, errList, err := cli.dagStore.List(ctx, ListOptions{
Paginator: &pg,
Name: ptrOf(options.Name),
Tag: ptrOf(options.Tag),
})
if err != nil {
return nil, errList, err
}
var items []Status
for _, d := range dags.Items {
status, err := cli.runClient.GetLatestStatus(ctx, d)
if err != nil {
errList = append(errList, err.Error())
}
items = append(items, Status{
DAG: d,
Status: status,
Suspended: cli.IsSuspended(ctx, d.Location),
Error: err,
})
}
r := NewPaginatedResult(items, dags.TotalCount, pg)
return &r, errList, nil
}
// ListDAGOption is a functional option type for configuring ListDAGOptions.
type ListDAGOption func(*ListDAGOptions)
// WithLimit sets the limit for the number of items to return per page.
func WithLimit(limit int) ListDAGOption {
return func(opt *ListDAGOptions) {
opt.Limit = &limit
}
}
// WithPage sets the page number for pagination.
func WithPage(page int) ListDAGOption {
return func(opt *ListDAGOptions) {
opt.Page = &page
}
}
// WithName sets the file name filter for the DAGs to be listed.
func WithName(name string) ListDAGOption {
return func(opt *ListDAGOptions) {
opt.Name = &name
}
}
// WithTag sets the tag filter for the DAGs to be listed.
func WithTag(tag string) ListDAGOption {
return func(opt *ListDAGOptions) {
opt.Tag = &tag
}
}
// getDAG retrieves a DAG by its location.
// It returns the DAG and any error encountered during retrieval.
// If the DAG is nil but a location is provided, it returns an empty DAG with the location set.
func (cli *Client) getDAG(ctx context.Context, loc string) (*digraph.DAG, error) {
dagDetail, err := cli.dagStore.GetDetails(ctx, loc)
return cli.emptyDAGIfNil(dagDetail, loc), err
}
// Status retrieves the status of a DAG by its location.
// It returns a Status object containing the DAG, its latest run status,
// whether it's suspended, and any error encountered during retrieval.
func (cli *Client) Status(ctx context.Context, loc string) (Status, error) {
dag, err := cli.getDAG(ctx, loc)
if dag == nil {
// TODO: avoid falling back to the location as the DAG name here
dag = &digraph.DAG{Name: loc, Location: loc}
}
if err == nil {
// Check that the DAG's steps form a valid execution graph.
_, err = scheduler.NewExecutionGraph(dag.Steps...)
}
latestStatus, _ := cli.runClient.GetLatestStatus(ctx, dag)
return NewStatus(
dag, latestStatus, cli.IsSuspended(ctx, loc), err,
), err
}
// ToggleSuspend changes the suspension state of a DAG.
// It takes the location of the DAG and a boolean indicating whether to suspend it.
// Returns an error if the operation fails.
func (cli *Client) ToggleSuspend(_ context.Context, loc string, suspend bool) error {
return cli.dagStore.ToggleSuspend(loc, suspend)
}
// emptyDAGIfNil returns the provided DAG if it's not nil,
// otherwise it returns an empty DAG with the provided location.
// This is a helper method to avoid nil pointer dereferences.
func (*Client) emptyDAGIfNil(dag *digraph.DAG, dagLocation string) *digraph.DAG {
if dag != nil {
return dag
}
return &digraph.DAG{Location: dagLocation}
}
// IsSuspended checks if a DAG is currently suspended.
// It takes the ID of the DAG to check.
// Returns true if the DAG is suspended, false otherwise.
func (cli *Client) IsSuspended(_ context.Context, id string) bool {
return cli.dagStore.IsSuspended(id)
}
// TagList retrieves a list of all tags used in the DAG store.
// It returns a list of tags, a list of errors encountered during retrieval,
// and an error if the operation fails.
func (cli *Client) TagList(ctx context.Context) ([]string, []string, error) {
return cli.dagStore.TagList(ctx)
}
// ptrOf returns the value pointed to by p, or the zero value of type T if p is nil.
// This is a generic helper function for safely dereferencing pointers.
func ptrOf[T any](p *T) T {
var zero T
if p == nil {
return zero
}
return *p
}
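As a quick orientation, a hypothetical external caller of the new dagstore.Client might look like the following; the variable names, spec literal, and the pre-existing runCli/store values are assumptions, not part of this commit:

cli := dagstore.NewClient(runCli, store) // runCli (runstore.Client) and store (dagstore.Store) assumed to exist
id, err := cli.Create(ctx, "my-dag")     // new file seeded from dagTemplate
if err != nil {
	return err
}
spec := "steps:\n  - name: step1\n    command: echo updated\n"
if err := cli.Update(ctx, id, spec); err != nil {
	return err
}
page, warnings, err := cli.List(ctx, dagstore.WithTag("daily"), dagstore.WithLimit(20))
if err != nil {
	return err
}
for _, st := range page.Items {
	fmt.Println(st.DAG.Name, st.Suspended)
}
_ = warnings // non-fatal errors collected while listing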

View File

@ -0,0 +1,173 @@
package dagstore_test
import (
"fmt"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/test"
)
func TestDAGClient(t *testing.T) {
t.Parallel()
th := test.Setup(t)
t.Run("Update", func(t *testing.T) {
ctx := th.Context
cli := th.DAGClient
// valid DAG
validDAG := `name: test DAG
steps:
- name: "1"
command: "true"
`
// Update Error: the DAG does not exist
err := cli.Update(ctx, "non-existing-dag", validDAG)
require.Error(t, err)
// create a new DAG file
id, err := cli.Create(ctx, "new-dag-file")
require.NoError(t, err)
// Update the DAG
err = cli.Update(ctx, id, validDAG)
require.NoError(t, err)
// Check the content of the DAG file
spec, err := cli.GetSpec(ctx, id)
require.NoError(t, err)
require.Equal(t, validDAG, spec)
})
t.Run("Remove", func(t *testing.T) {
ctx := th.Context
cli := th.DAGClient
spec := `name: test DAG
steps:
- name: "1"
command: "true"
`
id, err := cli.Create(ctx, "test")
require.NoError(t, err)
err = cli.Update(ctx, id, spec)
require.NoError(t, err)
// check file
newSpec, err := cli.GetSpec(ctx, id)
require.NoError(t, err)
require.Equal(t, spec, newSpec)
// delete
err = cli.Delete(ctx, id)
require.NoError(t, err)
})
t.Run("Create", func(t *testing.T) {
ctx := th.Context
cli := th.DAGClient
id, err := cli.Create(ctx, "test-dag")
require.NoError(t, err)
// Check if the new DAG is actually created.
filePath := filepath.Join(th.Config.Paths.DAGsDir, id+".yaml")
dag, err := digraph.Load(ctx, filePath)
require.NoError(t, err)
require.Equal(t, "test-dag", dag.Name)
})
t.Run("Rename", func(t *testing.T) {
ctx := th.Context
cli := th.DAGClient
// Create a DAG to rename.
id, err := cli.Create(ctx, "old_name")
require.NoError(t, err)
_, err = cli.Status(ctx, filepath.Join(th.Config.Paths.DAGsDir, id+".yaml"))
require.NoError(t, err)
// Rename the file.
err = cli.Move(ctx, id, id+"_renamed")
// Check if the file is renamed.
require.NoError(t, err)
require.FileExists(t, filepath.Join(th.Config.Paths.DAGsDir, id+"_renamed.yaml"))
})
t.Run("TestClient_Empty", func(t *testing.T) {
ctx := th.Context
cli := th.DAGClient
dag := th.DAG(t, filepath.Join("client", "empty_status.yaml"))
_, err := cli.Status(ctx, dag.Location)
require.NoError(t, err)
})
t.Run("TestClient_All", func(t *testing.T) {
th := test.Setup(t)
ctx := th.Context
cli := th.DAGClient
// Create a DAG
_, err := cli.Create(ctx, "test-dag1")
require.NoError(t, err)
_, err = cli.Create(ctx, "test-dag2")
require.NoError(t, err)
// Get all statuses.
result, errList, err := cli.List(ctx)
require.NoError(t, err)
require.Empty(t, errList)
require.Equal(t, 2, len(result.Items))
})
t.Run("InvalidDAGName", func(t *testing.T) {
ctx := th.Context
cli := th.DAGClient
dagStatus, err := cli.Status(ctx, "invalid-dag-name")
require.Error(t, err)
require.NotNil(t, dagStatus)
// Check the status contains error.
require.Error(t, dagStatus.Error)
})
}
func TestClient_GetTagList(t *testing.T) {
th := test.Setup(t)
ctx := th.Context
cli := th.DAGClient
// Create DAG List
for i := 0; i < 40; i++ {
spec := ""
id, err := cli.Create(ctx, "1test-dag-pagination"+fmt.Sprintf("%d", i))
require.NoError(t, err)
if i%2 == 0 {
spec = "tags: tag1,tag2\nsteps:\n - name: step1\n command: echo hello\n"
} else {
spec = "tags: tag2,tag3\nsteps:\n - name: step1\n command: echo hello\n"
}
if err = cli.Update(ctx, id, spec); err != nil {
t.Fatal(err)
}
}
tags, errs, err := cli.TagList(ctx)
require.NoError(t, err)
require.Equal(t, 0, len(errs))
require.Equal(t, 3, len(tags))
mapTags := make(map[string]bool)
for _, tag := range tags {
mapTags[tag] = true
}
require.True(t, mapTags["tag1"])
require.True(t, mapTags["tag2"])
require.True(t, mapTags["tag3"])
}

View File

@ -1,4 +1,4 @@
package local
package filestore
import (
"context"
@ -8,58 +8,91 @@ import (
"os"
"path"
"path/filepath"
"regexp"
"slices"
"strings"
"github.com/dagu-org/dagu/internal/dagstore"
"github.com/dagu-org/dagu/internal/dagstore/grep"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/fileutil"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/persistence/filecache"
"github.com/dagu-org/dagu/internal/persistence/grep"
)
var _ persistence.DAGStore = (*dagStoreImpl)(nil)
var _ dagstore.Store = (*fileStore)(nil)
// DAGStoreOption is a functional option for configuring the DAG store
type DAGStoreOption func(*DAGStoreOptions)
// Option is a functional option for configuring the DAG store
type Option func(*Options)
// DAGStoreOptions contains configuration options for the DAG store
type DAGStoreOptions struct {
FileCache *filecache.Cache[*digraph.DAG] // Optional cache for DAG objects
// Options contains configuration options for the DAG store
type Options struct {
FlagsBaseDir string // Base directory for flag storage
FileCache *fileutil.Cache[*digraph.DAG] // Optional cache for DAG objects
SearchPaths []string // Additional search paths for DAG files
}
// WithFileCache returns a DAGStoreOption that sets the file cache for DAG storage
func WithFileCache(cache *filecache.Cache[*digraph.DAG]) DAGStoreOption {
return func(o *DAGStoreOptions) {
func WithFileCache(cache *fileutil.Cache[*digraph.DAG]) Option {
return func(o *Options) {
o.FileCache = cache
}
}
// dagStoreImpl implements the DAGStore interface with local filesystem storage
type dagStoreImpl struct {
baseDir string // Base directory for DAG storage
fileCache *filecache.Cache[*digraph.DAG] // Optional cache for DAG objects
// WithFlagsBaseDir returns an Option that sets the base directory for flag storage
func WithFlagsBaseDir(dir string) Option {
return func(o *Options) {
o.FlagsBaseDir = dir
}
}
// NewDAGStore creates a new DAG store implementation using the local filesystem
func NewDAGStore(baseDir string, opts ...DAGStoreOption) persistence.DAGStore {
options := &DAGStoreOptions{}
// WithSearchPaths returns an Option that sets additional search paths for DAG files
func WithSearchPaths(paths []string) Option {
return func(o *Options) {
o.SearchPaths = paths
}
}
// fileStore implements the DAGStore interface with local filesystem storage
type fileStore struct {
baseDir string // Base directory for DAG storage
flagsBaseDir string // Base directory for flag storage
fileCache *fileutil.Cache[*digraph.DAG] // Optional cache for DAG objects
searchPaths []string // Additional search paths for DAG files
}
// New creates a new DAG store implementation using the local filesystem
func New(baseDir string, opts ...Option) dagstore.Store {
options := &Options{}
for _, opt := range opts {
opt(options)
}
if options.FlagsBaseDir == "" {
options.FlagsBaseDir = filepath.Join(baseDir, "flags")
}
uniqSearchPaths := make(map[string]struct{})
uniqSearchPaths[baseDir] = struct{}{}
uniqSearchPaths["."] = struct{}{}
for _, path := range options.SearchPaths {
uniqSearchPaths[path] = struct{}{}
}
searchPaths := make([]string, 0, len(uniqSearchPaths))
for path := range uniqSearchPaths {
searchPaths = append(searchPaths, path)
}
return &dagStoreImpl{
baseDir: baseDir,
fileCache: options.FileCache,
return &fileStore{
baseDir: baseDir,
flagsBaseDir: options.FlagsBaseDir,
fileCache: options.FileCache,
searchPaths: searchPaths,
}
}
// GetMetadata retrieves the metadata of a DAG by its name.
func (d *dagStoreImpl) GetMetadata(ctx context.Context, name string) (*digraph.DAG, error) {
func (d *fileStore) GetMetadata(ctx context.Context, name string) (*digraph.DAG, error) {
filePath, err := d.locateDAG(name)
if err != nil {
return nil, fmt.Errorf("failed to locate DAG %s: %w", name, err)
return nil, fmt.Errorf("failed to locate DAG %s in search paths (%v): %w", name, d.searchPaths, err)
}
if d.fileCache == nil {
return digraph.Load(ctx, filePath, digraph.OnlyMetadata(), digraph.WithoutEval())
@ -70,7 +103,7 @@ func (d *dagStoreImpl) GetMetadata(ctx context.Context, name string) (*digraph.D
}
// GetDetails retrieves the details of a DAG by its name.
func (d *dagStoreImpl) GetDetails(ctx context.Context, name string) (*digraph.DAG, error) {
func (d *fileStore) GetDetails(ctx context.Context, name string) (*digraph.DAG, error) {
filePath, err := d.locateDAG(name)
if err != nil {
return nil, fmt.Errorf("failed to locate DAG %s: %w", name, err)
@ -83,12 +116,12 @@ func (d *dagStoreImpl) GetDetails(ctx context.Context, name string) (*digraph.DA
}
// GetSpec retrieves the specification of a DAG by its name.
func (d *dagStoreImpl) GetSpec(_ context.Context, name string) (string, error) {
func (d *fileStore) GetSpec(_ context.Context, name string) (string, error) {
filePath, err := d.locateDAG(name)
if err != nil {
return "", persistence.ErrDAGNotFound
return "", dagstore.ErrDAGNotFound
}
dat, err := os.ReadFile(filePath)
dat, err := os.ReadFile(filePath) // nolint:gosec
if err != nil {
return "", err
}
@ -98,14 +131,14 @@ func (d *dagStoreImpl) GetSpec(_ context.Context, name string) (string, error) {
// FileMode used for newly created DAG files
const defaultPerm os.FileMode = 0600
func (d *dagStoreImpl) LoadSpec(ctx context.Context, spec []byte, opts ...digraph.LoadOption) (*digraph.DAG, error) {
func (d *fileStore) LoadSpec(ctx context.Context, spec []byte, opts ...digraph.LoadOption) (*digraph.DAG, error) {
// Validate the spec before saving it.
opts = append(slices.Clone(opts), digraph.WithoutEval())
return digraph.LoadYAML(ctx, spec, opts...)
}
// UpdateSpec updates the specification of a DAG by its name.
func (d *dagStoreImpl) UpdateSpec(ctx context.Context, name string, spec []byte) error {
func (d *fileStore) UpdateSpec(ctx context.Context, name string, spec []byte) error {
// Validate the spec before saving it.
dag, err := digraph.LoadYAML(ctx, spec, digraph.WithoutEval())
if err != nil {
@ -128,13 +161,13 @@ func (d *dagStoreImpl) UpdateSpec(ctx context.Context, name string, spec []byte)
}
// Create creates a new DAG with the given name and specification.
func (d *dagStoreImpl) Create(_ context.Context, name string, spec []byte) (string, error) {
func (d *fileStore) Create(_ context.Context, name string, spec []byte) (string, error) {
if err := d.ensureDirExist(); err != nil {
return "", fmt.Errorf("failed to create DAGs directory %s: %w", d.baseDir, err)
}
filePath := d.generateFilePath(name)
if fileExists(filePath) {
return "", persistence.ErrDAGAlreadyExists
return "", dagstore.ErrDAGAlreadyExists
}
if err := os.WriteFile(filePath, spec, defaultPerm); err != nil {
return "", fmt.Errorf("failed to write DAG %s: %w", name, err)
@ -143,7 +176,7 @@ func (d *dagStoreImpl) Create(_ context.Context, name string, spec []byte) (stri
}
// Delete deletes a DAG by its name.
func (d *dagStoreImpl) Delete(_ context.Context, name string) error {
func (d *fileStore) Delete(_ context.Context, name string) error {
filePath, err := d.locateDAG(name)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
@ -161,7 +194,7 @@ func (d *dagStoreImpl) Delete(_ context.Context, name string) error {
}
// ensureDirExist ensures that the base directory exists.
func (d *dagStoreImpl) ensureDirExist() error {
func (d *fileStore) ensureDirExist() error {
if !fileExists(d.baseDir) {
if err := os.MkdirAll(d.baseDir, 0750); err != nil {
return err
@ -171,13 +204,13 @@ func (d *dagStoreImpl) ensureDirExist() error {
}
// List lists DAGs with pagination support.
func (d *dagStoreImpl) List(ctx context.Context, opts persistence.ListOptions) (persistence.PaginatedResult[*digraph.DAG], []string, error) {
func (d *fileStore) List(ctx context.Context, opts dagstore.ListOptions) (dagstore.PaginatedResult[*digraph.DAG], []string, error) {
var dags []*digraph.DAG
var errList []string
var totalCount int
if opts.Paginator == nil {
p := persistence.DefaultPaginator()
p := dagstore.DefaultPaginator()
opts.Paginator = &p
}
@ -224,7 +257,7 @@ func (d *dagStoreImpl) List(ctx context.Context, opts persistence.ListOptions) (
return nil
})
result := persistence.NewPaginatedResult(
result := dagstore.NewPaginatedResult(
dags, totalCount, *opts.Paginator,
)
if err != nil {
@ -235,8 +268,8 @@ func (d *dagStoreImpl) List(ctx context.Context, opts persistence.ListOptions) (
}
// Grep searches for a pattern in all DAGs.
func (d *dagStoreImpl) Grep(ctx context.Context, pattern string) (
ret []*persistence.GrepResult, errs []string, err error,
func (d *fileStore) Grep(ctx context.Context, pattern string) (
ret []*dagstore.GrepResult, errs []string, err error,
) {
if pattern == "" {
// return empty result if pattern is empty
@ -257,12 +290,12 @@ func (d *dagStoreImpl) Grep(ctx context.Context, pattern string) (
for _, entry := range entries {
if fileutil.IsYAMLFile(entry.Name()) {
filePath := filepath.Join(d.baseDir, entry.Name())
dat, err := os.ReadFile(filePath)
dat, err := os.ReadFile(filePath) //nolint:gosec
if err != nil {
logger.Error(ctx, "Failed to read DAG file", "file", entry.Name(), "err", err)
continue
}
matches, err := grep.Grep(dat, fmt.Sprintf("(?i)%s", pattern), grep.DefaultOptions)
matches, err := grep.Grep(dat, fmt.Sprintf("(?i)%s", pattern), grep.DefaultGrepOptions)
if err != nil {
if errors.Is(err, grep.ErrNoMatch) {
continue
@ -275,7 +308,7 @@ func (d *dagStoreImpl) Grep(ctx context.Context, pattern string) (
errs = append(errs, fmt.Sprintf("check %s failed: %s", entry.Name(), err))
continue
}
ret = append(ret, &persistence.GrepResult{
ret = append(ret, &dagstore.GrepResult{
Name: strings.TrimSuffix(entry.Name(), path.Ext(entry.Name())),
DAG: dag,
Matches: matches,
@ -285,21 +318,54 @@ func (d *dagStoreImpl) Grep(ctx context.Context, pattern string) (
return ret, errs, nil
}
func (f fileStore) ToggleSuspend(id string, suspend bool) error {
if suspend {
return f.createFlag(fileName(id))
} else if f.IsSuspended(id) {
return f.deleteFlag(fileName(id))
}
return nil
}
func (f fileStore) IsSuspended(id string) bool {
return f.flagExists(fileName(id))
}
func fileName(id string) string {
return fmt.Sprintf("%s.suspend", normalizeFilename(id, "-"))
}
// https://github.com/sindresorhus/filename-reserved-regex/blob/master/index.js
var (
filenameReservedRegex = regexp.MustCompile(
`[<>:"/\\|?*\x00-\x1F]`,
)
filenameReservedWindowsNamesRegex = regexp.MustCompile(
`(?i)^(con|prn|aux|nul|com[0-9]|lpt[0-9])$`,
)
)
func normalizeFilename(str, replacement string) string {
s := filenameReservedRegex.ReplaceAllString(str, replacement)
s = filenameReservedWindowsNamesRegex.ReplaceAllString(s, replacement)
return strings.ReplaceAll(s, " ", replacement)
}
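// Illustrative example (not part of the original diff): given the rules above, a DAG
// ID such as "my dag/prod" is normalized to "my-dag-prod", so its suspend flag is
// stored as "my-dag-prod.suspend" under the flags base directory.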
// Rename renames a DAG from oldID to newID.
func (d *dagStoreImpl) Rename(_ context.Context, oldID, newID string) error {
func (d *fileStore) Rename(_ context.Context, oldID, newID string) error {
oldFilePath, err := d.locateDAG(oldID)
if err != nil {
return fmt.Errorf("failed to locate DAG %s: %w", oldID, err)
}
newFilePath := d.generateFilePath(newID)
if fileExists(newFilePath) {
return persistence.ErrDAGAlreadyExists
return dagstore.ErrDAGAlreadyExists
}
return os.Rename(oldFilePath, newFilePath)
}
// generateFilePath generates the file path for a DAG by its name.
func (d *dagStoreImpl) generateFilePath(name string) string {
func (d *fileStore) generateFilePath(name string) string {
if strings.Contains(name, string(filepath.Separator)) {
filePath, err := filepath.Abs(name)
if err == nil {
@ -311,7 +377,7 @@ func (d *dagStoreImpl) generateFilePath(name string) string {
}
// locateDAG locates the DAG file by its name or path.
func (d *dagStoreImpl) locateDAG(nameOrPath string) (string, error) {
func (d *fileStore) locateDAG(nameOrPath string) (string, error) {
if strings.Contains(nameOrPath, string(filepath.Separator)) {
foundPath, err := findDAGFile(nameOrPath)
if err == nil {
@ -319,9 +385,12 @@ func (d *dagStoreImpl) locateDAG(nameOrPath string) (string, error) {
}
}
searchPaths := []string{".", d.baseDir}
for _, dir := range searchPaths {
candidatePath := filepath.Join(dir, nameOrPath)
for _, dir := range d.searchPaths {
absDir, err := filepath.Abs(dir)
if err != nil {
continue
}
candidatePath := filepath.Join(absDir, nameOrPath)
foundPath, err := findDAGFile(candidatePath)
if err == nil {
return foundPath, nil
@ -329,7 +398,7 @@ func (d *dagStoreImpl) locateDAG(nameOrPath string) (string, error) {
}
// DAG not found
return "", fmt.Errorf("workflow %s not found: %w", nameOrPath, os.ErrNotExist)
return "", fmt.Errorf("DAG %s not found: %w", nameOrPath, os.ErrNotExist)
}
// findDAGFile finds the DAG file with the given file name.
@ -352,7 +421,7 @@ func findDAGFile(name string) (string, error) {
}
// TagList lists all unique tags from the DAGs.
func (d *dagStoreImpl) TagList(ctx context.Context) ([]string, []string, error) {
func (d *fileStore) TagList(ctx context.Context) ([]string, []string, error) {
var (
errList []string
tagSet = make(map[string]struct{})
@ -388,6 +457,28 @@ func (d *dagStoreImpl) TagList(ctx context.Context) ([]string, []string, error)
return tagList, errList, nil
}
// createFlag creates the given flag file.
func (s *fileStore) createFlag(file string) error {
_ = os.MkdirAll(s.flagsBaseDir, flagPermission)
return os.WriteFile(path.Join(s.flagsBaseDir, file), []byte{}, flagPermission)
}
// flagExists returns true if the given file exists.
func (s *fileStore) flagExists(file string) bool {
_ = os.MkdirAll(s.flagsBaseDir, flagPermission)
_, err := os.Stat(path.Join(s.flagsBaseDir, file))
return err == nil
}
// deleteFlag deletes the given file.
func (s *fileStore) deleteFlag(file string) error {
_ = os.MkdirAll(s.flagsBaseDir, flagPermission)
return os.Remove(path.Join(s.flagsBaseDir, file))
}
// flagPermission is the default file permission for newly created files.
var flagPermission os.FileMode = 0750
// containsSearchText checks if the text contains the search string (case-insensitive).
func containsSearchText(text string, search string) bool {
return strings.Contains(strings.ToLower(text), strings.ToLower(search))

View File

@ -0,0 +1,26 @@
package filestore
import (
"os"
"testing"
"github.com/dagu-org/dagu/internal/fileutil"
"github.com/stretchr/testify/require"
)
func TestDAGStore(t *testing.T) {
tmpDir := fileutil.MustTempDir("test-suspend-checker")
defer func() {
_ = os.RemoveAll(tmpDir)
}()
dagStore := New(tmpDir)
require.False(t, dagStore.IsSuspended("test"))
err := dagStore.ToggleSuspend("test", true)
require.NoError(t, err)
require.True(t, dagStore.IsSuspended("test"))
}

View File

@ -7,6 +7,7 @@ import (
"regexp"
"strings"
"github.com/dagu-org/dagu/internal/dagstore"
"github.com/samber/lo"
)
@ -23,24 +24,17 @@ type Matcher interface {
Match(line string) bool
}
// Options represents grep options.
// GrepOptions represents grep options.
// If IsRegexp is true, the pattern is treated as a regular expression.
// Before and After are the number of lines before and after the matched line.
type Options struct {
type GrepOptions struct {
IsRegexp bool
Before int
After int
Matcher Matcher
}
// Match contains matched line number and line content.
type Match struct {
Line string
LineNumber int
StartLine int
}
var DefaultOptions = Options{
var DefaultGrepOptions = GrepOptions{
IsRegexp: true,
Before: 2,
After: 2,
@ -48,7 +42,7 @@ var DefaultOptions = Options{
// Grep reads data and returns lines that match the given pattern.
// If opts is nil, default options will be used.
func Grep(dat []byte, pattern string, opts Options) ([]*Match, error) {
func Grep(dat []byte, pattern string, opts GrepOptions) ([]*dagstore.Match, error) {
if pattern == "" {
return nil, ErrEmptyPattern
}
@ -67,7 +61,7 @@ func Grep(dat []byte, pattern string, opts Options) ([]*Match, error) {
}
// getMatcher returns a matcher based on the pattern and options.
func getMatcher(pattern string, opts Options) (Matcher, error) {
func getMatcher(pattern string, opts GrepOptions) (Matcher, error) {
if opts.Matcher != nil {
return opts.Matcher, nil
}
@ -97,15 +91,15 @@ func scanLines(dat []byte, matcher Matcher) ([]string, []int, error) {
}
// buildMatches constructs Match objects from matched line indices.
func buildMatches(lines []string, matches []int, opts Options) []*Match {
var ret []*Match
func buildMatches(lines []string, matches []int, opts GrepOptions) []*dagstore.Match {
var ret []*dagstore.Match
for _, m := range matches {
low := lo.Max([]int{0, m - opts.Before})
high := lo.Min([]int{len(lines), m + opts.After + 1})
matchText := strings.Join(lines[low:high], "\n")
ret = append(ret, &Match{
ret = append(ret, &dagstore.Match{
StartLine: low + 1,
LineNumber: m + 1,
Line: matchText,
@ -114,7 +108,7 @@ func buildMatches(lines []string, matches []int, opts Options) []*Match {
return ret
}
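// Illustrative sketch (not part of this change): calling Grep with one line of
// context before and after each match. The input bytes and pattern are
// assumptions used only for illustration.
func exampleGrepUsage() ([]*dagstore.Match, error) {
	data := []byte("aa\nbb\ncc\ndd\n")
	return Grep(data, "cc", GrepOptions{Before: 1, After: 1})
}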
func defaultMatcher(pattern string, opts Options) (Matcher, error) {
func defaultMatcher(pattern string, opts GrepOptions) (Matcher, error) {
if opts.IsRegexp {
reg, err := regexp.Compile(pattern)
if err != nil {

View File

@ -5,6 +5,7 @@ import (
"path/filepath"
"testing"
"github.com/dagu-org/dagu/internal/dagstore"
"github.com/stretchr/testify/require"
)
@ -16,15 +17,15 @@ func TestGrep(t *testing.T) {
Name string
File string
Pattern string
Opts Options
Want []*Match
Opts GrepOptions
Want []*dagstore.Match
IsErr bool
}{
{
Name: "simple",
File: filepath.Join(dir, "test.txt"),
Pattern: "b",
Want: []*Match{
Want: []*dagstore.Match{
{
LineNumber: 2,
StartLine: 2,
@ -35,10 +36,10 @@ func TestGrep(t *testing.T) {
Name: "regexp",
File: filepath.Join(dir, "test.txt"),
Pattern: "^b.",
Opts: Options{
Opts: GrepOptions{
IsRegexp: true,
},
Want: []*Match{
Want: []*dagstore.Match{
{
LineNumber: 2,
StartLine: 2,
@ -49,10 +50,10 @@ func TestGrep(t *testing.T) {
Name: "before",
File: filepath.Join(dir, "test.txt"),
Pattern: "b",
Opts: Options{
Opts: GrepOptions{
Before: 1,
},
Want: []*Match{
Want: []*dagstore.Match{
{
LineNumber: 2,
StartLine: 1,
@ -63,11 +64,11 @@ func TestGrep(t *testing.T) {
Name: "before+after",
File: filepath.Join(dir, "test.txt"),
Pattern: "cc",
Opts: Options{
Opts: GrepOptions{
Before: 2,
After: 2,
},
Want: []*Match{
Want: []*dagstore.Match{
{
LineNumber: 3,
StartLine: 1,
@ -78,11 +79,11 @@ func TestGrep(t *testing.T) {
Name: "before+after,firstline",
File: filepath.Join(dir, "test.txt"),
Pattern: "aa",
Opts: Options{
Opts: GrepOptions{
Before: 1,
After: 1,
},
Want: []*Match{
Want: []*dagstore.Match{
{
LineNumber: 1,
StartLine: 1,
@ -93,11 +94,11 @@ func TestGrep(t *testing.T) {
Name: "before+after,lastline",
File: filepath.Join(dir, "test.txt"),
Pattern: "ee",
Opts: Options{
Opts: GrepOptions{
Before: 1,
After: 1,
},
Want: []*Match{
Want: []*dagstore.Match{
{
LineNumber: 5,
StartLine: 4,
@ -126,7 +127,7 @@ func TestGrep(t *testing.T) {
Name: "invalid regexp",
File: filepath.Join(dir, "test.txt"),
Pattern: "(aa",
Opts: Options{
Opts: GrepOptions{
IsRegexp: true,
},
IsErr: true,

View File

@ -1,4 +1,4 @@
package persistence
package dagstore
const (
defaultPerPage = 50

View File

@ -1,4 +1,4 @@
package persistence
package dagstore
import (
"reflect"

View File

@ -0,0 +1,38 @@
package dagstore
import (
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/runstore"
)
func NewStatus(
dag *digraph.DAG, status runstore.Status, suspended bool, err error,
) Status {
var file string
if dag.Location != "" {
file = dag.Location
}
return Status{
File: file,
DAG: dag,
Status: status,
Suspended: suspended,
Error: err,
}
}
type Status struct {
File string
DAG *digraph.DAG
Status runstore.Status
Suspended bool
Error error
}
// ErrorAsString converts the error to a string if it exists, otherwise returns an empty string.
func (s Status) ErrorAsString() string {
if s.Error == nil {
return ""
}
return s.Error.Error()
}

View File

@ -0,0 +1,72 @@
package dagstore
import (
"context"
"errors"
"github.com/dagu-org/dagu/internal/digraph"
)
// Errors for DAG file operations
var (
ErrDAGAlreadyExists = errors.New("DAG already exists")
ErrDAGNotFound = errors.New("DAG is not found")
)
// Store manages the DAG files and their metadata (e.g., tags, suspend status).
type Store interface {
// Create stores a new DAG definition with the given name and returns its ID
Create(ctx context.Context, name string, spec []byte) (string, error)
// Delete removes a DAG definition by name
Delete(ctx context.Context, name string) error
// List returns a paginated list of DAG definitions with filtering options
List(ctx context.Context, params ListOptions) (PaginatedResult[*digraph.DAG], []string, error)
// GetMetadata retrieves only the metadata of a DAG definition (faster than full load)
GetMetadata(ctx context.Context, name string) (*digraph.DAG, error)
// GetDetails retrieves the complete DAG definition including all fields
GetDetails(ctx context.Context, name string) (*digraph.DAG, error)
// Grep searches for a pattern in all DAG definitions and returns matching results
Grep(ctx context.Context, pattern string) (ret []*GrepResult, errs []string, err error)
// Rename changes a DAG's identifier from oldID to newID
Rename(ctx context.Context, oldID, newID string) error
// GetSpec retrieves the raw YAML specification of a DAG
GetSpec(ctx context.Context, name string) (string, error)
// UpdateSpec modifies the specification of an existing DAG
UpdateSpec(ctx context.Context, name string, spec []byte) error
// LoadSpec loads a DAG from a YAML file and returns the DAG object
LoadSpec(ctx context.Context, spec []byte, opts ...digraph.LoadOption) (*digraph.DAG, error)
// TagList returns all unique tags across all DAGs with any errors encountered
TagList(ctx context.Context) ([]string, []string, error)
// ToggleSuspend changes the suspension state of a DAG by ID
ToggleSuspend(id string, suspend bool) error
// IsSuspended checks if a DAG is currently suspended
IsSuspended(id string) bool
}
// ListOptions contains parameters for paginated DAG listing
type ListOptions struct {
Paginator *Paginator
Name string // Optional name filter
Tag string // Optional tag filter
}
// ListResult contains the result of a paginated DAG listing operation
type ListResult struct {
DAGs []*digraph.DAG // The list of DAGs for the current page
Count int // Total count of DAGs matching the filter
Errors []string // Any errors encountered during listing
}
// GrepResult represents the result of a pattern search within a DAG definition
type GrepResult struct {
Name string // Name of the DAG
DAG *digraph.DAG // The DAG object
Matches []*Match // Matching lines and their context
}
// Match contains matched line number and line content.
type Match struct {
Line string
LineNumber int
StartLine int
}
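// Illustrative sketch (not part of this change): how a caller might exercise the
// Store interface defined above. The DAG name "etl" and the helper function are
// assumptions for illustration; error handling is elided.
func exampleStoreUsage(ctx context.Context, s Store) {
	// Fetch only the metadata of a DAG (cheaper than GetDetails).
	meta, _ := s.GetMetadata(ctx, "etl")
	_ = meta

	// Suspend the DAG, then confirm the suspend flag is set.
	_ = s.ToggleSuspend("etl", true)
	suspended := s.IsSuspended("etl")
	_ = suspended
}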

View File

@ -255,11 +255,11 @@ func buildDotenv(ctx BuildContext, spec *definition, dag *DAG) error {
case string:
dag.Dotenv = append(dag.Dotenv, e)
default:
return wrapError("dotenv", e, ErrDotenvMustBeStringOrArray)
return wrapError("dotenv", e, ErrDotEnvMustBeStringOrArray)
}
}
default:
return wrapError("dotenv", v, ErrDotenvMustBeStringOrArray)
return wrapError("dotenv", v, ErrDotEnvMustBeStringOrArray)
}
if !ctx.opts.NoEval {

View File

@ -15,8 +15,6 @@ import (
)
func TestBuild(t *testing.T) {
t.Parallel()
t.Run("SkipIfSuccessful", func(t *testing.T) {
t.Parallel()
@ -27,7 +25,7 @@ func TestBuild(t *testing.T) {
t.Parallel()
th := testLoad(t, "params_with_substitution.yaml")
th.AssertParam(t, "1=x", "2=x")
th.AssertParam(t, "1=TEST_PARAM", "2=TEST_PARAM")
})
t.Run("ParamsWithQuotedValues", func(t *testing.T) {
t.Parallel()

View File

@ -17,9 +17,9 @@ var ErrConditionNotMet = fmt.Errorf("condition was not met")
// The condition can be a command substitution or an environment variable.
// The expected value must be a string without any substitutions.
type Condition struct {
Command string `json:"Command,omitempty"` // Command to evaluate
Condition string `json:"Condition,omitempty"` // Condition to evaluate
Expected string `json:"Expected,omitempty"` // Expected value
Command string `json:"command,omitempty"` // Command to evaluate
Condition string `json:"condition,omitempty"` // Condition to evaluate
Expected string `json:"expected,omitempty"` // Expected value
}
func (c Condition) Validate() error {

View File

@ -30,7 +30,7 @@ func NewRootDAG(name, requestID string) RootDAG {
}
func GetDAGByName(ctx context.Context, name string) (*DAG, error) {
c := GetContext(ctx)
c := GetExecContext(ctx)
return c.client.GetDAG(ctx, name)
}

View File

@ -3,6 +3,7 @@ package digraph
import (
// nolint // gosec
"crypto/md5"
"encoding/json"
"fmt"
"path/filepath"
"strings"
@ -21,62 +22,62 @@ const (
// DAG contains all information about a workflow.
type DAG struct {
// Location is the absolute path to the DAG file.
Location string `json:"Location,omitempty"`
Location string `json:"location,omitempty"`
// Group is the group name of the DAG. This is optional.
Group string `json:"Group,omitempty"`
Group string `json:"group,omitempty"`
// Name is the name of the DAG. The default is the filename without the extension.
Name string `json:"Name,omitempty"`
Name string `json:"name,omitempty"`
// Dotenv is the path to the dotenv file. This is optional.
Dotenv []string `json:"Dotenv,omitempty"`
Dotenv []string `json:"dotenv,omitempty"`
// Tags contains the list of tags for the DAG. This is optional.
Tags []string `json:"Tags,omitempty"`
Tags []string `json:"tags,omitempty"`
// Description is the description of the DAG. This is optional.
Description string `json:"Description,omitempty"`
Description string `json:"description,omitempty"`
// Schedule configuration for starting, stopping, and restarting the DAG.
Schedule []Schedule `json:"Schedule,omitempty"`
StopSchedule []Schedule `json:"StopSchedule,omitempty"`
RestartSchedule []Schedule `json:"RestartSchedule,omitempty"`
Schedule []Schedule `json:"schedule,omitempty"`
StopSchedule []Schedule `json:"stopSchedule,omitempty"`
RestartSchedule []Schedule `json:"restartSchedule,omitempty"`
// SkipIfSuccessful indicates whether to skip the DAG if it was successful previously.
// E.g., when the DAG has already been executed manually before the scheduled time.
SkipIfSuccessful bool `json:"SkipIfSuccessful,omitempty"`
SkipIfSuccessful bool `json:"skipIfSuccessful,omitempty"`
// Env contains a list of environment variables to be set before running the DAG.
Env []string `json:"Env,omitempty"`
Env []string `json:"env,omitempty"`
// LogDir is the directory where the logs are stored.
LogDir string `json:"LogDir,omitempty"`
LogDir string `json:"logDir,omitempty"`
// DefaultParams contains the default parameters to be passed to the DAG.
DefaultParams string `json:"DefaultParams,omitempty"`
DefaultParams string `json:"defaultParams,omitempty"`
// Params contains the list of parameters to be passed to the DAG.
Params []string `json:"Params,omitempty"`
Params []string `json:"params,omitempty"`
// Steps contains the list of steps in the DAG.
Steps []Step `json:"Steps,omitempty"`
Steps []Step `json:"steps,omitempty"`
// HandlerOn contains the steps to be executed on different events.
HandlerOn HandlerOn `json:"HandlerOn,omitempty"`
HandlerOn HandlerOn `json:"handlerOn,omitempty"`
// Preconditions contains the conditions to be met before running the DAG.
Preconditions []Condition `json:"Preconditions,omitempty"`
Preconditions []Condition `json:"preconditions,omitempty"`
// SMTP contains the SMTP configuration.
SMTP *SMTPConfig `json:"Smtp,omitempty"`
SMTP *SMTPConfig `json:"smtp,omitempty"`
// ErrorMail contains the mail configuration for errors.
ErrorMail *MailConfig `json:"ErrorMail,omitempty"`
ErrorMail *MailConfig `json:"errorMail,omitempty"`
// InfoMail contains the mail configuration for informational messages.
InfoMail *MailConfig `json:"InfoMail,omitempty"`
InfoMail *MailConfig `json:"infoMail,omitempty"`
// MailOn contains the conditions to send mail.
MailOn *MailOn `json:"MailOn,omitempty"`
MailOn *MailOn `json:"mailOn,omitempty"`
// Timeout specifies the maximum execution time of the DAG task.
Timeout time.Duration `json:"Timeout,omitempty"`
Timeout time.Duration `json:"timeout,omitempty"`
// Delay is the delay before starting the DAG.
Delay time.Duration `json:"Delay,omitempty"`
Delay time.Duration `json:"delay,omitempty"`
// RestartWait is the time to wait before restarting the DAG.
RestartWait time.Duration `json:"RestartWait,omitempty"`
RestartWait time.Duration `json:"restartWait,omitempty"`
// MaxActiveRuns specifies the maximum concurrent steps to run in an execution.
MaxActiveRuns int `json:"MaxActiveRuns,omitempty"`
MaxActiveRuns int `json:"maxActiveRuns,omitempty"`
// MaxCleanUpTime is the maximum time to wait for cleanup when the DAG is stopped.
MaxCleanUpTime time.Duration `json:"MaxCleanUpTime,omitempty"`
MaxCleanUpTime time.Duration `json:"maxCleanUpTime,omitempty"`
// HistRetentionDays is the number of days to keep the history.
HistRetentionDays int `json:"HistRetentionDays,omitempty"`
HistRetentionDays int `json:"histRetentionDays,omitempty"`
}
// FileID returns the file ID of the local DAG file.
func (d *DAG) FileID() string {
// FileName returns the filename of the DAG without the extension.
func (d *DAG) FileName() string {
if d.Location == "" {
return ""
}
@ -86,39 +87,78 @@ func (d *DAG) FileID() string {
// Schedule contains the cron expression and the parsed cron schedule.
type Schedule struct {
// Expression is the cron expression.
Expression string `json:"Expression"`
Expression string `json:"expression"`
// Parsed is the parsed cron schedule.
Parsed cron.Schedule `json:"-"`
}
// MarshalJSON implements the json.Marshaler interface.
func (s Schedule) MarshalJSON() ([]byte, error) {
// Create a temporary struct for marshaling
type ScheduleAlias struct {
Expression string `json:"expression"`
}
return json.Marshal(ScheduleAlias{
Expression: s.Expression,
})
}
// UnmarshalJSON implements the json.Unmarshaler interface.
// It also parses the cron expression to populate the Parsed field.
func (s *Schedule) UnmarshalJSON(data []byte) error {
// Create a temporary struct for unmarshaling
type ScheduleAlias struct {
Expression string `json:"expression"`
}
var alias ScheduleAlias
if err := json.Unmarshal(data, &alias); err != nil {
return err
}
s.Expression = alias.Expression
// Parse the cron expression to populate the Parsed field
if s.Expression != "" {
parsed, err := cron.ParseStandard(s.Expression)
if err != nil {
return fmt.Errorf("invalid cron expression %q: %w", s.Expression, err)
}
s.Parsed = parsed
}
return nil
}
// HandlerOn contains the steps to be executed on different events in the DAG.
type HandlerOn struct {
Failure *Step `json:"Failure,omitempty"`
Success *Step `json:"Success,omitempty"`
Cancel *Step `json:"Cancel,omitempty"`
Exit *Step `json:"Exit,omitempty"`
Failure *Step `json:"failure,omitempty"`
Success *Step `json:"success,omitempty"`
Cancel *Step `json:"cancel,omitempty"`
Exit *Step `json:"exit,omitempty"`
}
// MailOn contains the conditions to send mail.
type MailOn struct {
Failure bool `json:"Failure,omitempty"`
Success bool `json:"Success,omitempty"`
Failure bool `json:"failure,omitempty"`
Success bool `json:"success,omitempty"`
}
// SMTPConfig contains the SMTP configuration.
type SMTPConfig struct {
Host string `json:"Host,omitempty"`
Port string `json:"Port,omitempty"`
Username string `json:"Username,omitempty"`
Password string `json:"Password,omitempty"`
Host string `json:"host,omitempty"`
Port string `json:"port,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
}
// MailConfig contains the mail configuration.
type MailConfig struct {
From string `json:"From,omitempty"`
To string `json:"To,omitempty"`
Prefix string `json:"Prefix,omitempty"`
AttachLogs bool `json:"AttachLogs,omitempty"`
From string `json:"from,omitempty"`
To string `json:"to,omitempty"`
Prefix string `json:"prefix,omitempty"`
AttachLogs bool `json:"attachLogs,omitempty"`
}
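// Illustrative note (not part of this change): with the lowercase tags above, a
// marshaled MailConfig now uses camelCase keys. The values below are assumptions
// used only for illustration.
//
//	json.Marshal(MailConfig{From: "noreply@example.com", AttachLogs: true})
//	// => {"from":"noreply@example.com","attachLogs":true}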
// HandlerType is the type of the handler.
@ -252,7 +292,7 @@ func (d *DAG) initializeDefaults() {
d.setupHandlers(workDir)
}
// setupSteps initializes all workflow steps
// setupSteps initializes all steps
func (d *DAG) setupSteps(workDir string) {
for i := range d.Steps {
d.Steps[i].setup(workDir)

View File

@ -4,9 +4,11 @@ import (
"encoding/json"
"path/filepath"
"testing"
"time"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/test"
"github.com/robfig/cron/v3"
"github.com/stretchr/testify/require"
)
@ -38,12 +40,63 @@ func TestUnixSocket(t *testing.T) {
})
}
func TestMashalJSON(t *testing.T) {
func TestMarshalJSON(t *testing.T) {
th := test.Setup(t)
t.Run("MarshalJSON", func(t *testing.T) {
dag := th.DAG(t, filepath.Join("digraph", "default.yaml"))
dat, err := json.Marshal(dag.DAG)
_, err := json.Marshal(dag.DAG)
require.NoError(t, err)
println(string(dat))
})
}
func TestScheduleJSON(t *testing.T) {
t.Run("MarshalUnmarshalJSON", func(t *testing.T) {
// Create a Schedule with a valid cron expression
original := digraph.Schedule{
Expression: "0 0 * * *", // Run at midnight every day
}
// Parse the expression to populate the Parsed field
parsed, err := cron.ParseStandard(original.Expression)
require.NoError(t, err)
original.Parsed = parsed
// Marshal to JSON
data, err := json.Marshal(original)
require.NoError(t, err)
// Verify JSON format (camelCase field names)
jsonStr := string(data)
require.Contains(t, jsonStr, `"expression":"0 0 * * *"`)
require.NotContains(t, jsonStr, `"Expression"`)
require.NotContains(t, jsonStr, `"Parsed"`)
// Unmarshal back to a Schedule struct
var unmarshaled digraph.Schedule
err = json.Unmarshal(data, &unmarshaled)
require.NoError(t, err)
// Verify the unmarshaled struct has the correct values
require.Equal(t, original.Expression, unmarshaled.Expression)
// Verify the Parsed field was populated correctly
require.NotNil(t, unmarshaled.Parsed)
// Test that the next scheduled time is the same for both objects
// This verifies that the Parsed field was correctly populated during unmarshaling
now := time.Now()
expectedNext := original.Parsed.Next(now)
actualNext := unmarshaled.Parsed.Next(now)
require.Equal(t, expectedNext, actualNext)
})
t.Run("UnmarshalInvalidCron", func(t *testing.T) {
// Test unmarshaling with an invalid cron expression
invalidJSON := `{"expression":"invalid cron"}`
var schedule digraph.Schedule
err := json.Unmarshal([]byte(invalidJSON), &schedule)
require.Error(t, err)
require.Contains(t, err.Error(), "invalid cron expression")
})
}

View File

@ -58,9 +58,8 @@ var (
ErrExecutorConfigValueMustBeMap = errors.New("executor.config value must be a map")
ErrExecutorHasInvalidKey = errors.New("executor has invalid key")
ErrExecutorConfigMustBeStringOrMap = errors.New("executor config must be string or map")
ErrDotenvMustBeStringOrArray = errors.New("dotenv must be a string or an array of strings")
ErrDotEnvMustBeStringOrArray = errors.New("dotenv must be a string or an array of strings")
ErrPreconditionMustBeArrayOrString = errors.New("precondition must be a string or an array of strings")
ErrPreconditionKeyMustBeString = errors.New("precondition key must be a string")
ErrPreconditionValueMustBeString = errors.New("precondition value must be a string")
ErrPreconditionHasInvalidKey = errors.New("precondition has invalid key")
ErrContinueOnOutputMustBeStringOrArray = errors.New("continueOn.Output must be a string or an array of strings")

View File

@ -72,7 +72,7 @@ func (e *subDAG) Run(ctx context.Context) error {
}
if e.requestID == "" {
return fmt.Errorf("request ID is not set")
return fmt.Errorf("request ID is not set for sub-DAG")
}
e.lock.Lock()
@ -102,6 +102,7 @@ func (e *subDAG) Run(ctx context.Context) error {
cmd.Stdout = e.stdout
}
if e.stderr != nil {
// TODO: Separate stderr and stdout for sub-DAG to avoid mixing logger output
cmd.Stderr = e.stderr
}

View File

@ -18,6 +18,12 @@ import (
"github.com/goccy/go-yaml"
)
// Errors for loading DAGs
var (
ErrNameOrPathRequired = errors.New("name or path is required")
ErrInvalidJSONFile = errors.New("invalid JSON file")
)
// LoadOptions contains options for loading a DAG.
type LoadOptions struct {
name string // Name of the DAG.
@ -75,14 +81,35 @@ func WithName(name string) LoadOption {
}
// WithDAGsDir sets the directory containing the DAG files.
// This directory is used as the base path for resolving relative DAG file paths.
// When a DAG is loaded by name rather than absolute path, the system will look
// for the DAG file in this directory. If not specified, the current working
// directory is used as the default.
func WithDAGsDir(dagsDir string) LoadOption {
return func(o *LoadOptions) {
o.dagsDir = dagsDir
}
}
// Load loads the DAG from the given file with the specified options.
func Load(ctx context.Context, dag string, opts ...LoadOption) (*DAG, error) {
// Load loads a Directed Acyclic Graph (DAG) from a file path or name with the given options.
//
// The function handles different input formats:
//
// 1. Absolute paths:
// - YAML files (.yaml/.yml): Processed with dynamic evaluation, including base configs,
// parameters, and environment variables
//
// 2. Relative paths or filenames:
// - Resolved against the DAGsDir specified in options
// - If DAGsDir is not provided, the current working directory is used
// - For YAML files, the extension is optional
//
// This approach provides a flexible way to load DAG definitions from multiple sources
// while supporting customization through the LoadOptions.
func Load(ctx context.Context, nameOrPath string, opts ...LoadOption) (*DAG, error) {
if nameOrPath == "" {
return nil, ErrNameOrPathRequired
}
var options LoadOptions
for _, opt := range opts {
opt(&options)
@ -99,7 +126,7 @@ func Load(ctx context.Context, dag string, opts ...LoadOption) (*DAG, error) {
DAGsDir: options.dagsDir,
},
}
return loadDAG(buildContext, dag)
return loadDAG(buildContext, nameOrPath)
}
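// Illustrative sketch (not part of this change): loading a DAG by bare name,
// resolved against a DAGs directory. The directory and DAG name are assumptions
// used only for illustration.
func exampleLoadByName(ctx context.Context) (*DAG, error) {
	// "my-workflow" is looked up as <dagsDir>/my-workflow.yaml (the extension is optional).
	return Load(ctx, "my-workflow", WithDAGsDir("/opt/dagu/dags"))
}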
// LoadYAML loads the DAG from the given YAML data with the specified options.
@ -143,7 +170,7 @@ func LoadBaseConfig(ctx BuildContext, file string) (*DAG, error) {
}
// Load the raw data from the file.
raw, err := readFile(file)
raw, err := readYAMLFile(file)
if err != nil {
return nil, err
}
@ -164,8 +191,8 @@ func LoadBaseConfig(ctx BuildContext, file string) (*DAG, error) {
}
// loadDAG loads the DAG from the given file.
func loadDAG(ctx BuildContext, dag string) (*DAG, error) {
filePath, err := resolveYamlFilePath(ctx, dag)
func loadDAG(ctx BuildContext, nameOrPath string) (*DAG, error) {
filePath, err := resolveYamlFilePath(ctx, nameOrPath)
if err != nil {
return nil, err
}
@ -177,7 +204,7 @@ func loadDAG(ctx BuildContext, dag string) (*DAG, error) {
return nil, err
}
raw, err := readFile(filePath)
raw, err := readYAMLFile(filePath)
if err != nil {
return nil, err
}
@ -287,8 +314,8 @@ func (*mergeTransformer) Transformer(
return nil
}
// readFile reads the contents of the file into a map.
func readFile(file string) (cfg map[string]any, err error) {
// readYAMLFile reads the contents of the file into a map.
func readYAMLFile(file string) (cfg map[string]any, err error) {
data, err := os.ReadFile(file) //nolint:gosec
if err != nil {
return nil, fmt.Errorf("failed to read file %q: %v", file, err)

View File

@ -25,16 +25,32 @@ type NodeData struct {
}
type NodeState struct {
Status NodeStatus
Log string
StartedAt time.Time
// Status represents the state of the node.
Status NodeStatus
// Log is the path to the node's log file.
Log string
// StartedAt is the time when the node started.
StartedAt time.Time
// FinishedAt is the time when the node finished.
FinishedAt time.Time
// RetryCount is the number of retries performed under the retry policy.
RetryCount int
RetriedAt time.Time
DoneCount int
Error error
ExitCode int
RequestID string
// RetriedAt is the time when the node was last retried.
RetriedAt time.Time
// DoneCount is the number of times the node was executed.
DoneCount int
// Error is the error that the executor encountered.
Error error
// ExitCode is the exit code that the command exited with.
// It only makes sense when the node is a command executor.
ExitCode int
// SubRuns is the list of sub-runs that this node has executed.
SubRuns []SubRun
}
type SubRun struct {
// RequestID is the request ID of the sub-run.
RequestID string
}
type NodeStatus int
@ -117,18 +133,20 @@ func (s *Data) Data() NodeData {
return s.inner
}
func (s *Data) RequestID() (string, error) {
func (s *Data) SubRunRequestID() (string, error) {
s.mu.Lock()
defer s.mu.Unlock()
if s.inner.State.RequestID == "" {
reqID, err := generateRequestID()
if err != nil {
return "", fmt.Errorf("failed to generate request ID: %w", err)
}
s.inner.State.RequestID = reqID
// If SubRuns is not empty, return the first sub-run's request ID.
if len(s.inner.State.SubRuns) > 0 {
return s.inner.State.SubRuns[0].RequestID, nil
}
return s.inner.State.RequestID, nil
// Otherwise, generate a new request ID and record it as the first sub-run.
r, err := generateRequestID()
if err != nil {
return "", fmt.Errorf("failed to generate request ID: %w", err)
}
s.inner.State.SubRuns = append(s.inner.State.SubRuns, SubRun{RequestID: r})
return r, nil
}
func (s *Data) Setup(ctx context.Context, logFile string, startedAt time.Time) error {
@ -348,10 +366,10 @@ func (n *Data) ClearState() {
n.mu.Lock()
defer n.mu.Unlock()
// requestID needs to be retained for sub-DAGs
requestID := n.inner.State.RequestID
// The data of sub-runs needs to be preserved to retain their request IDs
subRuns := n.inner.State.SubRuns
n.inner.State = NodeState{}
n.inner.State.RequestID = requestID
n.inner.State.SubRuns = subRuns
}
func (n *Data) MarkError(err error) {

View File

@ -201,10 +201,13 @@ func (n *Node) setupExecutor(ctx context.Context) (executor.Executor, error) {
// If the command is a sub-DAG, we need to set the request ID.
if subDAG, ok := cmd.(executor.SubDAG); ok {
reqID, err := n.RequestID()
reqID, err := n.SubRunRequestID()
if err != nil {
return nil, fmt.Errorf("failed to determine request ID for sub-DAG: %w", err)
}
if reqID == "" {
return nil, fmt.Errorf("request ID is empty for sub-DAG")
}
subDAG.SetRequestID(reqID)
}

View File

@ -109,7 +109,7 @@ type stepDef struct {
SignalOnStop *string
// Deprecated: Don't use this field
Call *callFuncDef // deprecated
// Run is the name of a DAG to run as a sub DAG (child DAG).
// Run is the name of a DAG to run as a sub-run.
Run string
// Params specifies the parameters for the sub DAG.
Params string

View File

@ -11,52 +11,52 @@ import (
// It marshals/unmarshals to/from JSON when it is saved in the execution history.
type Step struct {
// Name is the name of the step.
Name string `json:"Name"`
Name string `json:"name"`
// Description is the description of the step. This is optional.
Description string `json:"Description,omitempty"`
Description string `json:"description,omitempty"`
// Shell is the shell program to execute the command. This is optional.
Shell string `json:"Shell,omitempty"`
Shell string `json:"shell,omitempty"`
// OutputVariables stores the output variables for the following steps.
// It only contains the local output variables.
OutputVariables *SyncMap `json:"OutputVariables,omitempty"`
OutputVariables *SyncMap `json:"outputVariables,omitempty"`
// Dir is the working directory for the step.
Dir string `json:"Dir,omitempty"`
Dir string `json:"dir,omitempty"`
// ExecutorConfig contains the configuration for the executor.
ExecutorConfig ExecutorConfig `json:"ExecutorConfig,omitempty"`
ExecutorConfig ExecutorConfig `json:"executorConfig,omitempty"`
// CmdWithArgs is the command with arguments (only display purpose).
CmdWithArgs string `json:"CmdWithArgs,omitempty"`
CmdWithArgs string `json:"cmdWithArgs,omitempty"`
// CmdArgsSys is the command with arguments for the system.
CmdArgsSys string `json:"CmdArgsSys,omitempty"`
CmdArgsSys string `json:"cmdArgsSys,omitempty"`
// Command specifies only the command without arguments.
Command string `json:"Command,omitempty"`
Command string `json:"command,omitempty"`
// ShellCmdArgs is the shell command with arguments.
ShellCmdArgs string `json:"ShellCmdArgs,omitempty"`
ShellCmdArgs string `json:"shellCmdArgs,omitempty"`
// Script is the script to be executed.
Script string `json:"Script,omitempty"`
Script string `json:"script,omitempty"`
// Args contains the arguments for the command.
Args []string `json:"Args,omitempty"`
Args []string `json:"args,omitempty"`
// Stdout is the file to store the standard output.
Stdout string `json:"Stdout,omitempty"`
Stdout string `json:"stdout,omitempty"`
// Stderr is the file to store the standard error.
Stderr string `json:"Stderr,omitempty"`
Stderr string `json:"stderr,omitempty"`
// Output is the variable name to store the output.
Output string `json:"Output,omitempty"`
Output string `json:"output,omitempty"`
// Depends contains the list of step names to depend on.
Depends []string `json:"Depends,omitempty"`
Depends []string `json:"depends,omitempty"`
// ContinueOn contains the conditions to continue on failure or skipped.
ContinueOn ContinueOn `json:"ContinueOn,omitempty"`
ContinueOn ContinueOn `json:"continueOn,omitempty"`
// RetryPolicy contains the retry policy for the step.
RetryPolicy RetryPolicy `json:"RetryPolicy,omitempty"`
RetryPolicy RetryPolicy `json:"retryPolicy,omitempty"`
// RepeatPolicy contains the repeat policy for the step.
RepeatPolicy RepeatPolicy `json:"RepeatPolicy,omitempty"`
RepeatPolicy RepeatPolicy `json:"repeatPolicy,omitempty"`
// MailOnError is the flag to send mail on error.
MailOnError bool `json:"MailOnError,omitempty"`
MailOnError bool `json:"mailOnError,omitempty"`
// Preconditions contains the conditions to be met before running the step.
Preconditions []Condition `json:"Preconditions,omitempty"`
Preconditions []Condition `json:"preconditions,omitempty"`
// SignalOnStop is the signal to send on stop.
SignalOnStop string `json:"SignalOnStop,omitempty"`
SignalOnStop string `json:"signalOnStop,omitempty"`
// SubDAG contains the information about a sub DAG to be executed.
SubDAG *SubDAG `json:"Sub,omitempty"`
SubDAG *SubDAG `json:"sub,omitempty"`
}
// setup sets the default values for the step.
@ -90,8 +90,8 @@ func (s *Step) String() string {
// SubDAG contains information about a sub DAG to be executed.
type SubDAG struct {
Name string `json:"Name,omitempty"`
Params string `json:"Params,omitempty"`
Name string `json:"name,omitempty"`
Params string `json:"params,omitempty"`
}
// ExecutorTypeSubLegacy is defined here in order to parse
@ -103,8 +103,8 @@ const ExecutorTypeSub = "sub"
type ExecutorConfig struct {
// Type represents one of the registered executors.
// See `executor.Register` in `internal/executor/executor.go`.
Type string `json:"Type,omitempty"`
Config map[string]any `json:"Config,omitempty"` // Config contains executor-specific configuration.
Type string `json:"type,omitempty"`
Config map[string]any `json:"config,omitempty"` // Config contains executor-specific configuration.
}
// IsCommand returns true if the executor is a command
@ -115,23 +115,23 @@ func (e ExecutorConfig) IsCommand() bool {
// RetryPolicy contains the retry policy for a step.
type RetryPolicy struct {
// Limit is the number of retries allowed.
Limit int `json:"Limit,omitempty"`
Limit int `json:"limit,omitempty"`
// Interval is the time to wait between retries.
Interval time.Duration `json:"Interval,omitempty"`
Interval time.Duration `json:"interval,omitempty"`
// LimitStr is the string representation of the limit.
LimitStr string `json:"LimitStr,omitempty"`
LimitStr string `json:"limitStr,omitempty"`
// IntervalSecStr is the string representation of the interval.
IntervalSecStr string `json:"IntervalSecStr,omitempty"`
IntervalSecStr string `json:"intervalSecStr,omitempty"`
// ExitCodes is the list of exit codes that should trigger a retry.
ExitCodes []int `json:"ExitCode,omitempty"`
ExitCodes []int `json:"exitCode,omitempty"`
}
// RepeatPolicy contains the repeat policy for a step.
type RepeatPolicy struct {
// Repeat determines if the step should be repeated.
Repeat bool `json:"Repeat,omitempty"`
Repeat bool `json:"repeat,omitempty"`
// Interval is the time to wait between repeats.
Interval time.Duration `json:"Interval,omitempty"`
Interval time.Duration `json:"interval,omitempty"`
}
// ContinueOn contains the conditions to continue on failure or skipped.
@ -140,9 +140,9 @@ type RepeatPolicy struct {
// A step can be skipped when the preconditions are not met.
// Then if the ContinueOn.Skip is set, the step will continue to the next step.
type ContinueOn struct {
Failure bool `json:"Failure,omitempty"` // Failure is the flag to continue to the next step on failure.
Skipped bool `json:"Skipped,omitempty"` // Skipped is the flag to continue to the next step on skipped.
ExitCode []int `json:"ExitCode,omitempty"` // ExitCode is the list of exit codes to continue to the next step.
Output []string `json:"Output,omitempty"` // Output is the list of output (stdout/stderr) to continue to the next step.
MarkSuccess bool `json:"MarkSuccess,omitempty"` // MarkSuccess is the flag to mark the step as success when the condition is met.
Failure bool `json:"failure,omitempty"` // Failure is the flag to continue to the next step on failure.
Skipped bool `json:"skipped,omitempty"` // Skipped is the flag to continue to the next step on skipped.
ExitCode []int `json:"exitCode,omitempty"` // ExitCode is the list of exit codes to continue to the next step.
Output []string `json:"output,omitempty"` // Output is the list of output (stdout/stderr) to continue to the next step.
MarkSuccess bool `json:"markSuccess,omitempty"` // MarkSuccess is the flag to mark the step as success when the condition is met.
}

View File

@ -1,4 +1,4 @@
package filecache
package fileutil
import (
"context"
@ -31,8 +31,8 @@ type Cache[T any] struct {
stopCh chan struct{}
}
// New creates a new cache with the specified capacity and time-to-live duration
func New[T any](cap int, ttl time.Duration) *Cache[T] {
// NewCache creates a new cache with the specified capacity and time-to-live duration
func NewCache[T any](cap int, ttl time.Duration) *Cache[T] {
return &Cache[T]{
capacity: cap,
ttl: ttl,

View File

@ -10,6 +10,7 @@ import (
"strings"
)
// Common errors for file operations
var (
ErrUnexpectedEOF = errors.New("unexpected end of input after escape character")
ErrUnknownEscapeSequence = errors.New("unknown escape sequence")

View File

@ -9,10 +9,10 @@ import (
"reflect"
"github.com/dagu-org/dagu/api/v1"
"github.com/dagu-org/dagu/internal/client"
"github.com/dagu-org/dagu/internal/config"
"github.com/dagu-org/dagu/internal/dagstore"
"github.com/dagu-org/dagu/internal/frontend/auth"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/getkin/kin-openapi/openapi3"
"github.com/getkin/kin-openapi/openapi3filter"
"github.com/go-chi/chi/v5"
@ -22,7 +22,8 @@ import (
var _ api.StrictServerInterface = (*API)(nil)
type API struct {
client client.Client
dagClient dagstore.Client
runClient runstore.Client
remoteNodes map[string]config.RemoteNode
apiBasePath string
logEncodingCharset string
@ -30,7 +31,8 @@ type API struct {
}
func New(
cli client.Client,
dagCli dagstore.Client,
runCli runstore.Client,
cfg *config.Config,
) *API {
remoteNodes := make(map[string]config.RemoteNode)
@ -39,7 +41,8 @@ func New(
}
return &API{
client: cli,
dagClient: dagCli,
runClient: runCli,
logEncodingCharset: cfg.UI.LogEncodingCharset,
remoteNodes: remoteNodes,
apiBasePath: cfg.Server.APIBasePath,
@ -118,7 +121,7 @@ func (a *API) handleError(w http.ResponseWriter, _ *http.Request, err error) {
}
switch {
case errors.Is(err, persistence.ErrRequestIDNotFound):
case errors.Is(err, runstore.ErrRequestIDNotFound):
code = api.ErrorCodeNotFound
message = "Request ID not found"
}
@ -131,7 +134,7 @@ func (a *API) handleError(w http.ResponseWriter, _ *http.Request, err error) {
})
}
func ptr[T any](v T) *T {
func ptrOf[T any](v T) *T {
if reflect.ValueOf(v).IsZero() {
return nil
}
@ -139,7 +142,7 @@ func ptr[T any](v T) *T {
return &v
}
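// Illustrative note (not part of this change): ptrOf maps zero values to nil,
// presumably so that optional fields in the generated API types are omitted
// rather than serialized as empty values. For example:
//
//	ptrOf("") == nil
//	*ptrOf("hello") == "hello"
//	valueOf[string](nil) == ""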
func value[T any](ptr *T) T {
func valueOf[T any](ptr *T) T {
if ptr == nil {
var zero T
return zero

View File

@ -12,11 +12,11 @@ import (
"strings"
"github.com/dagu-org/dagu/api/v1"
"github.com/dagu-org/dagu/internal/client"
"github.com/dagu-org/dagu/internal/dagstore"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/persistence/jsondb"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/dagu-org/dagu/internal/runstore/filestore"
"github.com/samber/lo"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/japanese"
@ -25,9 +25,9 @@ import (
// CreateDAG implements api.StrictServerInterface.
func (a *API) CreateDAG(ctx context.Context, request api.CreateDAGRequestObject) (api.CreateDAGResponseObject, error) {
name, err := a.client.CreateDAG(ctx, request.Body.Value)
name, err := a.dagClient.Create(ctx, request.Body.Value)
if err != nil {
if errors.Is(err, persistence.ErrDAGAlreadyExists) {
if errors.Is(err, dagstore.ErrDAGAlreadyExists) {
return nil, &Error{
HTTPStatus: http.StatusConflict,
Code: api.ErrorCodeAlreadyExists,
@ -42,7 +42,7 @@ func (a *API) CreateDAG(ctx context.Context, request api.CreateDAGRequestObject)
// DeleteDAG implements api.StrictServerInterface.
func (a *API) DeleteDAG(ctx context.Context, request api.DeleteDAGRequestObject) (api.DeleteDAGResponseObject, error) {
_, err := a.client.GetDAGStatus(ctx, request.Name)
_, err := a.dagClient.Status(ctx, request.Name)
if err != nil {
return nil, &Error{
HTTPStatus: http.StatusNotFound,
@ -50,7 +50,7 @@ func (a *API) DeleteDAG(ctx context.Context, request api.DeleteDAGRequestObject)
Message: fmt.Sprintf("DAG %s not found", request.Name),
}
}
if err := a.client.DeleteDAG(ctx, request.Name); err != nil {
if err := a.dagClient.Delete(ctx, request.Name); err != nil {
return nil, fmt.Errorf("error deleting DAG: %w", err)
}
return &api.DeleteDAG204Response{}, nil
@ -65,7 +65,7 @@ func (a *API) GetDAGDetails(ctx context.Context, request api.GetDAGDetailsReques
tab = *request.Params.Tab
}
status, err := a.client.GetDAGStatus(ctx, name)
status, err := a.dagClient.Status(ctx, name)
if err != nil {
return nil, &Error{
HTTPStatus: http.StatusNotFound,
@ -83,16 +83,16 @@ func (a *API) GetDAGDetails(ctx context.Context, request api.GetDAGDetailsReques
handlerOn := api.HandlerOn{}
if handlers.Failure != nil {
handlerOn.Failure = ptr(toStep(*handlers.Failure))
handlerOn.Failure = ptrOf(toStep(*handlers.Failure))
}
if handlers.Success != nil {
handlerOn.Success = ptr(toStep(*handlers.Success))
handlerOn.Success = ptrOf(toStep(*handlers.Success))
}
if handlers.Cancel != nil {
handlerOn.Cancel = ptr(toStep(*handlers.Cancel))
handlerOn.Cancel = ptrOf(toStep(*handlers.Cancel))
}
if handlers.Exit != nil {
handlerOn.Exit = ptr(toStep(*handlers.Exit))
handlerOn.Exit = ptrOf(toStep(*handlers.Exit))
}
var schedules []api.Schedule
@ -110,33 +110,33 @@ func (a *API) GetDAGDetails(ctx context.Context, request api.GetDAGDetailsReques
dag := status.DAG
details := api.DAGDetails{
Name: dag.Name,
Description: ptr(dag.Description),
DefaultParams: ptr(dag.DefaultParams),
Delay: ptr(int(dag.Delay.Seconds())),
Env: ptr(dag.Env),
Group: ptr(dag.Group),
HandlerOn: ptr(handlerOn),
HistRetentionDays: ptr(dag.HistRetentionDays),
Location: ptr(dag.Location),
LogDir: ptr(dag.LogDir),
MaxActiveRuns: ptr(dag.MaxActiveRuns),
Params: ptr(dag.Params),
Preconditions: ptr(preconditions),
Schedule: ptr(schedules),
Steps: ptr(steps),
Tags: ptr(dag.Tags),
Description: ptrOf(dag.Description),
DefaultParams: ptrOf(dag.DefaultParams),
Delay: ptrOf(int(dag.Delay.Seconds())),
Env: ptrOf(dag.Env),
Group: ptrOf(dag.Group),
HandlerOn: ptrOf(handlerOn),
HistRetentionDays: ptrOf(dag.HistRetentionDays),
Location: ptrOf(dag.Location),
LogDir: ptrOf(dag.LogDir),
MaxActiveRuns: ptrOf(dag.MaxActiveRuns),
Params: ptrOf(dag.Params),
Preconditions: ptrOf(preconditions),
Schedule: ptrOf(schedules),
Steps: ptrOf(steps),
Tags: ptrOf(dag.Tags),
}
statusDetails := api.DAGStatusFileDetails{
DAG: details,
Error: ptr(status.ErrorAsString()),
Error: ptrOf(status.ErrorAsString()),
File: status.File,
Status: toStatus(status.Status),
Suspended: status.Suspended,
}
if status.Error != nil {
statusDetails.Error = ptr(status.Error.Error())
statusDetails.Error = ptrOf(status.Error.Error())
}
resp := &api.GetDAGDetails200JSONResponse{
@ -153,11 +153,11 @@ func (a *API) GetDAGDetails(ctx context.Context, request api.GetDAGDetailsReques
return resp, nil
case api.DAGDetailTabSpec:
spec, err := a.client.GetDAGSpec(ctx, name)
spec, err := a.dagClient.GetSpec(ctx, name)
if err != nil {
return nil, fmt.Errorf("error getting DAG spec: %w", err)
}
resp.Definition = ptr(spec)
resp.Definition = ptrOf(spec)
case api.DAGDetailTabHistory:
historyData := a.readHistoryData(ctx, status.DAG)
@ -172,14 +172,14 @@ func (a *API) GetDAGDetails(ctx context.Context, request api.GetDAGDetailsReques
}
}
l, err := a.readStepLog(ctx, dag, *request.Params.Step, value(request.Params.File))
l, err := a.readStepLog(ctx, dag, *request.Params.Step, valueOf(request.Params.File))
if err != nil {
return nil, err
}
resp.StepLog = l
case api.DAGDetailTabSchedulerLog:
l, err := a.readLog(ctx, dag, value(request.Params.File))
l, err := a.readLog(ctx, dag, valueOf(request.Params.File))
if err != nil {
return nil, err
}
@ -197,7 +197,7 @@ func (a *API) readHistoryData(
dag *digraph.DAG,
) api.DAGHistoryData {
defaultHistoryLimit := 30
logs := a.client.GetRecentHistory(ctx, dag.Name, defaultHistoryLimit)
statuses := a.runClient.ListRecentHistory(ctx, dag.Name, defaultHistoryLimit)
data := map[string][]scheduler.NodeStatus{}
@ -214,21 +214,21 @@ func (a *API) readHistoryData(
data[nodeName][logIdx] = status
}
for idx, log := range logs {
for _, node := range log.Status.Nodes {
addStatusFn(data, len(logs), idx, node.Step.Name, node.Status)
for idx, status := range statuses {
for _, node := range status.Nodes {
addStatusFn(data, len(statuses), idx, node.Step.Name, node.Status)
}
}
var grid []api.DAGLogGridItem
for node, statusList := range data {
var history []api.NodeStatus
var vals []api.NodeStatus
for _, s := range statusList {
history = append(history, api.NodeStatus(s))
vals = append(vals, api.NodeStatus(s))
}
grid = append(grid, api.DAGLogGridItem{
Name: node,
Vals: history,
Vals: vals,
})
}
@ -237,19 +237,18 @@ func (a *API) readHistoryData(
})
handlers := map[string][]scheduler.NodeStatus{}
for idx, log := range logs {
if n := log.Status.OnSuccess; n != nil {
addStatusFn(handlers, len(logs), idx, n.Step.Name, n.Status)
for idx, log := range statuses {
if n := log.OnSuccess; n != nil {
addStatusFn(handlers, len(statuses), idx, n.Step.Name, n.Status)
}
if n := log.Status.OnFailure; n != nil {
addStatusFn(handlers, len(logs), idx, n.Step.Name, n.Status)
if n := log.OnFailure; n != nil {
addStatusFn(handlers, len(statuses), idx, n.Step.Name, n.Status)
}
if n := log.Status.OnCancel; n != nil {
n := log.Status.OnCancel
addStatusFn(handlers, len(logs), idx, n.Step.Name, n.Status)
if n := log.OnCancel; n != nil {
addStatusFn(handlers, len(statuses), idx, n.Step.Name, n.Status)
}
if n := log.Status.OnExit; n != nil {
addStatusFn(handlers, len(logs), idx, n.Step.Name, n.Status)
if n := log.OnExit; n != nil {
addStatusFn(handlers, len(statuses), idx, n.Step.Name, n.Status)
}
}
@ -260,22 +259,22 @@ func (a *API) readHistoryData(
digraph.HandlerOnExit,
} {
if statusList, ok := handlers[handlerType.String()]; ok {
var history []api.NodeStatus
var vals []api.NodeStatus
for _, status := range statusList {
history = append(history, api.NodeStatus(status))
vals = append(vals, api.NodeStatus(status))
}
grid = append(grid, api.DAGLogGridItem{
Name: handlerType.String(),
Vals: history,
Vals: vals,
})
}
}
var statusList []api.DAGLogStatusFile
for _, log := range logs {
for _, status := range statuses {
statusFile := api.DAGLogStatusFile{
File: log.File,
Status: toStatus(log.Status),
File: "", // We don't provide the file name here anymore
Status: toStatus(status),
}
statusList = append(statusList, statusFile)
}
@ -294,7 +293,7 @@ func (a *API) readLog(
var logFile string
if statusFile != "" {
status, err := jsondb.ParseStatusFile(statusFile)
status, err := filestore.ParseStatusFile(statusFile)
if err != nil {
return nil, err
}
@ -302,7 +301,7 @@ func (a *API) readLog(
}
if logFile == "" {
lastStatus, err := a.client.GetLatestStatus(ctx, dag)
lastStatus, err := a.runClient.GetLatestStatus(ctx, dag)
if err != nil {
return nil, fmt.Errorf("error getting latest status: %w", err)
}
@ -326,10 +325,10 @@ func (a *API) readStepLog(
stepName string,
statusFile string,
) (*api.StepLog, error) {
var status *persistence.Status
var status *runstore.Status
if statusFile != "" {
parsedStatus, err := jsondb.ParseStatusFile(statusFile)
parsedStatus, err := filestore.ParseStatusFile(statusFile)
if err != nil {
return nil, err
}
@ -337,7 +336,7 @@ func (a *API) readStepLog(
}
if status == nil {
latestStatus, err := a.client.GetLatestStatus(ctx, dag)
latestStatus, err := a.runClient.GetLatestStatus(ctx, dag)
if err != nil {
return nil, fmt.Errorf("error getting latest status: %w", err)
}
@ -345,7 +344,7 @@ func (a *API) readStepLog(
}
// Find the step in the status to get the log file.
var node *persistence.Node
var node *runstore.Node
for _, n := range status.Nodes {
if n.Step.Name == stepName {
node = n
@ -411,21 +410,21 @@ func readFileContent(f string, decoder *encoding.Decoder) ([]byte, error) {
// ListDAGs implements api.StrictServerInterface.
func (a *API) ListDAGs(ctx context.Context, request api.ListDAGsRequestObject) (api.ListDAGsResponseObject, error) {
var opts []client.ListStatusOption
var opts []dagstore.ListDAGOption
if request.Params.Limit != nil {
opts = append(opts, client.WithLimit(*request.Params.Limit))
opts = append(opts, dagstore.WithLimit(*request.Params.Limit))
}
if request.Params.Page != nil {
opts = append(opts, client.WithPage(*request.Params.Page))
opts = append(opts, dagstore.WithPage(*request.Params.Page))
}
if request.Params.SearchName != nil {
opts = append(opts, client.WithName(*request.Params.SearchName))
opts = append(opts, dagstore.WithName(*request.Params.SearchName))
}
if request.Params.SearchTag != nil {
opts = append(opts, client.WithTag(*request.Params.SearchTag))
opts = append(opts, dagstore.WithTag(*request.Params.SearchTag))
}
result, errList, err := a.client.ListStatus(ctx, opts...)
result, errList, err := a.dagClient.List(ctx, opts...)
if err != nil {
return nil, fmt.Errorf("error listing DAGs: %w", err)
}
@ -439,26 +438,26 @@ func (a *API) ListDAGs(ctx context.Context, request api.ListDAGsRequestObject) (
}
resp := &api.ListDAGs200JSONResponse{
Errors: ptr(errList),
Errors: ptrOf(errList),
PageCount: result.TotalPages,
HasError: hasErr,
}
for _, item := range result.Items {
status := api.DAGStatus{
Log: ptr(item.Status.Log),
Log: ptrOf(item.Status.Log),
Name: item.Status.Name,
Params: ptr(item.Status.Params),
Pid: ptr(int(item.Status.PID)),
Params: ptrOf(item.Status.Params),
Pid: ptrOf(int(item.Status.PID)),
RequestId: item.Status.RequestID,
StartedAt: item.Status.StartedAt,
FinishedAt: item.Status.FinishedAt,
Status: api.RunStatus(item.Status.Status),
StatusText: api.RunStatusText(item.Status.StatusText),
StatusText: api.RunStatusText(item.Status.Status.String()),
}
dag := api.DAGStatusFile{
Error: ptr(item.ErrorAsString()),
Error: ptrOf(item.ErrorAsString()),
File: item.File,
Status: status,
Suspended: item.Suspended,
@ -466,7 +465,7 @@ func (a *API) ListDAGs(ctx context.Context, request api.ListDAGsRequestObject) (
}
if item.Error != nil {
dag.Error = ptr(item.Error.Error())
dag.Error = ptrOf(item.Error.Error())
}
resp.DAGs = append(resp.DAGs, dag)
@ -477,7 +476,7 @@ func (a *API) ListDAGs(ctx context.Context, request api.ListDAGsRequestObject) (
// ListTags implements api.StrictServerInterface.
func (a *API) ListTags(ctx context.Context, _ api.ListTagsRequestObject) (api.ListTagsResponseObject, error) {
tags, errs, err := a.client.GetTagList(ctx)
tags, errs, err := a.dagClient.TagList(ctx)
if err != nil {
return nil, fmt.Errorf("error getting tags: %w", err)
}
@ -491,9 +490,9 @@ func (a *API) ListTags(ctx context.Context, _ api.ListTagsRequestObject) (api.Li
func (a *API) PostDAGAction(ctx context.Context, request api.PostDAGActionRequestObject) (api.PostDAGActionResponseObject, error) {
action := request.Body.Action
var status client.DAGStatus
var status dagstore.Status
if action != api.DAGActionSave {
s, err := a.client.GetDAGStatus(ctx, request.Name)
s, err := a.dagClient.Status(ctx, request.Name)
if err != nil {
return nil, err
}
@ -509,15 +508,15 @@ func (a *API) PostDAGAction(ctx context.Context, request api.PostDAGActionReques
Message: "DAG is already running",
}
}
if err := a.client.StartDAG(ctx, status.DAG, client.StartOptions{
Params: value(request.Body.Params),
if err := a.runClient.Start(ctx, status.DAG, runstore.StartOptions{
Params: valueOf(request.Body.Params),
}); err != nil {
return nil, fmt.Errorf("error starting DAG: %w", err)
}
return api.PostDAGAction200JSONResponse{}, nil
case api.DAGActionSuspend:
b, err := strconv.ParseBool(value(request.Body.Value))
b, err := strconv.ParseBool(valueOf(request.Body.Value))
if err != nil {
return nil, &Error{
HTTPStatus: http.StatusBadRequest,
@ -525,7 +524,7 @@ func (a *API) PostDAGAction(ctx context.Context, request api.PostDAGActionReques
Message: "invalid value for suspend, must be true or false",
}
}
if err := a.client.ToggleSuspend(ctx, request.Name, b); err != nil {
if err := a.dagClient.ToggleSuspend(ctx, request.Name, b); err != nil {
return nil, fmt.Errorf("error toggling suspend: %w", err)
}
return api.PostDAGAction200JSONResponse{}, nil
@ -538,7 +537,7 @@ func (a *API) PostDAGAction(ctx context.Context, request api.PostDAGActionReques
Message: "DAG is not running",
}
}
if err := a.client.StopDAG(ctx, status.DAG); err != nil {
if err := a.runClient.Stop(ctx, status.DAG, ""); err != nil {
return nil, fmt.Errorf("error stopping DAG: %w", err)
}
return api.PostDAGAction200JSONResponse{}, nil
@ -551,7 +550,7 @@ func (a *API) PostDAGAction(ctx context.Context, request api.PostDAGActionReques
Message: "requestId is required for retry action",
}
}
if err := a.client.RetryDAG(ctx, status.DAG, *request.Body.RequestId); err != nil {
if err := a.runClient.Retry(ctx, status.DAG, *request.Body.RequestId); err != nil {
return nil, fmt.Errorf("error retrying DAG: %w", err)
}
return api.PostDAGAction200JSONResponse{}, nil
@ -601,7 +600,7 @@ func (a *API) PostDAGAction(ctx context.Context, request api.PostDAGActionReques
}
}
if err := a.client.UpdateDAG(ctx, request.Name, *request.Body.Value); err != nil {
if err := a.dagClient.Update(ctx, request.Name, *request.Body.Value); err != nil {
return nil, err
}
@ -617,12 +616,12 @@ func (a *API) PostDAGAction(ctx context.Context, request api.PostDAGActionReques
}
newName := *request.Body.Value
if err := a.client.MoveDAG(ctx, request.Name, newName); err != nil {
if err := a.dagClient.Move(ctx, request.Name, newName); err != nil {
return nil, fmt.Errorf("error renaming DAG: %w", err)
}
return api.PostDAGAction200JSONResponse{
NewName: ptr(newName),
NewName: ptrOf(newName),
}, nil
default:
@ -635,14 +634,22 @@ func (a *API) updateStatus(
ctx context.Context,
reqID string,
step string,
dagStatus client.DAGStatus,
dagStatus dagstore.Status,
to scheduler.NodeStatus,
) error {
status, err := a.client.GetStatusByRequestID(ctx, dagStatus.DAG, reqID)
status, err := a.runClient.GetRealtimeStatus(ctx, dagStatus.DAG, reqID)
if err != nil {
return fmt.Errorf("error getting status: %w", err)
}
if status.Status == scheduler.StatusRunning {
return &Error{
HTTPStatus: http.StatusBadRequest,
Code: api.ErrorCodeBadRequest,
Message: "cannot change status of running DAG",
}
}
idxToUpdate := -1
for idx, n := range status.Nodes {
@ -659,9 +666,9 @@ func (a *API) updateStatus(
}
status.Nodes[idxToUpdate].Status = to
status.Nodes[idxToUpdate].StatusText = to.String()
if err := a.client.UpdateStatus(ctx, dagStatus.DAG.Name, *status); err != nil {
rootDAG := digraph.NewRootDAG(dagStatus.DAG.Name, reqID)
if err := a.runClient.UpdateStatus(ctx, rootDAG, *status); err != nil {
return fmt.Errorf("error updating status: %w", err)
}
@ -679,7 +686,7 @@ func (a *API) SearchDAGs(ctx context.Context, request api.SearchDAGsRequestObjec
}
}
ret, errs, err := a.client.GrepDAG(ctx, query)
ret, errs, err := a.dagClient.Grep(ctx, query)
if err != nil {
return nil, fmt.Errorf("error searching DAGs: %w", err)
}
@ -716,12 +723,12 @@ func toDAG(dag *digraph.DAG) api.DAG {
return api.DAG{
Name: dag.Name,
Group: ptr(dag.Group),
Description: ptr(dag.Description),
Params: ptr(dag.Params),
DefaultParams: ptr(dag.DefaultParams),
Tags: ptr(dag.Tags),
Schedule: ptr(schedules),
Group: ptrOf(dag.Group),
Description: ptrOf(dag.Description),
Params: ptrOf(dag.Params),
DefaultParams: ptrOf(dag.DefaultParams),
Tags: ptrOf(dag.Tags),
Schedule: ptrOf(schedules),
}
}
@ -732,70 +739,70 @@ func toStep(obj digraph.Step) api.Step {
}
repeatPolicy := api.RepeatPolicy{
Repeat: ptr(obj.RepeatPolicy.Repeat),
Interval: ptr(int(obj.RepeatPolicy.Interval.Seconds())),
Repeat: ptrOf(obj.RepeatPolicy.Repeat),
Interval: ptrOf(int(obj.RepeatPolicy.Interval.Seconds())),
}
step := api.Step{
Name: obj.Name,
Description: ptr(obj.Description),
Args: ptr(obj.Args),
CmdWithArgs: ptr(obj.CmdWithArgs),
Command: ptr(obj.Command),
Depends: ptr(obj.Depends),
Dir: ptr(obj.Dir),
MailOnError: ptr(obj.MailOnError),
Output: ptr(obj.Output),
Preconditions: ptr(conditions),
RepeatPolicy: ptr(repeatPolicy),
Script: ptr(obj.Script),
Description: ptrOf(obj.Description),
Args: ptrOf(obj.Args),
CmdWithArgs: ptrOf(obj.CmdWithArgs),
Command: ptrOf(obj.Command),
Depends: ptrOf(obj.Depends),
Dir: ptrOf(obj.Dir),
MailOnError: ptrOf(obj.MailOnError),
Output: ptrOf(obj.Output),
Preconditions: ptrOf(conditions),
RepeatPolicy: ptrOf(repeatPolicy),
Script: ptrOf(obj.Script),
}
if obj.SubDAG != nil {
step.Run = ptr(obj.SubDAG.Name)
step.Params = ptr(obj.SubDAG.Params)
step.Run = ptrOf(obj.SubDAG.Name)
step.Params = ptrOf(obj.SubDAG.Params)
}
return step
}
func toPrecondition(obj digraph.Condition) api.Precondition {
return api.Precondition{
Condition: ptr(obj.Condition),
Expected: ptr(obj.Expected),
Condition: ptrOf(obj.Condition),
Expected: ptrOf(obj.Expected),
}
}
func toStatus(s persistence.Status) api.DAGStatusDetails {
func toStatus(s runstore.Status) api.DAGStatusDetails {
status := api.DAGStatusDetails{
Log: s.Log,
Name: s.Name,
Params: ptr(s.Params),
Params: ptrOf(s.Params),
Pid: int(s.PID),
RequestId: s.RequestID,
StartedAt: s.StartedAt,
FinishedAt: s.FinishedAt,
Status: api.RunStatus(s.Status),
StatusText: api.RunStatusText(s.StatusText),
StatusText: api.RunStatusText(s.Status.String()),
}
for _, n := range s.Nodes {
status.Nodes = append(status.Nodes, toNode(n))
}
if s.OnSuccess != nil {
status.OnSuccess = ptr(toNode(s.OnSuccess))
status.OnSuccess = ptrOf(toNode(s.OnSuccess))
}
if s.OnFailure != nil {
status.OnFailure = ptr(toNode(s.OnFailure))
status.OnFailure = ptrOf(toNode(s.OnFailure))
}
if s.OnCancel != nil {
status.OnCancel = ptr(toNode(s.OnCancel))
status.OnCancel = ptrOf(toNode(s.OnCancel))
}
if s.OnExit != nil {
status.OnExit = ptr(toNode(s.OnExit))
status.OnExit = ptrOf(toNode(s.OnExit))
}
return status
}
func toNode(node *persistence.Node) api.Node {
func toNode(node *runstore.Node) api.Node {
return api.Node{
DoneCount: node.DoneCount,
FinishedAt: node.FinishedAt,
@ -803,8 +810,8 @@ func toNode(node *persistence.Node) api.Node {
RetryCount: node.RetryCount,
StartedAt: node.StartedAt,
Status: api.NodeStatus(node.Status),
StatusText: api.NodeStatusText(node.StatusText),
StatusText: api.NodeStatusText(node.Status.String()),
Step: toStep(node.Step),
Error: ptr(node.Error),
Error: ptrOf(node.Error),
}
}

View File

@ -9,11 +9,11 @@ import (
"reflect"
"github.com/dagu-org/dagu/api/v2"
"github.com/dagu-org/dagu/internal/client"
"github.com/dagu-org/dagu/internal/config"
"github.com/dagu-org/dagu/internal/dagstore"
"github.com/dagu-org/dagu/internal/frontend/auth"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/getkin/kin-openapi/openapi3"
"github.com/getkin/kin-openapi/openapi3filter"
"github.com/go-chi/chi/v5"
@ -23,7 +23,8 @@ import (
var _ api.StrictServerInterface = (*API)(nil)
type API struct {
client client.Client
dagClient dagstore.Client
runClient runstore.Client
remoteNodes map[string]config.RemoteNode
apiBasePath string
logEncodingCharset string
@ -31,7 +32,8 @@ type API struct {
}
func New(
cli client.Client,
dagCli dagstore.Client,
runCli runstore.Client,
cfg *config.Config,
) *API {
remoteNodes := make(map[string]config.RemoteNode)
@ -40,7 +42,8 @@ func New(
}
return &API{
client: cli,
dagClient: dagCli,
runClient: runCli,
logEncodingCharset: cfg.UI.LogEncodingCharset,
remoteNodes: remoteNodes,
apiBasePath: cfg.Server.APIBasePath,
@ -119,15 +122,15 @@ func (a *API) handleError(w http.ResponseWriter, r *http.Request, err error) {
}
switch {
case errors.Is(err, persistence.ErrDAGNotFound):
case errors.Is(err, dagstore.ErrDAGNotFound):
code = api.ErrorCodeNotFound
message = "DAG not found"
case errors.Is(err, persistence.ErrRequestIDNotFound):
case errors.Is(err, runstore.ErrRequestIDNotFound):
code = api.ErrorCodeNotFound
message = "Request ID not found"
case errors.Is(err, persistence.ErrDAGAlreadyExists):
case errors.Is(err, dagstore.ErrDAGAlreadyExists):
code = api.ErrorCodeAlreadyExists
message = "DAG already exists"
@ -144,7 +147,7 @@ func (a *API) handleError(w http.ResponseWriter, r *http.Request, err error) {
})
}
func ptr[T any](v T) *T {
func ptrOf[T any](v T) *T {
if reflect.ValueOf(v).IsZero() {
return nil
}
@ -152,7 +155,7 @@ func ptr[T any](v T) *T {
return &v
}
func value[T any](ptr *T) T {
func valueOf[T any](ptr *T) T {
if ptr == nil {
var zero T
return zero
@ -161,7 +164,7 @@ func value[T any](ptr *T) T {
}
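For context (illustrative only, not part of this diff): the renamed ptrOf and valueOf generics mirror each other — ptrOf returns nil for zero values so optional API fields are omitted from responses, and valueOf collapses a nil pointer back to the zero value. A minimal sketch with hypothetical values:
s := ptrOf("daily-report") // non-nil *string, the value is kept
z := ptrOf("")             // nil: zero values are dropped, so optional fields marshal as absent
fmt.Println(valueOf(s), valueOf(z)) // "daily-report" "" — a nil pointer collapses back to the zero value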
// toPagination converts a paginated result to an API pagination object.
func toPagination[T any](paginatedResult persistence.PaginatedResult[T]) api.Pagination {
func toPagination[T any](paginatedResult dagstore.PaginatedResult[T]) api.Pagination {
return api.Pagination{
CurrentPage: paginatedResult.CurrentPage,
NextPage: paginatedResult.NextPage,

View File

@ -7,18 +7,19 @@ import (
"net/http"
"sort"
"strings"
"time"
"github.com/dagu-org/dagu/api/v2"
"github.com/dagu-org/dagu/internal/client"
"github.com/dagu-org/dagu/internal/dagstore"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
)
func (a *API) CreateNewDAG(ctx context.Context, request api.CreateNewDAGRequestObject) (api.CreateNewDAGResponseObject, error) {
name, err := a.client.CreateDAG(ctx, request.Body.Name)
name, err := a.dagClient.Create(ctx, request.Body.Name)
if err != nil {
if errors.Is(err, persistence.ErrDAGAlreadyExists) {
if errors.Is(err, dagstore.ErrDAGAlreadyExists) {
return nil, &Error{
HTTPStatus: http.StatusConflict,
Code: api.ErrorCodeAlreadyExists,
@ -31,29 +32,29 @@ func (a *API) CreateNewDAG(ctx context.Context, request api.CreateNewDAGRequestO
}, nil
}
func (a *API) DeleteDAGByFileId(ctx context.Context, request api.DeleteDAGByFileIdRequestObject) (api.DeleteDAGByFileIdResponseObject, error) {
_, err := a.client.GetDAGStatus(ctx, request.FileId)
func (a *API) DeleteDAG(ctx context.Context, request api.DeleteDAGRequestObject) (api.DeleteDAGResponseObject, error) {
_, err := a.dagClient.Status(ctx, request.FileName)
if err != nil {
return nil, &Error{
HTTPStatus: http.StatusNotFound,
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("DAG %s not found", request.FileId),
Message: fmt.Sprintf("DAG %s not found", request.FileName),
}
}
if err := a.client.DeleteDAG(ctx, request.FileId); err != nil {
if err := a.dagClient.Delete(ctx, request.FileName); err != nil {
return nil, fmt.Errorf("error deleting DAG: %w", err)
}
return &api.DeleteDAGByFileId204Response{}, nil
return &api.DeleteDAG204Response{}, nil
}
func (a *API) GetDAGDefinition(ctx context.Context, request api.GetDAGDefinitionRequestObject) (api.GetDAGDefinitionResponseObject, error) {
spec, err := a.client.GetDAGSpec(ctx, request.FileId)
func (a *API) GetDAGSpec(ctx context.Context, request api.GetDAGSpecRequestObject) (api.GetDAGSpecResponseObject, error) {
spec, err := a.dagClient.GetSpec(ctx, request.FileName)
if err != nil {
return nil, err
}
// Validate the spec
dag, err := a.client.LoadYAML(ctx, []byte(spec), digraph.WithName(request.FileId))
dag, err := a.runClient.LoadYAML(ctx, []byte(spec), digraph.WithName(request.FileName))
var errs []string
var loadErrs digraph.ErrorList
@ -63,24 +64,24 @@ func (a *API) GetDAGDefinition(ctx context.Context, request api.GetDAGDefinition
return nil, err
}
return &api.GetDAGDefinition200JSONResponse{
return &api.GetDAGSpec200JSONResponse{
Dag: toDAGDetails(dag),
Spec: spec,
Errors: errs,
}, nil
}
func (a *API) UpdateDAGDefinition(ctx context.Context, request api.UpdateDAGDefinitionRequestObject) (api.UpdateDAGDefinitionResponseObject, error) {
_, err := a.client.GetDAGStatus(ctx, request.FileId)
func (a *API) UpdateDAGSpec(ctx context.Context, request api.UpdateDAGSpecRequestObject) (api.UpdateDAGSpecResponseObject, error) {
_, err := a.dagClient.Status(ctx, request.FileName)
if err != nil {
return nil, &Error{
HTTPStatus: http.StatusNotFound,
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("DAG %s not found", request.FileId),
Message: fmt.Sprintf("DAG %s not found", request.FileName),
}
}
err = a.client.UpdateDAG(ctx, request.FileId, request.Body.Spec)
err = a.dagClient.Update(ctx, request.FileName, request.Body.Spec)
var errs []string
var loadErrs digraph.ErrorList
@ -90,18 +91,18 @@ func (a *API) UpdateDAGDefinition(ctx context.Context, request api.UpdateDAGDefi
return nil, err
}
return api.UpdateDAGDefinition200JSONResponse{
return api.UpdateDAGSpec200JSONResponse{
Errors: errs,
}, nil
}
func (a *API) RenameDAG(ctx context.Context, request api.RenameDAGRequestObject) (api.RenameDAGResponseObject, error) {
status, err := a.client.GetDAGStatus(ctx, request.FileId)
status, err := a.dagClient.Status(ctx, request.FileName)
if err != nil {
return nil, &Error{
HTTPStatus: http.StatusNotFound,
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("DAG %s not found", request.FileId),
Message: fmt.Sprintf("DAG %s not found", request.FileName),
}
}
if status.Status.Status == scheduler.StatusRunning {
@ -111,46 +112,45 @@ func (a *API) RenameDAG(ctx context.Context, request api.RenameDAGRequestObject)
Message: "DAG is running",
}
}
if err := a.client.MoveDAG(ctx, request.FileId, request.Body.NewFileId); err != nil {
if err := a.dagClient.Move(ctx, request.FileName, request.Body.NewFileName); err != nil {
return nil, fmt.Errorf("failed to move DAG: %w", err)
}
return api.RenameDAG200Response{}, nil
}
func (a *API) GetDAGExecutionHistory(ctx context.Context, request api.GetDAGExecutionHistoryRequestObject) (api.GetDAGExecutionHistoryResponseObject, error) {
status, err := a.client.GetDAGStatus(ctx, request.FileId)
func (a *API) GetDAGRunHistory(ctx context.Context, request api.GetDAGRunHistoryRequestObject) (api.GetDAGRunHistoryResponseObject, error) {
status, err := a.dagClient.Status(ctx, request.FileName)
if err != nil {
return nil, &Error{
HTTPStatus: http.StatusNotFound,
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("DAG %s not found", request.FileId),
Message: fmt.Sprintf("DAG %s not found", request.FileName),
}
}
defaultHistoryLimit := 30
recentRuns := a.client.GetRecentHistory(ctx, status.DAG.Name, defaultHistoryLimit)
recentHistory := a.runClient.ListRecentHistory(ctx, status.DAG.Name, defaultHistoryLimit)
var runs []api.RunDetails
for _, log := range recentRuns {
runs = append(runs, toRunDetails(log.Status))
for _, status := range recentHistory {
runs = append(runs, toRunDetails(status))
}
gridData := a.readHistoryData(ctx, recentRuns)
return api.GetDAGExecutionHistory200JSONResponse{
gridData := a.readHistoryData(ctx, recentHistory)
return api.GetDAGRunHistory200JSONResponse{
Runs: runs,
GridData: gridData,
}, nil
}
func (a *API) GetDAGDetails(ctx context.Context, request api.GetDAGDetailsRequestObject) (api.GetDAGDetailsResponseObject, error) {
location := request.FileId
status, err := a.client.GetDAGStatus(ctx, location)
fileName := request.FileName
status, err := a.dagClient.Status(ctx, fileName)
if err != nil {
return nil, &Error{
HTTPStatus: http.StatusNotFound,
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("DAG %s not found", location),
Message: fmt.Sprintf("DAG %s not found", fileName),
}
}
@ -171,7 +171,7 @@ func (a *API) GetDAGDetails(ctx context.Context, request api.GetDAGDetailsReques
func (a *API) readHistoryData(
_ context.Context,
recentRuns []persistence.Run,
statusList []runstore.Status,
) []api.DAGGridItem {
data := map[string][]scheduler.NodeStatus{}
@ -188,9 +188,9 @@ func (a *API) readHistoryData(
data[nodeName][logIdx] = status
}
for idx, run := range recentRuns {
for _, node := range run.Status.Nodes {
addStatusFn(data, len(recentRuns), idx, node.Step.Name, node.Status)
for idx, status := range statusList {
for _, node := range status.Nodes {
addStatusFn(data, len(statusList), idx, node.Step.Name, node.Status)
}
}
@ -211,19 +211,18 @@ func (a *API) readHistoryData(
})
handlers := map[string][]scheduler.NodeStatus{}
for idx, log := range recentRuns {
if n := log.Status.OnSuccess; n != nil {
addStatusFn(handlers, len(recentRuns), idx, n.Step.Name, n.Status)
for idx, status := range statusList {
if n := status.OnSuccess; n != nil {
addStatusFn(handlers, len(statusList), idx, n.Step.Name, n.Status)
}
if n := log.Status.OnFailure; n != nil {
addStatusFn(handlers, len(recentRuns), idx, n.Step.Name, n.Status)
if n := status.OnFailure; n != nil {
addStatusFn(handlers, len(statusList), idx, n.Step.Name, n.Status)
}
if n := log.Status.OnCancel; n != nil {
n := log.Status.OnCancel
addStatusFn(handlers, len(recentRuns), idx, n.Step.Name, n.Status)
if n := status.OnCancel; n != nil {
addStatusFn(handlers, len(statusList), idx, n.Step.Name, n.Status)
}
if n := log.Status.OnExit; n != nil {
addStatusFn(handlers, len(recentRuns), idx, n.Step.Name, n.Status)
if n := status.OnExit; n != nil {
addStatusFn(handlers, len(statusList), idx, n.Step.Name, n.Status)
}
}
@ -248,42 +247,42 @@ func (a *API) readHistoryData(
return grid
}
func (a *API) ListAllDAGs(ctx context.Context, request api.ListAllDAGsRequestObject) (api.ListAllDAGsResponseObject, error) {
var opts []client.ListStatusOption
func (a *API) ListDAGs(ctx context.Context, request api.ListDAGsRequestObject) (api.ListDAGsResponseObject, error) {
var opts []dagstore.ListDAGOption
if request.Params.PerPage != nil {
opts = append(opts, client.WithLimit(*request.Params.PerPage))
opts = append(opts, dagstore.WithLimit(*request.Params.PerPage))
}
if request.Params.Page != nil {
opts = append(opts, client.WithPage(*request.Params.Page))
opts = append(opts, dagstore.WithPage(*request.Params.Page))
}
if request.Params.Name != nil {
opts = append(opts, client.WithName(*request.Params.Name))
opts = append(opts, dagstore.WithName(*request.Params.Name))
}
if request.Params.Tag != nil {
opts = append(opts, client.WithTag(*request.Params.Tag))
opts = append(opts, dagstore.WithTag(*request.Params.Tag))
}
result, errList, err := a.client.ListStatus(ctx, opts...)
result, errList, err := a.dagClient.List(ctx, opts...)
if err != nil {
return nil, fmt.Errorf("error listing DAGs: %w", err)
}
resp := &api.ListAllDAGs200JSONResponse{
resp := &api.ListDAGs200JSONResponse{
Errors: errList,
Pagination: toPagination(*result),
}
for _, item := range result.Items {
run := api.RunSummary{
Log: item.Status.Log,
Name: item.Status.Name,
Params: ptr(item.Status.Params),
Pid: ptr(int(item.Status.PID)),
RequestId: item.Status.RequestID,
StartedAt: item.Status.StartedAt,
FinishedAt: item.Status.FinishedAt,
Status: api.Status(item.Status.Status),
StatusText: api.StatusText(item.Status.StatusText),
Log: item.Status.Log,
Name: item.Status.Name,
Params: ptrOf(item.Status.Params),
Pid: ptrOf(int(item.Status.PID)),
RequestId: item.Status.RequestID,
StartedAt: item.Status.StartedAt,
FinishedAt: item.Status.FinishedAt,
Status: api.Status(item.Status.Status),
StatusLabel: api.StatusLabel(item.Status.Status.String()),
}
var loadErrs digraph.ErrorList
@ -295,7 +294,7 @@ func (a *API) ListAllDAGs(ctx context.Context, request api.ListAllDAGsRequestObj
}
dag := api.DAGFile{
FileId: item.DAG.FileID(),
FileName: item.DAG.FileName(),
Errors: errs,
LatestRun: run,
Suspended: item.Suspended,
@ -309,7 +308,7 @@ func (a *API) ListAllDAGs(ctx context.Context, request api.ListAllDAGsRequestObj
}
func (a *API) GetAllDAGTags(ctx context.Context, _ api.GetAllDAGTagsRequestObject) (api.GetAllDAGTagsResponseObject, error) {
tags, errs, err := a.client.GetTagList(ctx)
tags, errs, err := a.dagClient.TagList(ctx)
if err != nil {
return nil, fmt.Errorf("error getting tags: %w", err)
}
@ -320,10 +319,10 @@ func (a *API) GetAllDAGTags(ctx context.Context, _ api.GetAllDAGTagsRequestObjec
}
func (a *API) GetDAGRunDetails(ctx context.Context, request api.GetDAGRunDetailsRequestObject) (api.GetDAGRunDetailsResponseObject, error) {
dagLocation := request.FileId
dagFileName := request.FileName
requestId := request.RequestId
dagWithStatus, err := a.client.GetDAGStatus(ctx, dagLocation)
dagWithStatus, err := a.dagClient.Status(ctx, dagFileName)
if err != nil {
return nil, fmt.Errorf("error getting latest status: %w", err)
}
@ -334,23 +333,23 @@ func (a *API) GetDAGRunDetails(ctx context.Context, request api.GetDAGRunDetails
}, nil
}
run, err := a.client.GetStatusByRequestID(ctx, dagWithStatus.DAG, requestId)
status, err := a.runClient.GetRealtimeStatus(ctx, dagWithStatus.DAG, requestId)
if err != nil {
return nil, fmt.Errorf("error getting status by request ID: %w", err)
}
return &api.GetDAGRunDetails200JSONResponse{
Run: toRunDetails(*run),
Run: toRunDetails(*status),
}, nil
}
func (a *API) ExecuteDAG(ctx context.Context, request api.ExecuteDAGRequestObject) (api.ExecuteDAGResponseObject, error) {
status, err := a.client.GetDAGStatus(ctx, request.FileId)
status, err := a.dagClient.Status(ctx, request.FileName)
if err != nil {
return nil, &Error{
HTTPStatus: http.StatusNotFound,
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("DAG %s not found", request.FileId),
Message: fmt.Sprintf("DAG %s not found", request.FileName),
}
}
if status.Status.Status == scheduler.StatusRunning {
@ -360,21 +359,65 @@ func (a *API) ExecuteDAG(ctx context.Context, request api.ExecuteDAGRequestObjec
Message: "DAG is already running",
}
}
if err := a.client.StartDAG(ctx, status.DAG, client.StartOptions{
Params: value(request.Body.Params),
requestID, err := a.runClient.GenerateRequestID(ctx)
if err != nil {
return nil, fmt.Errorf("error generating request ID: %w", err)
}
if err := a.runClient.Start(ctx, status.DAG, runstore.StartOptions{
Params: valueOf(request.Body.Params),
RequestID: requestID,
}); err != nil {
return nil, fmt.Errorf("error starting DAG: %w", err)
}
return api.ExecuteDAG200Response{}, nil
// Wait for the DAG to start
timer := time.NewTimer(3 * time.Second)
var running bool
defer timer.Stop()
waitLoop:
for {
select {
case <-timer.C:
break waitLoop
case <-ctx.Done():
break waitLoop
default:
status, _ := a.runClient.GetRealtimeStatus(ctx, status.DAG, requestID)
if status == nil {
continue
}
if status.Status == scheduler.StatusRunning {
running = true
timer.Stop()
break waitLoop
}
time.Sleep(100 * time.Millisecond)
}
}
if !running {
return nil, &Error{
HTTPStatus: http.StatusInternalServerError,
Code: api.ErrorCodeInternalError,
Message: "DAG did not start",
}
}
return api.ExecuteDAG200JSONResponse{
RequestId: requestID,
}, nil
}
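For illustration only (not part of this commit), a client can now read the request ID directly from the start endpoint's response. A minimal sketch, assuming a server on localhost:8080, the /api/v2 base path used in the tests below, and a hypothetical DAG file named "example"; the "params" field name mirrors the handler's request body:
package main
import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)
func main() {
	// Hypothetical host, port, and DAG file name; adjust for your deployment.
	url := "http://localhost:8080/api/v2/dags/example/start"
	resp, err := http.Post(url, "application/json", bytes.NewBufferString(`{"params": ""}`))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	var out struct {
		RequestId string `json:"requestId"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Println("started run with request ID:", out.RequestId)
}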
func (a *API) TerminateDAGExecution(ctx context.Context, request api.TerminateDAGExecutionRequestObject) (api.TerminateDAGExecutionResponseObject, error) {
status, err := a.client.GetDAGStatus(ctx, request.FileId)
func (a *API) TerminateDAGRun(ctx context.Context, request api.TerminateDAGRunRequestObject) (api.TerminateDAGRunResponseObject, error) {
status, err := a.dagClient.Status(ctx, request.FileName)
if err != nil {
return nil, &Error{
HTTPStatus: http.StatusNotFound,
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("DAG %s not found", request.FileId),
Message: fmt.Sprintf("DAG %s not found", request.FileName),
}
}
if status.Status.Status != scheduler.StatusRunning {
@ -384,19 +427,19 @@ func (a *API) TerminateDAGExecution(ctx context.Context, request api.TerminateDA
Message: "DAG is not running",
}
}
if err := a.client.StopDAG(ctx, status.DAG); err != nil {
if err := a.runClient.Stop(ctx, status.DAG, status.Status.RequestID); err != nil {
return nil, fmt.Errorf("error stopping DAG: %w", err)
}
return api.TerminateDAGExecution200Response{}, nil
return api.TerminateDAGRun200Response{}, nil
}
func (a *API) RetryDAGExecution(ctx context.Context, request api.RetryDAGExecutionRequestObject) (api.RetryDAGExecutionResponseObject, error) {
status, err := a.client.GetDAGStatus(ctx, request.FileId)
func (a *API) RetryDAGRun(ctx context.Context, request api.RetryDAGRunRequestObject) (api.RetryDAGRunResponseObject, error) {
status, err := a.dagClient.Status(ctx, request.FileName)
if err != nil {
return nil, &Error{
HTTPStatus: http.StatusNotFound,
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("DAG %s not found", request.FileId),
Message: fmt.Sprintf("DAG %s not found", request.FileName),
}
}
if status.Status.Status == scheduler.StatusRunning {
@ -407,31 +450,31 @@ func (a *API) RetryDAGExecution(ctx context.Context, request api.RetryDAGExecuti
}
}
if err := a.client.RetryDAG(ctx, status.DAG, request.Body.RequestId); err != nil {
if err := a.runClient.Retry(ctx, status.DAG, request.Body.RequestId); err != nil {
return nil, fmt.Errorf("error retrying DAG: %w", err)
}
return api.RetryDAGExecution200Response{}, nil
return api.RetryDAGRun200Response{}, nil
}
func (a *API) UpdateDAGSuspensionState(ctx context.Context, request api.UpdateDAGSuspensionStateRequestObject) (api.UpdateDAGSuspensionStateResponseObject, error) {
_, err := a.client.GetDAGStatus(ctx, request.FileId)
_, err := a.dagClient.Status(ctx, request.FileName)
if err != nil {
return &api.UpdateDAGSuspensionState404JSONResponse{
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("DAG %s not found", request.FileId),
Message: fmt.Sprintf("DAG %s not found", request.FileName),
}, nil
}
if err := a.client.ToggleSuspend(ctx, request.FileId, request.Body.Suspend); err != nil {
if err := a.dagClient.ToggleSuspend(ctx, request.FileName, request.Body.Suspend); err != nil {
return nil, fmt.Errorf("error toggling suspend: %w", err)
}
return api.UpdateDAGSuspensionState200Response{}, nil
}
func (a *API) SearchDAGDefinitions(ctx context.Context, request api.SearchDAGDefinitionsRequestObject) (api.SearchDAGDefinitionsResponseObject, error) {
ret, errs, err := a.client.GrepDAG(ctx, request.Params.Q)
func (a *API) SearchDAGs(ctx context.Context, request api.SearchDAGsRequestObject) (api.SearchDAGsResponseObject, error) {
ret, errs, err := a.dagClient.Grep(ctx, request.Params.Q)
if err != nil {
return nil, fmt.Errorf("error searching DAGs: %w", err)
}
@ -454,167 +497,8 @@ func (a *API) SearchDAGDefinitions(ctx context.Context, request api.SearchDAGDef
})
}
return &api.SearchDAGDefinitions200JSONResponse{
return &api.SearchDAGs200JSONResponse{
Results: results,
Errors: errs,
}, nil
}
func toDAG(dag *digraph.DAG) api.DAG {
var schedules []api.Schedule
for _, s := range dag.Schedule {
schedules = append(schedules, api.Schedule{Expression: s.Expression})
}
return api.DAG{
Name: dag.Name,
Group: ptr(dag.Group),
Description: ptr(dag.Description),
Params: ptr(dag.Params),
DefaultParams: ptr(dag.DefaultParams),
Tags: ptr(dag.Tags),
Schedule: ptr(schedules),
}
}
func toStep(obj digraph.Step) api.Step {
var conditions []api.Precondition
for _, cond := range obj.Preconditions {
conditions = append(conditions, toPrecondition(cond))
}
repeatPolicy := api.RepeatPolicy{
Repeat: ptr(obj.RepeatPolicy.Repeat),
Interval: ptr(int(obj.RepeatPolicy.Interval.Seconds())),
}
step := api.Step{
Name: obj.Name,
Description: ptr(obj.Description),
Args: ptr(obj.Args),
CmdWithArgs: ptr(obj.CmdWithArgs),
Command: ptr(obj.Command),
Depends: ptr(obj.Depends),
Dir: ptr(obj.Dir),
MailOnError: ptr(obj.MailOnError),
Output: ptr(obj.Output),
Preconditions: ptr(conditions),
RepeatPolicy: ptr(repeatPolicy),
Script: ptr(obj.Script),
}
if obj.SubDAG != nil {
step.Run = ptr(obj.SubDAG.Name)
step.Params = ptr(obj.SubDAG.Params)
}
return step
}
func toPrecondition(obj digraph.Condition) api.Precondition {
return api.Precondition{
Condition: ptr(obj.Condition),
Expected: ptr(obj.Expected),
}
}
func toRunDetails(s persistence.Status) api.RunDetails {
status := api.RunDetails{
Log: s.Log,
Name: s.Name,
Params: ptr(s.Params),
Pid: ptr(int(s.PID)),
RequestId: s.RequestID,
StartedAt: s.StartedAt,
FinishedAt: s.FinishedAt,
Status: api.Status(s.Status),
StatusText: api.StatusText(s.StatusText),
}
for _, n := range s.Nodes {
status.Nodes = append(status.Nodes, toNode(n))
}
if s.OnSuccess != nil {
status.OnSuccess = ptr(toNode(s.OnSuccess))
}
if s.OnFailure != nil {
status.OnFailure = ptr(toNode(s.OnFailure))
}
if s.OnCancel != nil {
status.OnCancel = ptr(toNode(s.OnCancel))
}
if s.OnExit != nil {
status.OnExit = ptr(toNode(s.OnExit))
}
return status
}
func toNode(node *persistence.Node) api.Node {
return api.Node{
DoneCount: node.DoneCount,
FinishedAt: node.FinishedAt,
Log: node.Log,
RetryCount: node.RetryCount,
StartedAt: node.StartedAt,
Status: api.NodeStatus(node.Status),
StatusText: api.NodeStatusText(node.StatusText),
Step: toStep(node.Step),
Error: ptr(node.Error),
}
}
func toDAGDetails(dag *digraph.DAG) *api.DAGDetails {
var details *api.DAGDetails
if dag == nil {
return details
}
var steps []api.Step
for _, step := range dag.Steps {
steps = append(steps, toStep(step))
}
handlers := dag.HandlerOn
handlerOn := api.HandlerOn{}
if handlers.Failure != nil {
handlerOn.Failure = ptr(toStep(*handlers.Failure))
}
if handlers.Success != nil {
handlerOn.Success = ptr(toStep(*handlers.Success))
}
if handlers.Cancel != nil {
handlerOn.Cancel = ptr(toStep(*handlers.Cancel))
}
if handlers.Exit != nil {
handlerOn.Exit = ptr(toStep(*handlers.Exit))
}
var schedules []api.Schedule
for _, s := range dag.Schedule {
schedules = append(schedules, api.Schedule{
Expression: s.Expression,
})
}
var preconditions []api.Precondition
for _, p := range dag.Preconditions {
preconditions = append(preconditions, toPrecondition(p))
}
return &api.DAGDetails{
Name: dag.Name,
Description: ptr(dag.Description),
DefaultParams: ptr(dag.DefaultParams),
Delay: ptr(int(dag.Delay.Seconds())),
Env: ptr(dag.Env),
Group: ptr(dag.Group),
HandlerOn: ptr(handlerOn),
HistRetentionDays: ptr(dag.HistRetentionDays),
LogDir: ptr(dag.LogDir),
MaxActiveRuns: ptr(dag.MaxActiveRuns),
Params: ptr(dag.Params),
Preconditions: ptr(preconditions),
Schedule: ptr(schedules),
Steps: ptr(steps),
Tags: ptr(dag.Tags),
}
}

View File

@ -19,7 +19,7 @@ func TestDAG(t *testing.T) {
// Fetch the created DAG with the list endpoint
resp := server.Client().Get("/api/v2/dags?name=test_dag").ExpectStatus(http.StatusOK).Send(t)
var apiResp api.ListAllDAGs200JSONResponse
var apiResp api.ListDAGs200JSONResponse
resp.Unmarshal(t, &apiResp)
require.Len(t, apiResp.Dags, 1, "expected one DAG")

View File

@ -7,18 +7,19 @@ import (
"os"
"github.com/dagu-org/dagu/api/v2"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"golang.org/x/text/encoding"
"golang.org/x/text/transform"
)
func (a *API) GetDAGRunLog(ctx context.Context, request api.GetDAGRunLogRequestObject) (api.GetDAGRunLogResponseObject, error) {
func (a *API) GetRunLog(ctx context.Context, request api.GetRunLogRequestObject) (api.GetRunLogResponseObject, error) {
dagName := request.DagName
requestId := request.RequestId
status, err := a.client.GetStatus(ctx, dagName, requestId)
status, err := a.runClient.FindByRequestID(ctx, dagName, requestId)
if err != nil {
return api.GetDAGRunLog404JSONResponse{
return api.GetRunLog404JSONResponse{
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("request ID %s not found for DAG %s", requestId, dagName),
}, nil
@ -29,18 +30,18 @@ func (a *API) GetDAGRunLog(ctx context.Context, request api.GetDAGRunLogRequestO
return nil, fmt.Errorf("error reading %s: %w", status.Log, err)
}
return api.GetDAGRunLog200JSONResponse{
return api.GetRunLog200JSONResponse{
Content: string(content),
}, nil
}
func (a *API) GetDAGStepLog(ctx context.Context, request api.GetDAGStepLogRequestObject) (api.GetDAGStepLogResponseObject, error) {
func (a *API) GetRunStepLog(ctx context.Context, request api.GetRunStepLogRequestObject) (api.GetRunStepLogResponseObject, error) {
dagName := request.DagName
requestId := request.RequestId
status, err := a.client.GetStatus(ctx, dagName, requestId)
status, err := a.runClient.FindByRequestID(ctx, dagName, requestId)
if err != nil {
return api.GetDAGStepLog404JSONResponse{
return api.GetRunStepLog404JSONResponse{
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("request ID %s not found for DAG %s", requestId, dagName),
}, nil
@ -48,7 +49,7 @@ func (a *API) GetDAGStepLog(ctx context.Context, request api.GetDAGStepLogReques
node, err := status.NodeByName(request.StepName)
if err != nil {
return api.GetDAGStepLog404JSONResponse{
return api.GetRunStepLog404JSONResponse{
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("step %s not found in DAG %s", request.StepName, dagName),
}, nil
@ -59,21 +60,21 @@ func (a *API) GetDAGStepLog(ctx context.Context, request api.GetDAGStepLogReques
return nil, fmt.Errorf("error reading %s: %w", status.Log, err)
}
return api.GetDAGStepLog200JSONResponse{
return api.GetRunStepLog200JSONResponse{
Content: string(content),
}, nil
}
func (a *API) UpdateDAGStepStatus(ctx context.Context, request api.UpdateDAGStepStatusRequestObject) (api.UpdateDAGStepStatusResponseObject, error) {
status, err := a.client.GetStatus(ctx, request.DagName, request.RequestId)
func (a *API) UpdateRunStepStatus(ctx context.Context, request api.UpdateRunStepStatusRequestObject) (api.UpdateRunStepStatusResponseObject, error) {
status, err := a.runClient.FindByRequestID(ctx, request.DagName, request.RequestId)
if err != nil {
return &api.UpdateDAGStepStatus404JSONResponse{
return &api.UpdateRunStepStatus404JSONResponse{
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("request ID %s not found for DAG %s", request.RequestId, request.DagName),
}, nil
}
if status.Status == scheduler.StatusRunning {
return &api.UpdateDAGStepStatus400JSONResponse{
return &api.UpdateRunStepStatus400JSONResponse{
Code: api.ErrorCodeBadRequest,
Message: fmt.Sprintf("request ID %s for DAG %s is still running", request.RequestId, request.DagName),
}, nil
@ -87,20 +88,139 @@ func (a *API) UpdateDAGStepStatus(ctx context.Context, request api.UpdateDAGStep
}
}
if idxToUpdate < 0 {
return &api.UpdateDAGStepStatus404JSONResponse{
return &api.UpdateRunStepStatus404JSONResponse{
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("step %s not found in DAG %s", request.StepName, request.DagName),
}, nil
}
status.Nodes[idxToUpdate].Status = nodeStatusMapping[request.Body.Status]
status.Nodes[idxToUpdate].StatusText = nodeStatusMapping[request.Body.Status].String()
if err := a.client.UpdateStatus(ctx, request.DagName, *status); err != nil {
rootDAG := digraph.NewRootDAG(request.DagName, request.RequestId)
if err := a.runClient.UpdateStatus(ctx, rootDAG, *status); err != nil {
return nil, fmt.Errorf("error updating status: %w", err)
}
return &api.UpdateDAGStepStatus200Response{}, nil
return &api.UpdateRunStepStatus200Response{}, nil
}
// GetRunDetails implements api.StrictServerInterface.
func (a *API) GetRunDetails(ctx context.Context, request api.GetRunDetailsRequestObject) (api.GetRunDetailsResponseObject, error) {
status, err := a.runClient.FindByRequestID(ctx, request.DagName, request.RequestId)
if err != nil {
return &api.GetRunDetails404JSONResponse{
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("request ID %s not found for DAG %s", request.RequestId, request.DagName),
}, nil
}
return &api.GetRunDetails200JSONResponse{
RunDetails: toRunDetails(*status),
}, nil
}
// GetSubRunDetails implements api.StrictServerInterface.
func (a *API) GetSubRunDetails(ctx context.Context, request api.GetSubRunDetailsRequestObject) (api.GetSubRunDetailsResponseObject, error) {
root := digraph.NewRootDAG(request.DagName, request.RequestId)
status, err := a.runClient.FindBySubRunRequestID(ctx, root, request.SubRunRequestId)
if err != nil {
return &api.GetSubRunDetails404JSONResponse{
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("request ID %s not found for DAG %s", request.RequestId, request.DagName),
}, nil
}
return &api.GetSubRunDetails200JSONResponse{
RunDetails: toRunDetails(*status),
}, nil
}
// GetSubRunLog implements api.StrictServerInterface.
func (a *API) GetSubRunLog(ctx context.Context, request api.GetSubRunLogRequestObject) (api.GetSubRunLogResponseObject, error) {
root := digraph.NewRootDAG(request.DagName, request.RequestId)
status, err := a.runClient.FindBySubRunRequestID(ctx, root, request.SubRunRequestId)
if err != nil {
return &api.GetSubRunLog404JSONResponse{
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("request ID %s not found for DAG %s", request.RequestId, request.DagName),
}, nil
}
content, err := a.readFileContent(ctx, status.Log, nil)
if err != nil {
return nil, fmt.Errorf("error reading %s: %w", status.Log, err)
}
return &api.GetSubRunLog200JSONResponse{
Content: string(content),
}, nil
}
// GetSubRunStepLog implements api.StrictServerInterface.
func (a *API) GetSubRunStepLog(ctx context.Context, request api.GetSubRunStepLogRequestObject) (api.GetSubRunStepLogResponseObject, error) {
root := digraph.NewRootDAG(request.DagName, request.RequestId)
status, err := a.runClient.FindBySubRunRequestID(ctx, root, request.SubRunRequestId)
if err != nil {
return &api.GetSubRunStepLog404JSONResponse{
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("request ID %s not found for DAG %s", request.RequestId, request.DagName),
}, nil
}
node, err := status.NodeByName(request.StepName)
if err != nil {
return &api.GetSubRunStepLog404JSONResponse{
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("step %s not found in DAG %s", request.StepName, request.DagName),
}, nil
}
content, err := a.readFileContent(ctx, node.Log, nil)
if err != nil {
return nil, fmt.Errorf("error reading %s: %w", status.Log, err)
}
return &api.GetSubRunStepLog200JSONResponse{
Content: string(content),
}, nil
}
// UpdateSubRunStepStatus implements api.StrictServerInterface.
func (a *API) UpdateSubRunStepStatus(ctx context.Context, request api.UpdateSubRunStepStatusRequestObject) (api.UpdateSubRunStepStatusResponseObject, error) {
root := digraph.NewRootDAG(request.DagName, request.RequestId)
status, err := a.runClient.FindBySubRunRequestID(ctx, root, request.SubRunRequestId)
if err != nil {
return &api.UpdateSubRunStepStatus404JSONResponse{
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("request ID %s not found for DAG %s", request.RequestId, request.DagName),
}, nil
}
if status.Status == scheduler.StatusRunning {
return &api.UpdateSubRunStepStatus400JSONResponse{
Code: api.ErrorCodeBadRequest,
Message: fmt.Sprintf("request ID %s for DAG %s is still running", request.RequestId, request.DagName),
}, nil
}
idxToUpdate := -1
for idx, n := range status.Nodes {
if n.Step.Name == request.StepName {
idxToUpdate = idx
}
}
if idxToUpdate < 0 {
return &api.UpdateSubRunStepStatus404JSONResponse{
Code: api.ErrorCodeNotFound,
Message: fmt.Sprintf("step %s not found in DAG %s", request.StepName, request.DagName),
}, nil
}
status.Nodes[idxToUpdate].Status = nodeStatusMapping[request.Body.Status]
if err := a.runClient.UpdateStatus(ctx, root, *status); err != nil {
return nil, fmt.Errorf("error updating status: %w", err)
}
return &api.UpdateSubRunStepStatus200Response{}, nil
}
func (a *API) readFileContent(_ context.Context, f string, d *encoding.Decoder) ([]byte, error) {

View File

@ -0,0 +1,177 @@
package api
import (
"github.com/dagu-org/dagu/api/v2"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/runstore"
)
func toDAG(dag *digraph.DAG) api.DAG {
var schedules []api.Schedule
for _, s := range dag.Schedule {
schedules = append(schedules, api.Schedule{Expression: s.Expression})
}
return api.DAG{
Name: dag.Name,
Group: ptrOf(dag.Group),
Description: ptrOf(dag.Description),
Params: ptrOf(dag.Params),
DefaultParams: ptrOf(dag.DefaultParams),
Tags: ptrOf(dag.Tags),
Schedule: ptrOf(schedules),
}
}
func toStep(obj digraph.Step) api.Step {
var conditions []api.Precondition
for _, cond := range obj.Preconditions {
conditions = append(conditions, toPrecondition(cond))
}
repeatPolicy := api.RepeatPolicy{
Repeat: ptrOf(obj.RepeatPolicy.Repeat),
Interval: ptrOf(int(obj.RepeatPolicy.Interval.Seconds())),
}
step := api.Step{
Name: obj.Name,
Description: ptrOf(obj.Description),
Args: ptrOf(obj.Args),
CmdWithArgs: ptrOf(obj.CmdWithArgs),
Command: ptrOf(obj.Command),
Depends: ptrOf(obj.Depends),
Dir: ptrOf(obj.Dir),
MailOnError: ptrOf(obj.MailOnError),
Output: ptrOf(obj.Output),
Preconditions: ptrOf(conditions),
RepeatPolicy: ptrOf(repeatPolicy),
Script: ptrOf(obj.Script),
}
if obj.SubDAG != nil {
step.Run = ptrOf(obj.SubDAG.Name)
step.Params = ptrOf(obj.SubDAG.Params)
}
return step
}
func toPrecondition(obj digraph.Condition) api.Precondition {
return api.Precondition{
Condition: ptrOf(obj.Condition),
Expected: ptrOf(obj.Expected),
}
}
func toRunDetails(s runstore.Status) api.RunDetails {
status := api.RunDetails{
Log: s.Log,
Name: s.Name,
Params: ptrOf(s.Params),
Pid: ptrOf(int(s.PID)),
RequestId: s.RequestID,
StartedAt: s.StartedAt,
FinishedAt: s.FinishedAt,
Status: api.Status(s.Status),
StatusLabel: api.StatusLabel(s.Status.String()),
}
for _, n := range s.Nodes {
status.Nodes = append(status.Nodes, toNode(n))
}
if s.OnSuccess != nil {
status.OnSuccess = ptrOf(toNode(s.OnSuccess))
}
if s.OnFailure != nil {
status.OnFailure = ptrOf(toNode(s.OnFailure))
}
if s.OnCancel != nil {
status.OnCancel = ptrOf(toNode(s.OnCancel))
}
if s.OnExit != nil {
status.OnExit = ptrOf(toNode(s.OnExit))
}
return status
}
func toNode(node *runstore.Node) api.Node {
return api.Node{
DoneCount: node.DoneCount,
FinishedAt: node.FinishedAt,
Log: node.Log,
RetryCount: node.RetryCount,
StartedAt: node.StartedAt,
Status: api.NodeStatus(node.Status),
StatusLabel: api.NodeStatusLabel(node.Status.String()),
Step: toStep(node.Step),
Error: ptrOf(node.Error),
SubRuns: ptrOf(toSubRuns(node.SubRuns)),
}
}
func toSubRuns(subRuns []runstore.SubRun) []api.SubRun {
var result []api.SubRun
for _, r := range subRuns {
result = append(result, api.SubRun{
RequestId: r.RequestID,
})
}
return result
}
func toDAGDetails(dag *digraph.DAG) *api.DAGDetails {
var details *api.DAGDetails
if dag == nil {
return details
}
var steps []api.Step
for _, step := range dag.Steps {
steps = append(steps, toStep(step))
}
handlers := dag.HandlerOn
handlerOn := api.HandlerOn{}
if handlers.Failure != nil {
handlerOn.Failure = ptrOf(toStep(*handlers.Failure))
}
if handlers.Success != nil {
handlerOn.Success = ptrOf(toStep(*handlers.Success))
}
if handlers.Cancel != nil {
handlerOn.Cancel = ptrOf(toStep(*handlers.Cancel))
}
if handlers.Exit != nil {
handlerOn.Exit = ptrOf(toStep(*handlers.Exit))
}
var schedules []api.Schedule
for _, s := range dag.Schedule {
schedules = append(schedules, api.Schedule{
Expression: s.Expression,
})
}
var preconditions []api.Precondition
for _, p := range dag.Preconditions {
preconditions = append(preconditions, toPrecondition(p))
}
return &api.DAGDetails{
Name: dag.Name,
Description: ptrOf(dag.Description),
DefaultParams: ptrOf(dag.DefaultParams),
Delay: ptrOf(int(dag.Delay.Seconds())),
Env: ptrOf(dag.Env),
Group: ptrOf(dag.Group),
HandlerOn: ptrOf(handlerOn),
HistRetentionDays: ptrOf(dag.HistRetentionDays),
LogDir: ptrOf(dag.LogDir),
MaxActiveRuns: ptrOf(dag.MaxActiveRuns),
Params: ptrOf(dag.Params),
Preconditions: ptrOf(preconditions),
Schedule: ptrOf(schedules),
Steps: ptrOf(steps),
Tags: ptrOf(dag.Tags),
}
}

View File

@ -15,12 +15,13 @@ import (
"syscall"
"time"
"github.com/dagu-org/dagu/internal/client"
"github.com/dagu-org/dagu/internal/config"
"github.com/dagu-org/dagu/internal/dagstore"
apiv1 "github.com/dagu-org/dagu/internal/frontend/api/v1"
apiv2 "github.com/dagu-org/dagu/internal/frontend/api/v2"
"github.com/dagu-org/dagu/internal/frontend/metrics"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/go-chi/cors"
@ -37,14 +38,14 @@ type Server struct {
}
// NewServer creates a new Server instance with the given configuration and client
func NewServer(cfg *config.Config, cli client.Client) *Server {
func NewServer(cfg *config.Config, dagCli dagstore.Client, runCli runstore.Client) *Server {
var remoteNodes []string
for _, n := range cfg.Server.RemoteNodes {
remoteNodes = append(remoteNodes, n.Name)
}
return &Server{
apiV1: apiv1.New(cli, cfg),
apiV2: apiv2.New(cli, cfg),
apiV1: apiv1.New(dagCli, runCli, cfg),
apiV2: apiv2.New(dagCli, runCli, cfg),
config: cfg,
funcsConfig: funcsConfig{
NavbarColor: cfg.UI.NavbarColor,
@ -83,6 +84,7 @@ func (srv *Server) Serve(ctx context.Context) error {
AllowCredentials: true,
MaxAge: 300, // Maximum value not ignored by any of major browsers
}))
r.Use(middleware.RedirectSlashes)
// Configure API paths
apiV1BasePath, apiV2BasePath := srv.configureAPIPaths()

View File

@ -1,32 +1,33 @@
{{define "base"}}
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>{{ navbarTitle }}</title>
<script>
function getConfig() {
return {
apiURL: "{{ apiURL }}",
basePath: "{{ basePath }}",
title: "{{ navbarTitle }}",
navbarColor: "{{ navbarColor }}",
version: "{{ version }}",
tz: "{{ tz }}",
maxDashboardPageLimit: "{{ maxDashboardPageLimit }}",
remoteNodes: "{{ remoteNodes }}",
};
}
</script>
<script
defer="defer"
src="{{ basePath }}/assets/bundle.js?v={{ version }}"
></script>
</head>
<body>
{{template "content" .}}
</body>
<head>
<meta charset="UTF-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<link rel="icon" href="/assets/favicon.ico" />
<title>{{ navbarTitle }}</title>
<script>
function getConfig() {
return {
apiURL: "{{ apiURL }}",
basePath: "{{ basePath }}",
title: "{{ navbarTitle }}",
navbarColor: "{{ navbarColor }}",
version: "{{ version }}",
tz: "{{ tz }}",
maxDashboardPageLimit: "{{ maxDashboardPageLimit }}",
remoteNodes: "{{ remoteNodes }}",
};
}
</script>
<script
defer="defer"
src="{{ basePath }}/assets/bundle.js?v={{ version }}"
></script>
</head>
<body>
{{template "content" .}}
</body>
</html>
{{ end }}

View File

@ -11,7 +11,7 @@ import (
"github.com/dagu-org/dagu/internal/cmd"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/dagu-org/dagu/internal/test"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
@ -134,56 +134,56 @@ steps:
// Update the child_2 status to "failed" to simulate a retry
// First, find the child_2 request ID to update its status
ctx := context.Background()
parentRec, err := th.HistoryStore.FindByRequestID(ctx, "parent", requestID)
parentRec, err := th.RunStore.FindByRequestID(ctx, "parent", requestID)
require.NoError(t, err)
updateStatus := func(rec persistence.Record, run *persistence.Run) {
updateStatus := func(rec runstore.Record, status *runstore.Status) {
err = rec.Open(ctx)
require.NoError(t, err)
err = rec.Write(ctx, run.Status)
err = rec.Write(ctx, *status)
require.NoError(t, err)
err = rec.Close(ctx)
require.NoError(t, err)
}
// (1) Find the child_1 node and update its status to "failed"
parentRun, err := parentRec.ReadRun(ctx)
parentStatus, err := parentRec.ReadStatus(ctx)
require.NoError(t, err)
child1Node := parentRun.Status.Nodes[0]
child1Node := parentStatus.Nodes[0]
child1Node.Status = scheduler.NodeStatusError
updateStatus(parentRec, parentRun)
updateStatus(parentRec, parentStatus)
// (2) Find the history record for child_1
// (2) Find the runstore record for child_1
rootDAG := digraph.NewRootDAG("parent", requestID)
child1Rec, err := th.HistoryStore.FindBySubRequestID(ctx, child1Node.RequestID, rootDAG)
child1Rec, err := th.RunStore.FindBySubRunRequestID(ctx, child1Node.SubRuns[0].RequestID, rootDAG)
require.NoError(t, err)
child1Run, err := child1Rec.ReadRun(ctx)
child1Status, err := child1Rec.ReadStatus(ctx)
require.NoError(t, err)
// (3) Find the child_2 node and update its status to "failed"
child2Node := child1Run.Status.Nodes[0]
child2Node := child1Status.Nodes[0]
child2Node.Status = scheduler.NodeStatusError
updateStatus(child1Rec, child1Run)
updateStatus(child1Rec, child1Status)
// (4) Find the history record for child_2
child2Rec, err := th.HistoryStore.FindBySubRequestID(ctx, child2Node.RequestID, rootDAG)
// (4) Find the runstore record for child_2
child2Rec, err := th.RunStore.FindBySubRunRequestID(ctx, child2Node.SubRuns[0].RequestID, rootDAG)
require.NoError(t, err)
child2Run, err := child2Rec.ReadRun(ctx)
child2Status, err := child2Rec.ReadStatus(ctx)
require.NoError(t, err)
require.Equal(t, child2Run.Status.Status.String(), scheduler.NodeStatusSuccess.String())
require.Equal(t, child2Status.Status.String(), scheduler.NodeStatusSuccess.String())
// (5) Update the step in child_2 to "failed" to simulate a retry
child2Run.Status.Nodes[0].Status = scheduler.NodeStatusError
updateStatus(child2Rec, child2Run)
child2Status.Nodes[0].Status = scheduler.NodeStatusError
updateStatus(child2Rec, child2Status)
// (6) Check if the child_2 status is now "failed"
child2Run, err = child2Rec.ReadRun(ctx)
child2Status, err = child2Rec.ReadStatus(ctx)
require.NoError(t, err)
require.Equal(t, child2Run.Status.Nodes[0].Status.String(), scheduler.NodeStatusError.String())
require.Equal(t, child2Status.Nodes[0].Status.String(), scheduler.NodeStatusError.String())
// Retry the DAG
@ -194,11 +194,11 @@ steps:
})
// Check if the child_2 status is now "success"
child2Rec, err = th.HistoryStore.FindBySubRequestID(ctx, child2Node.RequestID, rootDAG)
child2Rec, err = th.RunStore.FindBySubRunRequestID(ctx, child2Node.SubRuns[0].RequestID, rootDAG)
require.NoError(t, err)
child2Run, err = child2Rec.ReadRun(ctx)
child2Status, err = child2Rec.ReadStatus(ctx)
require.NoError(t, err)
require.Equal(t, child2Run.Status.Nodes[0].Status.String(), scheduler.NodeStatusSuccess.String())
require.Equal(t, child2Status.Nodes[0].Status.String(), scheduler.NodeStatusSuccess.String())
}
// verifyLogs checks if the expected log directory and files exist

View File

@ -1,9 +0,0 @@
package persistence
import "errors"
var (
ErrDAGNotFound = errors.New("DAG is not found")
ErrRequestIDNotFound = errors.New("request id not found")
ErrNoStatusData = errors.New("no status data")
)

View File

@ -1,110 +0,0 @@
package persistence
import (
"context"
"fmt"
"time"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/persistence/grep"
)
// HistoryStore manages execution history records for DAGs
type HistoryStore interface {
// NewRecord creates a new history record for a DAG run
NewRecord(ctx context.Context, dag *digraph.DAG, timestamp time.Time, reqID string, opts NewRecordOptions) (Record, error)
// Update updates the status of an existing record identified by name and reqID
Update(ctx context.Context, name, reqID string, status Status) error
// Recent returns the most recent history records for a DAG, limited by itemLimit
Recent(ctx context.Context, name string, itemLimit int) []Record
// Latest returns the most recent history record for a DAG
Latest(ctx context.Context, name string) (Record, error)
// FindByRequestID finds a history record by its request ID
FindByRequestID(ctx context.Context, name string, reqID string) (Record, error)
// FindBySubRequestID finds a sub-DAG history record by its request ID
FindBySubRequestID(ctx context.Context, reqID string, rootDAG digraph.RootDAG) (Record, error)
// RemoveOld removes history records older than retentionDays
RemoveOld(ctx context.Context, name string, retentionDays int) error
// Rename renames all history records from oldName to newName
Rename(ctx context.Context, oldName, newName string) error
}
// NewRecordOptions contains options for creating a new history record
type NewRecordOptions struct {
Root *digraph.RootDAG
Retry bool
}
// Record represents a single execution history record that can be read and written
type Record interface {
// Open prepares the record for writing
Open(ctx context.Context) error
// Write updates the record with new status information
Write(ctx context.Context, status Status) error
// Close finalizes any pending operations on the record
Close(ctx context.Context) error
// ReadRun retrieves the run metadata for this record
ReadRun(ctx context.Context) (*Run, error)
// ReadStatus retrieves the execution status for this record
ReadStatus(ctx context.Context) (*Status, error)
}
// DAGStore manages storage and retrieval of DAG definitions
type DAGStore interface {
// Create stores a new DAG definition with the given name and returns its ID
Create(ctx context.Context, name string, spec []byte) (string, error)
// Delete removes a DAG definition by name
Delete(ctx context.Context, name string) error
// List returns a paginated list of DAG definitions with filtering options
List(ctx context.Context, params ListOptions) (PaginatedResult[*digraph.DAG], []string, error)
// GetMetadata retrieves only the metadata of a DAG definition (faster than full load)
GetMetadata(ctx context.Context, name string) (*digraph.DAG, error)
// GetDetails retrieves the complete DAG definition including all fields
GetDetails(ctx context.Context, name string) (*digraph.DAG, error)
// Grep searches for a pattern in all DAG definitions and returns matching results
Grep(ctx context.Context, pattern string) (ret []*GrepResult, errs []string, err error)
// Rename changes a DAG's identifier from oldID to newID
Rename(ctx context.Context, oldID, newID string) error
// GetSpec retrieves the raw YAML specification of a DAG
GetSpec(ctx context.Context, name string) (string, error)
// UpdateSpec modifies the specification of an existing DAG
UpdateSpec(ctx context.Context, name string, spec []byte) error
// LoadSpec loads a DAG from a YAML file and returns the DAG object
LoadSpec(ctx context.Context, spec []byte, opts ...digraph.LoadOption) (*digraph.DAG, error)
// TagList returns all unique tags across all DAGs with any errors encountered
TagList(ctx context.Context) ([]string, []string, error)
}
// Errors for DAGStore operations
var (
ErrDAGAlreadyExists = fmt.Errorf("DAG already exists")
)
// ListOptions contains parameters for paginated DAG listing
type ListOptions struct {
Paginator *Paginator
Name string // Optional name filter
Tag string // Optional tag filter
}
// ListResult contains the result of a paginated DAG listing operation
type ListResult struct {
DAGs []*digraph.DAG // The list of DAGs for the current page
Count int // Total count of DAGs matching the filter
Errors []string // Any errors encountered during listing
}
// GrepResult represents the result of a pattern search within a DAG definition
type GrepResult struct {
Name string // Name of the DAG
DAG *digraph.DAG // The DAG object
Matches []*grep.Match // Matching lines and their context
}
// FlagStore manages persistent flags for DAGs such as suspension state
type FlagStore interface {
// ToggleSuspend changes the suspension state of a DAG by ID
ToggleSuspend(id string, suspend bool) error
// IsSuspended checks if a DAG is currently suspended
IsSuspended(id string) bool
}

View File

@ -1,10 +0,0 @@
package jsondb
import "errors"
// Error definitions for directory structure validation
var (
// ErrInvalidRunDir is returned when a run directory has an invalid format
// and cannot be parsed to extract timestamp and request ID information.
ErrInvalidRunDir = errors.New("invalid run directory")
)

View File

@ -1,27 +0,0 @@
package local
import (
"os"
"testing"
"github.com/dagu-org/dagu/internal/fileutil"
"github.com/dagu-org/dagu/internal/persistence/local/storage"
"github.com/stretchr/testify/require"
)
func TestFlagStore(t *testing.T) {
tmpDir := fileutil.MustTempDir("test-suspend-checker")
defer func() {
_ = os.RemoveAll(tmpDir)
}()
flagStore := NewFlagStore(storage.NewStorage(tmpDir))
require.False(t, flagStore.IsSuspended("test"))
err := flagStore.ToggleSuspend("test", true)
require.NoError(t, err)
require.True(t, flagStore.IsSuspended("test"))
}

View File

@ -1,53 +0,0 @@
package local
import (
"fmt"
"regexp"
"strings"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/persistence/local/storage"
)
type flagStoreImpl struct {
storage *storage.Storage
}
func NewFlagStore(s *storage.Storage) persistence.FlagStore {
return &flagStoreImpl{
storage: s,
}
}
func (f flagStoreImpl) ToggleSuspend(id string, suspend bool) error {
if suspend {
return f.storage.Create(fileName(id))
} else if f.IsSuspended(id) {
return f.storage.Delete(fileName(id))
}
return nil
}
func (f flagStoreImpl) IsSuspended(id string) bool {
return f.storage.Exists(fileName(id))
}
func fileName(id string) string {
return fmt.Sprintf("%s.suspend", normalizeFilename(id, "-"))
}
// https://github.com/sindresorhus/filename-reserved-regex/blob/master/index.js
var (
filenameReservedRegex = regexp.MustCompile(
`[<>:"/\\|?*\x00-\x1F]`,
)
filenameReservedWindowsNamesRegex = regexp.MustCompile(
`(?i)^(con|prn|aux|nul|com[0-9]|lpt[0-9])$`,
)
)
func normalizeFilename(str, replacement string) string {
s := filenameReservedRegex.ReplaceAllString(str, replacement)
s = filenameReservedWindowsNamesRegex.ReplaceAllString(s, replacement)
return strings.ReplaceAll(s, " ", replacement)
}

View File

@ -1,39 +0,0 @@
package storage
import (
"os"
"path"
)
// Storage is a storage for flags.
type Storage struct {
Dir string
}
// defaultPermission is the default permission for the directory.
var defaultPermission os.FileMode = 0750
// NewStorage creates a new storage.
func NewStorage(dir string) *Storage {
_ = os.MkdirAll(dir, defaultPermission)
return &Storage{
Dir: dir,
}
}
// Create creates the given file.
func (s *Storage) Create(file string) error {
return os.WriteFile(path.Join(s.Dir, file), []byte{}, defaultPermission)
}
// Exists returns true if the given file exists.
func (s *Storage) Exists(file string) bool {
_, err := os.Stat(path.Join(s.Dir, file))
return err == nil
}
// Delete deletes the given file.
func (s *Storage) Delete(file string) error {
return os.Remove(path.Join(s.Dir, file))
}

View File

@ -1,32 +0,0 @@
package storage
import (
"os"
"testing"
"github.com/dagu-org/dagu/internal/fileutil"
"github.com/stretchr/testify/require"
)
func TestStorage(t *testing.T) {
tmpDir := fileutil.MustTempDir("test-storage")
defer os.RemoveAll(tmpDir)
storage := NewStorage(tmpDir)
f := "test.flag"
exist := storage.Exists(f)
require.False(t, exist)
err := storage.Create(f)
require.NoError(t, err)
exist = storage.Exists(f)
require.True(t, exist)
err = storage.Delete(f)
require.NoError(t, err)
exist = storage.Exists(f)
require.False(t, exist)
}

View File

@ -1,15 +0,0 @@
package persistence
// Run represents metadata about a DAG run
type Run struct {
File string
Status Status
}
// NewRun creates a new Run instance with the specified file path and status
func NewRun(file string, status Status) *Run {
return &Run{
File: file,
Status: status,
}
}

internal/runstore/client.go (new file, 381 lines)
View File

@ -0,0 +1,381 @@
package runstore
import (
"context"
"errors"
"fmt"
"os"
"os/exec"
"slices"
"strings"
"syscall"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/fileutil"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/sock"
"github.com/google/uuid"
)
// NewClient creates a new Client instance.
// The Client is used to interact with the DAG.
func NewClient(
runStore Store,
executable string,
workDir string,
) Client {
return Client{
runStore: runStore,
executable: executable,
workDir: workDir,
}
}
// Client provides methods to interact with DAGs, including starting, stopping,
// restarting, and retrieving status information. It communicates with the DAG
// through a socket interface and manages run records through a Store.
type Client struct {
runStore Store // Store interface for persisting run data
executable string // Path to the executable used to run DAGs
workDir string // Working directory for executing commands
}
// LoadYAML loads a DAG from YAML specification bytes without evaluating it.
// It appends the WithoutEval option to any provided options.
func (e *Client) LoadYAML(ctx context.Context, spec []byte, opts ...digraph.LoadOption) (*digraph.DAG, error) {
opts = append(slices.Clone(opts), digraph.WithoutEval())
return digraph.LoadYAML(ctx, spec, opts...)
}
// Rename changes the name of a DAG from oldName to newName in the run store.
func (e *Client) Rename(ctx context.Context, oldName, newName string) error {
if err := e.runStore.Rename(ctx, oldName, newName); err != nil {
return fmt.Errorf("failed to rename DAG: %w", err)
}
return nil
}
// Stop stops a running DAG by sending a stop request to its socket.
// If the DAG is not running, it logs a message and returns nil.
func (e *Client) Stop(ctx context.Context, dag *digraph.DAG, requestID string) error {
logger.Info(ctx, "Stopping", "name", dag.Name)
addr := dag.SockAddr(requestID)
if !fileutil.FileExists(addr) {
logger.Info(ctx, "The DAG is not running", "name", dag.Name)
return nil
}
client := sock.NewClient(addr)
_, err := client.Request("POST", "/stop")
return err
}
// GenerateRequestID generates a unique request ID for a DAG run using UUID v7.
func (e *Client) GenerateRequestID(_ context.Context) (string, error) {
// Generate a unique request ID for the DAG run
id, err := uuid.NewV7()
if err != nil {
return "", fmt.Errorf("failed to generate request ID: %w", err)
}
return id.String(), nil
}
// Start starts a DAG by executing the configured executable with appropriate arguments.
// It sets up the command to run in its own process group and configures standard output/error.
func (e *Client) Start(_ context.Context, dag *digraph.DAG, opts StartOptions) error {
args := []string{"start"}
if opts.Params != "" {
args = append(args, "-p")
args = append(args, fmt.Sprintf(`"%s"`, escapeArg(opts.Params)))
}
if opts.Quiet {
args = append(args, "-q")
}
if opts.RequestID != "" {
args = append(args, fmt.Sprintf("--request-id=%s", opts.RequestID))
}
args = append(args, dag.Location)
// nolint:gosec
cmd := exec.Command(e.executable, args...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true, Pgid: 0}
cmd.Dir = e.workDir
cmd.Env = os.Environ()
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Start()
}
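To tie the pieces together (illustrative only, not part of this commit), an external caller of the new runstore Client typically generates a request ID, starts the DAG, and then asks for status with that same ID — the flow the v2 ExecuteDAG handler above follows. A minimal sketch, assuming cli was built with runstore.NewClient, dag is an already-loaded *digraph.DAG, and the parameter string is hypothetical:
reqID, err := cli.GenerateRequestID(ctx)
if err != nil {
	return err
}
if err := cli.Start(ctx, dag, runstore.StartOptions{
	Params:    "KEY=value", // hypothetical parameters passed to the run
	RequestID: reqID,
}); err != nil {
	return err
}
// Falls back to the persisted record if the run's socket is not up yet.
st, err := cli.GetRealtimeStatus(ctx, dag, reqID)
if err != nil {
	return err
}
fmt.Println(st.Status)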
// Restart restarts a DAG by executing the configured executable with the restart command.
// It sets up the command to run in its own process group.
func (e *Client) Restart(_ context.Context, dag *digraph.DAG, opts RestartOptions) error {
args := []string{"restart"}
if opts.Quiet {
args = append(args, "-q")
}
args = append(args, dag.Location)
// nolint:gosec
cmd := exec.Command(e.executable, args...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true, Pgid: 0}
cmd.Dir = e.workDir
cmd.Env = os.Environ()
return cmd.Start()
}
// Retry retries a DAG execution with the specified requestID by executing
// the configured executable with the retry command.
func (e *Client) Retry(_ context.Context, dag *digraph.DAG, requestID string) error {
args := []string{"retry"}
args = append(args, fmt.Sprintf("--request-id=%s", requestID))
args = append(args, dag.Location)
// nolint:gosec
cmd := exec.Command(e.executable, args...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true, Pgid: 0}
cmd.Dir = e.workDir
cmd.Env = os.Environ()
return cmd.Start()
}
// IsRunning checks if a DAG is currently running by attempting to get its current status.
// Returns true if the status can be retrieved without error, false otherwise.
func (e *Client) IsRunning(ctx context.Context, dag *digraph.DAG, requestID string) bool {
_, err := e.currentStatus(ctx, dag, requestID)
return err == nil
}
// GetRealtimeStatus retrieves the current status of a DAG.
// If the DAG is running, it gets the status from the socket.
// If the socket doesn't exist or times out, it falls back to stored status or creates an initial status.
func (e *Client) GetRealtimeStatus(ctx context.Context, dag *digraph.DAG, requestId string) (*Status, error) {
status, err := e.currentStatus(ctx, dag, requestId)
if err != nil {
// No such file or directory
if errors.Is(err, os.ErrNotExist) {
goto FALLBACK
}
if errors.Is(err, sock.ErrTimeout) {
goto FALLBACK
}
return nil, fmt.Errorf("failed to get current status: %w", err)
}
return status, nil
FALLBACK:
if requestId == "" {
// The DAG is not running so return the default status
status := InitialStatus(dag)
return &status, nil
}
return e.findPersistedStatus(ctx, dag, requestId)
}
// FindByRequestID retrieves the status of a DAG run by name and requestID from the run store.
func (e *Client) FindByRequestID(ctx context.Context, name string, requestID string) (*Status, error) {
record, err := e.runStore.FindByRequestID(ctx, name, requestID)
if err != nil {
return nil, fmt.Errorf("failed to find status by request id: %w", err)
}
latestStatus, err := record.ReadStatus(ctx)
if err != nil {
return nil, fmt.Errorf("failed to read status: %w", err)
}
return latestStatus, nil
}
// findPersistedStatus retrieves the status of a DAG run by requestID.
// If the stored status indicates the DAG is running, it attempts to get the current status.
// If that fails, it marks the status as error.
func (e *Client) findPersistedStatus(ctx context.Context, dag *digraph.DAG, requestID string) (
*Status, error,
) {
record, err := e.runStore.FindByRequestID(ctx, dag.Name, requestID)
if err != nil {
return nil, fmt.Errorf("failed to find status by request id: %w", err)
}
latestStatus, err := record.ReadStatus(ctx)
if err != nil {
return nil, fmt.Errorf("failed to read status: %w", err)
}
// If the DAG is running, query the current status
if latestStatus.Status == scheduler.StatusRunning {
currentStatus, err := e.currentStatus(ctx, dag, latestStatus.RequestID)
if err == nil {
return currentStatus, nil
}
}
// If the stored status still says running but the current status could not
// be queried, report the run as error
if latestStatus.Status == scheduler.StatusRunning {
latestStatus.Status = scheduler.StatusError
}
return latestStatus, nil
}
// FindBySubRunRequestID retrieves the status of a sub-run by its request ID.
func (e *Client) FindBySubRunRequestID(ctx context.Context, root digraph.RootDAG, requestID string) (*Status, error) {
record, err := e.runStore.FindBySubRunRequestID(ctx, requestID, root)
if err != nil {
return nil, fmt.Errorf("failed to find sub-run status by request id: %w", err)
}
latestStatus, err := record.ReadStatus(ctx)
if err != nil {
return nil, fmt.Errorf("failed to read status: %w", err)
}
return latestStatus, nil
}
// currentStatus retrieves the current status of a running DAG by querying its socket.
// This is a private method used internally by other status-related methods.
func (*Client) currentStatus(_ context.Context, dag *digraph.DAG, requestId string) (*Status, error) {
// FIXME: Should handle the case of dynamic DAG
client := sock.NewClient(dag.SockAddr(requestId))
statusJSON, err := client.Request("GET", "/status")
if err != nil {
return nil, fmt.Errorf("failed to get current status: %w", err)
}
return StatusFromJSON(statusJSON)
}
// GetLatestStatus retrieves the latest status of a DAG.
// If the DAG is running, it attempts to get the current status from the socket.
// If that fails or no status exists, it returns an initial status or an error.
func (e *Client) GetLatestStatus(ctx context.Context, dag *digraph.DAG) (Status, error) {
var latestStatus *Status
// Find the latest status by name
record, err := e.runStore.Latest(ctx, dag.Name)
if err != nil {
goto handleError
}
// Read the latest status
latestStatus, err = record.ReadStatus(ctx)
if err != nil {
goto handleError
}
// If the DAG is running, query the current status
if latestStatus.Status == scheduler.StatusRunning {
currentStatus, err := e.currentStatus(ctx, dag, latestStatus.RequestID)
if err == nil {
return *currentStatus, nil
}
}
// If the stored status still says running but the current status could not
// be queried, report the run as error
if latestStatus.Status == scheduler.StatusRunning {
latestStatus.Status = scheduler.StatusError
}
return *latestStatus, nil
handleError:
// If the latest status is not found, return the default status
ret := InitialStatus(dag)
if errors.Is(err, ErrNoStatusData) {
// No status for today
return ret, nil
}
return ret, err
}
// ListRecentHistory retrieves the n most recent status records for a DAG by name.
// It returns a slice of Status objects, filtering out any that cannot be read.
func (e *Client) ListRecentHistory(ctx context.Context, name string, n int) []Status {
records := e.runStore.Recent(ctx, name, n)
var runs []Status
for _, record := range records {
if status, err := record.ReadStatus(ctx); err == nil {
runs = append(runs, *status)
}
}
return runs
}
// UpdateStatus updates the status of a DAG run in the run store.
func (e *Client) UpdateStatus(ctx context.Context, root digraph.RootDAG, status Status) error {
// Check for context cancellation
select {
case <-ctx.Done():
return fmt.Errorf("update canceled: %w", ctx.Err())
default:
// Continue with operation
}
// Find the runstore record
var historyRecord Record
if root.RequestID == status.RequestID {
// If the request ID matches the root DAG's request ID, find the runstore record by request ID
r, err := e.runStore.FindByRequestID(ctx, root.Name, status.RequestID)
if err != nil {
return fmt.Errorf("failed to find runstore record: %w", err)
}
historyRecord = r
} else {
// If the request ID does not match, find the runstore record by sub-run request ID
r, err := e.runStore.FindBySubRunRequestID(ctx, status.RequestID, root)
if err != nil {
return fmt.Errorf("failed to find sub-runstore record: %w", err)
}
historyRecord = r
}
// Open, write, and close the runstore record
if err := historyRecord.Open(ctx); err != nil {
return fmt.Errorf("failed to open runstore record: %w", err)
}
// Ensure the record is closed even if write fails
defer func() {
if closeErr := historyRecord.Close(ctx); closeErr != nil {
logger.Errorf(ctx, "Failed to close runstore record: %v", closeErr)
}
}()
if err := historyRecord.Write(ctx, status); err != nil {
return fmt.Errorf("failed to write status: %w", err)
}
return nil
}
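A minimal usage sketch, assuming this file sits in the runstore package and the caller already holds a *Client: flipping one node of a finished root run to error follows the same find, mutate, UpdateStatus pattern the tests exercise. The helper name and the hard-coded node index are illustrative only.
func exampleMarkFirstNodeFailed(ctx context.Context, cli *Client, dagName, requestID string) error {
    status, err := cli.FindByRequestID(ctx, dagName, requestID)
    if err != nil {
        return err
    }
    // Mutate the in-memory copy, then persist it through the run store.
    status.Nodes[0].Status = scheduler.NodeStatusError
    // For a root run, the request ID doubles as the root request ID.
    return cli.UpdateStatus(ctx, digraph.NewRootDAG(dagName, requestID), *status)
}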
// escapeArg escapes special characters in command arguments.
// Currently handles carriage returns and newlines by adding backslashes.
func escapeArg(input string) string {
escaped := strings.Builder{}
for _, char := range input {
switch char {
case '\r':
_, _ = escaped.WriteString("\\r")
case '\n':
_, _ = escaped.WriteString("\\n")
default:
_, _ = escaped.WriteRune(char)
}
}
return escaped.String()
}
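A small sketch, assuming the same package: escapeArg only rewrites CR and LF, so the double quoting that Start applies around params keeps a multi-line value in a single argv entry. exampleQuotedParams is an illustrative helper, not an existing function.
func exampleQuotedParams() string {
    // "FOO=1\nBAR=2" becomes "FOO=1\\nBAR=2" (a literal backslash-n), so the
    // newline can no longer split the quoted parameter string.
    return fmt.Sprintf(`"%s"`, escapeArg("FOO=1\nBAR=2"))
}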
// StartOptions contains options for starting a DAG.
type StartOptions struct {
Params string // Parameters to pass to the DAG
Quiet bool // Whether to run in quiet mode
RequestID string // Request ID for the DAG run
}
// RestartOptions contains options for restarting a DAG.
type RestartOptions struct {
Quiet bool // Whether to run in quiet mode
}
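A lifecycle sketch tying the options above to the Client methods, assuming the Client is obtained from the package's constructor elsewhere (its fields are unexported and construction is not part of this hunk); the helper name and sample params are illustrative.
func exampleRunLifecycle(ctx context.Context, cli *Client, dag *digraph.DAG) error {
    requestID, err := cli.GenerateRequestID(ctx)
    if err != nil {
        return err
    }
    // Start launches the configured executable in its own process group.
    startOpts := StartOptions{Params: "KEY=VALUE", RequestID: requestID, Quiet: true}
    if err := cli.Start(ctx, dag, startOpts); err != nil {
        return err
    }
    // GetLatestStatus falls back to persisted data once the socket is gone.
    status, err := cli.GetLatestStatus(ctx, dag)
    if err != nil {
        return err
    }
    if status.Status == scheduler.StatusRunning {
        // Stop is a no-op when the DAG's socket file no longer exists.
        return cli.Stop(ctx, dag, requestID)
    }
    return nil
}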

View File

@ -0,0 +1,230 @@
package runstore_test
import (
"encoding/json"
"net/http"
"path/filepath"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/dagu-org/dagu/internal/sock"
"github.com/dagu-org/dagu/internal/test"
)
func TestClient_GetStatus(t *testing.T) {
t.Parallel()
th := test.Setup(t)
t.Run("Valid", func(t *testing.T) {
dag := th.DAG(t, filepath.Join("client", "valid.yaml"))
ctx := th.Context
requestID := uuid.Must(uuid.NewV7()).String()
socketServer, _ := sock.NewServer(
dag.SockAddr(requestID),
func(w http.ResponseWriter, _ *http.Request) {
status := runstore.NewStatusBuilder(dag.DAG).Create(
requestID, scheduler.StatusRunning, 0, time.Now(),
)
w.WriteHeader(http.StatusOK)
jsonData, err := json.Marshal(status)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
return
}
_, _ = w.Write(jsonData)
},
)
go func() {
_ = socketServer.Serve(ctx, nil)
_ = socketServer.Shutdown(ctx)
}()
dag.AssertCurrentStatus(t, scheduler.StatusRunning)
_ = socketServer.Shutdown(ctx)
dag.AssertCurrentStatus(t, scheduler.StatusNone)
})
t.Run("UpdateStatus", func(t *testing.T) {
dag := th.DAG(t, filepath.Join("client", "update_status.yaml"))
requestID := uuid.Must(uuid.NewV7()).String()
now := time.Now()
ctx := th.Context
cli := th.RunClient
// Open a run record and write a status before updating it.
record, err := th.RunStore.NewRecord(ctx, dag.DAG, now, requestID, runstore.NewRecordOptions{})
require.NoError(t, err)
err = record.Open(ctx)
require.NoError(t, err)
status := testNewStatus(dag.DAG, requestID, scheduler.StatusSuccess, scheduler.NodeStatusSuccess)
err = record.Write(ctx, status)
require.NoError(t, err)
_ = record.Close(ctx)
// Get the status and check if it is the same as the one we wrote.
statusToCheck, err := cli.FindByRequestID(ctx, dag.Name, requestID)
require.NoError(t, err)
require.Equal(t, scheduler.NodeStatusSuccess, statusToCheck.Nodes[0].Status)
// Update the status.
newStatus := scheduler.NodeStatusError
status.Nodes[0].Status = newStatus
rootDAG := digraph.NewRootDAG(dag.Name, requestID)
err = cli.UpdateStatus(ctx, rootDAG, status)
require.NoError(t, err)
statusByRequestID, err := cli.FindByRequestID(ctx, dag.Name, requestID)
require.NoError(t, err)
require.Equal(t, 1, len(status.Nodes))
require.Equal(t, newStatus, statusByRequestID.Nodes[0].Status)
})
t.Run("UpdateSubRunStatus", func(t *testing.T) {
dag := th.DAG(t, filepath.Join("client", "tree_parent.yaml"))
dagStatus, err := th.DAGClient.Status(th.Context, dag.Location)
require.NoError(t, err)
err = th.RunClient.Start(th.Context, dagStatus.DAG, runstore.StartOptions{})
require.NoError(t, err)
dag.AssertLatestStatus(t, scheduler.StatusSuccess)
// Get the sub run status.
status, err := th.RunClient.GetLatestStatus(th.Context, dag.DAG)
require.NoError(t, err)
requestId := status.RequestID
subRun := status.Nodes[0].SubRuns[0]
rootDAG := digraph.NewRootDAG(dag.Name, requestId)
subRunStatus, err := th.RunClient.FindBySubRunRequestID(th.Context, rootDAG, subRun.RequestID)
require.NoError(t, err)
require.Equal(t, scheduler.StatusSuccess.String(), subRunStatus.Status.String())
// Update the sub run status.
subRunStatus.Nodes[0].Status = scheduler.NodeStatusError
err = th.RunClient.UpdateStatus(th.Context, rootDAG, *subRunStatus)
require.NoError(t, err)
// Check if the sub run status is updated.
subRunStatus, err = th.RunClient.FindBySubRunRequestID(th.Context, rootDAG, subRun.RequestID)
require.NoError(t, err)
require.Equal(t, scheduler.NodeStatusError.String(), subRunStatus.Nodes[0].Status.String())
})
t.Run("InvalidUpdateStatusWithInvalidReqID", func(t *testing.T) {
dag := th.DAG(t, filepath.Join("client", "invalid_reqid.yaml"))
ctx := th.Context
cli := th.RunClient
// update with invalid request id
status := testNewStatus(dag.DAG, "unknown-req-id", scheduler.StatusError, scheduler.NodeStatusError)
// Check if the update fails.
rootDAG := digraph.NewRootDAG(dag.Name, "unknown-req-id")
err := cli.UpdateStatus(ctx, rootDAG, status)
require.Error(t, err)
})
}
func TestClient_RunDAG(t *testing.T) {
th := test.Setup(t)
t.Run("RunDAG", func(t *testing.T) {
dag := th.DAG(t, filepath.Join("client", "run_dag.yaml"))
dagStatus, err := th.DAGClient.Status(th.Context, dag.Location)
require.NoError(t, err)
err = th.RunClient.Start(th.Context, dagStatus.DAG, runstore.StartOptions{})
require.NoError(t, err)
dag.AssertLatestStatus(t, scheduler.StatusSuccess)
status, err := th.RunClient.GetLatestStatus(th.Context, dagStatus.DAG)
require.NoError(t, err)
require.Equal(t, scheduler.StatusSuccess.String(), status.Status.String())
})
t.Run("Stop", func(t *testing.T) {
dag := th.DAG(t, filepath.Join("client", "stop.yaml"))
ctx := th.Context
err := th.RunClient.Start(ctx, dag.DAG, runstore.StartOptions{})
require.NoError(t, err)
dag.AssertLatestStatus(t, scheduler.StatusRunning)
err = th.RunClient.Stop(ctx, dag.DAG, "")
require.NoError(t, err)
dag.AssertLatestStatus(t, scheduler.StatusCancel)
})
t.Run("Restart", func(t *testing.T) {
dag := th.DAG(t, filepath.Join("client", "restart.yaml"))
ctx := th.Context
err := th.RunClient.Start(th.Context, dag.DAG, runstore.StartOptions{})
require.NoError(t, err)
dag.AssertLatestStatus(t, scheduler.StatusRunning)
err = th.RunClient.Restart(ctx, dag.DAG, runstore.RestartOptions{})
require.NoError(t, err)
dag.AssertLatestStatus(t, scheduler.StatusSuccess)
})
t.Run("Retry", func(t *testing.T) {
dag := th.DAG(t, filepath.Join("client", "retry.yaml"))
ctx := th.Context
cli := th.RunClient
err := cli.Start(ctx, dag.DAG, runstore.StartOptions{Params: "x y z"})
require.NoError(t, err)
// Wait for the DAG to finish
dag.AssertLatestStatus(t, scheduler.StatusSuccess)
// Retry the DAG with the same params.
status, err := cli.GetLatestStatus(ctx, dag.DAG)
require.NoError(t, err)
previousRequestID := status.RequestID
previousParams := status.Params
time.Sleep(1 * time.Second)
err = cli.Retry(ctx, dag.DAG, previousRequestID)
require.NoError(t, err)
// Wait for the DAG to finish
dag.AssertLatestStatus(t, scheduler.StatusSuccess)
status, err = cli.GetLatestStatus(ctx, dag.DAG)
require.NoError(t, err)
// Check if the params are the same as the previous run.
require.Equal(t, previousRequestID, status.RequestID)
require.Equal(t, previousParams, status.Params)
})
}
func testNewStatus(dag *digraph.DAG, requestID string, status scheduler.Status, nodeStatus scheduler.NodeStatus) runstore.Status {
nodes := []scheduler.NodeData{{State: scheduler.NodeState{Status: nodeStatus}}}
tm := time.Now()
startedAt := &tm
return runstore.NewStatusBuilder(dag).Create(
requestID, status, 0, *startedAt, runstore.WithNodes(nodes),
)
}

View File

@ -1,4 +1,4 @@
package jsondb
package filestore
import (
// nolint: gosec
@ -21,10 +21,10 @@ import (
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/fileutil"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
)
// DataRoot manages the directory structure for a DAG's run history.
// DataRoot manages the directory structure for run history data.
// It handles the organization of run data in a hierarchical structure
// based on year, month, and day.
type DataRoot struct {
@ -47,7 +47,7 @@ func WithRootDAG(rootDAG *digraph.RootDAG) RootOption {
}
}
// NewDataRoot creates a new DataRoot instance for managing a DAG's run history.
// NewDataRoot creates a new DataRoot instance for managing a DAG's run history in the run store.
// It sanitizes the DAG name to create a safe directory structure and applies any provided options.
//
// Parameters:
@ -105,7 +105,7 @@ func (dr *DataRoot) FindByRequestID(_ context.Context, requestID string) (*Run,
}
if len(matches) == 0 {
return nil, fmt.Errorf("%w: %s", persistence.ErrRequestIDNotFound, requestID)
return nil, fmt.Errorf("%w: %s", runstore.ErrRequestIDNotFound, requestID)
}
// Sort matches by timestamp (most recent first)
@ -129,10 +129,10 @@ func (dr *DataRoot) LatestAfter(ctx context.Context, cutoff TimeInUTC) (*Run, er
return nil, fmt.Errorf("failed to list recent runs: %w", err)
}
if len(runs) == 0 {
return nil, persistence.ErrNoStatusData
return nil, runstore.ErrNoStatusData
}
if runs[0].timestamp.Before(cutoff.Time) {
return nil, persistence.ErrNoStatusData
return nil, runstore.ErrNoStatusData
}
return runs[0], nil
}

View File

@ -1,4 +1,4 @@
package jsondb
package filestore
import (
"context"
@ -8,7 +8,7 @@ import (
"testing"
"time"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -109,7 +109,7 @@ func TestDataRootRuns(t *testing.T) {
latest := root.CreateTestRun(t, "test-id3", ts3)
_, err := root.LatestAfter(context.Background(), ts4)
require.ErrorIs(t, err, persistence.ErrNoStatusData, "LatestAfter should return ErrNoStatusData when no runs are found")
require.ErrorIs(t, err, runstore.ErrNoStatusData, "LatestAfter should return ErrNoStatusData when no runs are found")
run, err := root.LatestAfter(context.Background(), ts3)
require.NoError(t, err)

View File

@ -1,4 +1,4 @@
package jsondb
package filestore
import (
"context"
@ -8,9 +8,9 @@ import (
"time"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/fileutil"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/persistence/filecache"
"github.com/dagu-org/dagu/internal/runstore"
)
// Error definitions for common issues
@ -20,29 +20,29 @@ var (
ErrRootRequestIDEmpty = errors.New("root requestID is empty")
)
var _ persistence.HistoryStore = (*JSONDB)(nil)
var _ runstore.Store = (*fileStore)(nil)
// JSONDB manages DAGs status files in local storage with high performance and reliability.
type JSONDB struct {
baseDir string // Base directory for all status files
latestStatusToday bool // Whether to only return today's status
cache *filecache.Cache[*persistence.Status] // Optional cache for read operations
maxWorkers int // Maximum number of parallel workers
// fileStore manages DAG status files in local storage with high performance and reliability.
type fileStore struct {
baseDir string // Base directory for all status files
latestStatusToday bool // Whether to only return today's status
cache *fileutil.Cache[*runstore.Status] // Optional cache for read operations
maxWorkers int // Maximum number of parallel workers
}
// Option defines functional options for configuring JSONDB.
// Option defines functional options for configuring the file store.
type Option func(*Options)
// Options holds configuration options for JSONDB.
// Options holds configuration options for the file store.
type Options struct {
FileCache *filecache.Cache[*persistence.Status] // Optional cache for status files
LatestStatusToday bool // Whether to only return today's status
MaxWorkers int // Maximum number of parallel workers
OperationTimeout time.Duration // Timeout for operations
FileCache *fileutil.Cache[*runstore.Status] // Optional cache for status files
LatestStatusToday bool // Whether to only return today's status
MaxWorkers int // Maximum number of parallel workers
OperationTimeout time.Duration // Timeout for operations
}
// WithFileCache sets the file cache for JSONDB.
func WithFileCache(cache *filecache.Cache[*persistence.Status]) Option {
// WithFileCache sets the file cache for the file store.
func WithFileCache(cache *fileutil.Cache[*runstore.Status]) Option {
return func(o *Options) {
o.FileCache = cache
}
@ -56,7 +56,7 @@ func WithLatestStatusToday(latestStatusToday bool) Option {
}
// New creates a new JSONDB instance with the specified options.
func New(baseDir string, opts ...Option) *JSONDB {
func New(baseDir string, opts ...Option) *fileStore {
options := &Options{
LatestStatusToday: true,
MaxWorkers: runtime.NumCPU(),
@ -66,7 +66,7 @@ func New(baseDir string, opts ...Option) *JSONDB {
opt(options)
}
return &JSONDB{
return &fileStore{
baseDir: baseDir,
latestStatusToday: options.LatestStatusToday,
cache: options.FileCache,
@ -74,50 +74,10 @@ func New(baseDir string, opts ...Option) *JSONDB {
}
}
// Update updates the status for a specific request ID.
// It handles the entire lifecycle of opening, writing, and closing the history record.
func (db *JSONDB) Update(ctx context.Context, dagName, reqID string, status persistence.Status) error {
// Check for context cancellation
select {
case <-ctx.Done():
return fmt.Errorf("update canceled: %w", ctx.Err())
default:
// Continue with operation
}
if reqID == "" {
return ErrRequestIDEmpty
}
// Find the history record
historyRecord, err := db.FindByRequestID(ctx, dagName, reqID)
if err != nil {
return fmt.Errorf("failed to find history record: %w", err)
}
// Open, write, and close the history record
if err := historyRecord.Open(ctx); err != nil {
return fmt.Errorf("failed to open history record: %w", err)
}
// Ensure the record is closed even if write fails
defer func() {
if closeErr := historyRecord.Close(ctx); closeErr != nil {
logger.Errorf(ctx, "Failed to close history record: %v", closeErr)
}
}()
if err := historyRecord.Write(ctx, status); err != nil {
return fmt.Errorf("failed to write status: %w", err)
}
return nil
}
// NewRecord creates a new history record for the specified DAG run.
// NewRecord creates a new runstore record for the specified DAG run.
// If opts.Root is not nil, it creates a sub-record for the specified root DAG.
// If opts.Retry is true, it creates a retry record for the specified request ID.
func (db *JSONDB) NewRecord(ctx context.Context, dag *digraph.DAG, timestamp time.Time, reqID string, opts persistence.NewRecordOptions) (persistence.Record, error) {
func (db *fileStore) NewRecord(ctx context.Context, dag *digraph.DAG, timestamp time.Time, reqID string, opts runstore.NewRecordOptions) (runstore.Record, error) {
if reqID == "" {
return nil, ErrRequestIDEmpty
}
@ -152,8 +112,8 @@ func (db *JSONDB) NewRecord(ctx context.Context, dag *digraph.DAG, timestamp tim
return record, nil
}
// NewSubRecord creates a new history record for the specified sub-run.
func (db *JSONDB) newSubRecord(ctx context.Context, dag *digraph.DAG, timestamp time.Time, reqID string, opts persistence.NewRecordOptions) (persistence.Record, error) {
// newSubRecord creates a new runstore record for the specified sub-run.
func (db *fileStore) newSubRecord(ctx context.Context, dag *digraph.DAG, timestamp time.Time, reqID string, opts runstore.NewRecordOptions) (runstore.Record, error) {
dataRoot := NewDataRoot(db.baseDir, opts.Root.Name)
rootRun, err := dataRoot.FindByRequestID(ctx, opts.Root.RequestID)
if err != nil {
@ -186,8 +146,8 @@ func (db *JSONDB) newSubRecord(ctx context.Context, dag *digraph.DAG, timestamp
return record, nil
}
// Recent returns the most recent history records for the specified key, up to itemLimit.
func (db *JSONDB) Recent(ctx context.Context, dagName string, itemLimit int) []persistence.Record {
// Recent returns the most recent runstore records for the specified key, up to itemLimit.
func (db *fileStore) Recent(ctx context.Context, dagName string, itemLimit int) []runstore.Record {
// Check for context cancellation
select {
case <-ctx.Done():
@ -207,7 +167,7 @@ func (db *JSONDB) Recent(ctx context.Context, dagName string, itemLimit int) []p
items := root.Latest(ctx, itemLimit)
// Get the latest record for each item
records := make([]persistence.Record, 0, len(items))
records := make([]runstore.Record, 0, len(items))
for _, item := range items {
record, err := item.LatestRecord(ctx, db.cache)
if err != nil {
@ -220,8 +180,8 @@ func (db *JSONDB) Recent(ctx context.Context, dagName string, itemLimit int) []p
return records
}
// Latest returns the most recent history record for today.
func (db *JSONDB) Latest(ctx context.Context, dagName string) (persistence.Record, error) {
// Latest returns the most recent runstore record for today.
func (db *fileStore) Latest(ctx context.Context, dagName string) (runstore.Record, error) {
// Check for context cancellation
select {
case <-ctx.Done():
@ -248,13 +208,13 @@ func (db *JSONDB) Latest(ctx context.Context, dagName string) (persistence.Recor
// Get the latest file
latestRun := root.Latest(ctx, 1)
if len(latestRun) == 0 {
return nil, persistence.ErrNoStatusData
return nil, runstore.ErrNoStatusData
}
return latestRun[0].LatestRecord(ctx, db.cache)
}
// FindByRequestID finds a history record by request ID.
func (db *JSONDB) FindByRequestID(ctx context.Context, dagName, reqID string) (persistence.Record, error) {
// FindByRequestID finds a runstore record by request ID.
func (db *fileStore) FindByRequestID(ctx context.Context, dagName, reqID string) (runstore.Record, error) {
// Check for context cancellation
select {
case <-ctx.Done():
@ -277,8 +237,8 @@ func (db *JSONDB) FindByRequestID(ctx context.Context, dagName, reqID string) (p
return run.LatestRecord(ctx, db.cache)
}
// FindBySubRequestID finds a history record by request ID for a sub-DAG.
func (db *JSONDB) FindBySubRequestID(ctx context.Context, reqID string, rootDAG digraph.RootDAG) (persistence.Record, error) {
// FindBySubRunRequestID finds a runstore record by request ID for a sub-DAG.
func (db *fileStore) FindBySubRunRequestID(ctx context.Context, reqID string, rootDAG digraph.RootDAG) (runstore.Record, error) {
// Check for context cancellation
select {
case <-ctx.Done():
@ -304,8 +264,8 @@ func (db *JSONDB) FindBySubRequestID(ctx context.Context, reqID string, rootDAG
return subRun.LatestRecord(ctx, db.cache)
}
// RemoveOld removes history records older than retentionDays for the specified key.
func (db *JSONDB) RemoveOld(ctx context.Context, dagName string, retentionDays int) error {
// RemoveOld removes runstore records older than retentionDays for the specified key.
func (db *fileStore) RemoveOld(ctx context.Context, dagName string, retentionDays int) error {
// Check for context cancellation
select {
case <-ctx.Done():
@ -323,8 +283,8 @@ func (db *JSONDB) RemoveOld(ctx context.Context, dagName string, retentionDays i
return root.RemoveOld(ctx, retentionDays)
}
// Rename renames all history records from oldKey to newKey.
func (db *JSONDB) Rename(ctx context.Context, oldNameOrPath, newNameOrPath string) error {
// Rename renames all runstore records from oldKey to newKey.
func (db *fileStore) Rename(ctx context.Context, oldNameOrPath, newNameOrPath string) error {
// Check for context cancellation
select {
case <-ctx.Done():

View File

@ -1,4 +1,4 @@
package jsondb
package filestore
import (
"testing"
@ -6,7 +6,7 @@ import (
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -96,38 +96,7 @@ func TestJSONDB(t *testing.T) {
// Verify an error is returned if the request ID does not exist
_, err = th.DB.FindByRequestID(th.Context, "test_DAG", "nonexistent-id")
assert.ErrorIs(t, err, persistence.ErrRequestIDNotFound)
})
t.Run("UpdateRecord", func(t *testing.T) {
th := setupTestJSONDB(t)
// Create a timestamp for the record
timestamp := time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC)
th.CreateRecord(t, timestamp, "request-id-1", scheduler.StatusRunning)
// Verify the status is created
record, err := th.DB.FindByRequestID(th.Context, "test_DAG", "request-id-1")
require.NoError(t, err)
// Update the status
status, err := record.ReadStatus(th.Context)
require.NoError(t, err)
assert.Equal(t, scheduler.StatusRunning.String(), status.Status.String())
// Update the status to success
status.Status = scheduler.StatusSuccess
err = th.DB.Update(th.Context, "test_DAG", "request-id-1", *status)
require.NoError(t, err)
// Verify the status is updated
record, err = th.DB.FindByRequestID(th.Context, "test_DAG", "request-id-1")
require.NoError(t, err)
// Verify the status is updated
status, err = record.ReadStatus(th.Context)
require.NoError(t, err)
assert.Equal(t, scheduler.StatusSuccess.String(), status.Status.String())
assert.ErrorIs(t, err, runstore.ErrRequestIDNotFound)
})
t.Run("RemoveOld", func(t *testing.T) {
th := setupTestJSONDB(t)
@ -167,7 +136,7 @@ func TestJSONDB(t *testing.T) {
// Create a sub record
rootDAG := digraph.NewRootDAG("test_DAG", "parent-id")
subDAG := th.DAG("sub_dag")
record, err := th.DB.NewRecord(th.Context, subDAG.DAG, ts, "sub-id", persistence.NewRecordOptions{
record, err := th.DB.NewRecord(th.Context, subDAG.DAG, ts, "sub-id", runstore.NewRecordOptions{
Root: &rootDAG,
})
require.NoError(t, err)
@ -179,13 +148,13 @@ func TestJSONDB(t *testing.T) {
_ = record.Close(th.Context)
}()
statusToWrite := persistence.NewStatusFactory(subDAG.DAG).Default()
statusToWrite := runstore.InitialStatus(subDAG.DAG)
statusToWrite.RequestID = "sub-id"
err = record.Write(th.Context, statusToWrite)
require.NoError(t, err)
// Verify record is created
existingRecord, err := th.DB.FindBySubRequestID(th.Context, "sub-id", rootDAG)
existingRecord, err := th.DB.FindBySubRunRequestID(th.Context, "sub-id", rootDAG)
require.NoError(t, err)
status, err := existingRecord.ReadStatus(th.Context)
@ -204,7 +173,7 @@ func TestJSONDB(t *testing.T) {
// Create a sub record
rootDAG := digraph.NewRootDAG("test_DAG", "parent-id")
subDAG := th.DAG("sub_dag")
record, err := th.DB.NewRecord(th.Context, subDAG.DAG, ts, "sub-id", persistence.NewRecordOptions{
record, err := th.DB.NewRecord(th.Context, subDAG.DAG, ts, "sub-id", runstore.NewRecordOptions{
Root: &rootDAG,
})
require.NoError(t, err)
@ -216,7 +185,7 @@ func TestJSONDB(t *testing.T) {
_ = record.Close(th.Context)
}()
statusToWrite := persistence.NewStatusFactory(subDAG.DAG).Default()
statusToWrite := runstore.InitialStatus(subDAG.DAG)
statusToWrite.RequestID = "sub-id"
statusToWrite.Status = scheduler.StatusRunning
err = record.Write(th.Context, statusToWrite)
@ -224,7 +193,7 @@ func TestJSONDB(t *testing.T) {
// Find the sub run by request ID
ts = time.Date(2021, 1, 2, 0, 0, 0, 0, time.UTC)
existingRecord, err := th.DB.FindBySubRequestID(th.Context, "sub-id", rootDAG)
existingRecord, err := th.DB.FindBySubRunRequestID(th.Context, "sub-id", rootDAG)
require.NoError(t, err)
existingRecordStatus, err := existingRecord.ReadStatus(th.Context)
require.NoError(t, err)
@ -232,7 +201,7 @@ func TestJSONDB(t *testing.T) {
assert.Equal(t, scheduler.StatusRunning.String(), existingRecordStatus.Status.String())
// Create a retry record and write different status
retryRecord, err := th.DB.NewRecord(th.Context, subDAG.DAG, ts, "sub-id", persistence.NewRecordOptions{
retryRecord, err := th.DB.NewRecord(th.Context, subDAG.DAG, ts, "sub-id", runstore.NewRecordOptions{
Root: &rootDAG,
Retry: true,
})
@ -243,11 +212,40 @@ func TestJSONDB(t *testing.T) {
_ = retryRecord.Close(th.Context)
// Verify the retry record is created
existingRecord, err = th.DB.FindBySubRequestID(th.Context, "sub-id", rootDAG)
existingRecord, err = th.DB.FindBySubRunRequestID(th.Context, "sub-id", rootDAG)
require.NoError(t, err)
existingRecordStatus, err = existingRecord.ReadStatus(th.Context)
require.NoError(t, err)
assert.Equal(t, "sub-id", existingRecordStatus.RequestID)
assert.Equal(t, scheduler.StatusSuccess.String(), existingRecordStatus.Status.String())
})
t.Run("ReadDAG", func(t *testing.T) {
th := setupTestJSONDB(t)
// Create a timestamp for the parent record
ts := time.Date(2021, 1, 2, 0, 0, 0, 0, time.UTC)
// Create a parent record
rec := th.CreateRecord(t, ts, "parent-id", scheduler.StatusRunning)
// Write the status
err := rec.Open(th.Context)
require.NoError(t, err)
defer func() {
_ = rec.Close(th.Context)
}()
statusToWrite := runstore.InitialStatus(rec.dag)
statusToWrite.RequestID = "parent-id"
err = rec.Write(th.Context, statusToWrite)
require.NoError(t, err)
// Read the DAG and verify it matches the original
dag, err := rec.ReadDAG(th.Context)
require.NoError(t, err)
require.NotNil(t, dag)
require.Equal(t, *rec.dag, *dag)
})
}

View File

@ -1,4 +1,4 @@
package jsondb
package filestore
import (
"bufio"
@ -14,9 +14,9 @@ import (
"time"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/fileutil"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/persistence/filecache"
"github.com/dagu-org/dagu/internal/runstore"
)
// Error definitions for common issues
@ -29,17 +29,20 @@ var (
ErrContextCanceled = errors.New("operation canceled by context")
)
var _ persistence.Record = (*Record)(nil)
// DAGDefinition is the name of the file where the DAG definition is stored.
const DAGDefinition = "dag.json"
var _ runstore.Record = (*Record)(nil)
// Record manages an append-only status file with read, write, and compaction capabilities.
// It provides thread-safe operations and supports metrics collection.
type Record struct {
file string // Path to the status file
writer *Writer // Writer for appending status updates
mu sync.RWMutex // Mutex for thread safety
cache *filecache.Cache[*persistence.Status] // Optional cache for read operations
isClosing atomic.Bool // Flag to prevent writes during Close/Compact
dag *digraph.DAG // DAG associated with the status file
file string // Path to the status file
writer *Writer // Writer for appending status updates
mu sync.RWMutex // Mutex for thread safety
cache *fileutil.Cache[*runstore.Status] // Optional cache for read operations
isClosing atomic.Bool // Flag to prevent writes during Close/Compact
dag *digraph.DAG // DAG associated with the status file
}
// RecordOption defines a functional option for configuring a Record.
@ -54,7 +57,7 @@ func WithDAG(dag *digraph.DAG) RecordOption {
}
// NewRecord creates a new Record for the specified file.
func NewRecord(file string, cache *filecache.Cache[*persistence.Status], opts ...RecordOption) *Record {
func NewRecord(file string, cache *fileutil.Cache[*runstore.Status], opts ...RecordOption) *Record {
r := &Record{file: file, cache: cache}
for _, opt := range opts {
opt(r)
@ -78,6 +81,43 @@ func (r *Record) ModTime() (time.Time, error) {
return info.ModTime(), nil
}
// ReadDAG implements runstore.Record.
func (r *Record) ReadDAG(ctx context.Context) (*digraph.DAG, error) {
// Check for context cancellation
select {
case <-ctx.Done():
return nil, fmt.Errorf("%w: %v", ErrContextCanceled, ctx.Err())
default:
// Continue with operation
}
// Determine the path to the DAG definition file
dir := filepath.Dir(r.file)
dagFile := filepath.Join(dir, DAGDefinition)
// Check if the file exists
if _, err := os.Stat(dagFile); err != nil {
if os.IsNotExist(err) {
return nil, fmt.Errorf("DAG definition file not found: %w", err)
}
return nil, fmt.Errorf("failed to access DAG definition file: %w", err)
}
// Read the file
data, err := os.ReadFile(dagFile) //nolint:gosec
if err != nil {
return nil, fmt.Errorf("failed to read DAG definition file: %w", err)
}
// Parse the JSON data
var dag digraph.DAG
if err := json.Unmarshal(data, &dag); err != nil {
return nil, fmt.Errorf("failed to unmarshal DAG definition: %w", err)
}
return &dag, nil
}
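A brief sketch of the round trip, assuming the caller already knows an attempt's status-file path: Open below persists the DAG as dag.json next to the status file, and ReadDAG recovers it, so a retry can rebuild the run without re-reading the original YAML. The helper name is illustrative.
func exampleReloadDAG(ctx context.Context, statusFilePath string) (*digraph.DAG, error) {
    // A nil cache is fine here; ReadDAG reads dag.json directly.
    rec := NewRecord(statusFilePath, nil)
    return rec.ReadDAG(ctx)
}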
// Open initializes the status file for writing. It returns an error if the file is already open.
// The context can be used to cancel the operation.
func (r *Record) Open(ctx context.Context) error {
@ -106,10 +146,10 @@ func (r *Record) Open(ctx context.Context) error {
if r.dag != nil {
dagJSON, err := json.Marshal(r.dag)
if err != nil {
return fmt.Errorf("failed to marshal DAG metadata: %w", err)
return fmt.Errorf("failed to marshal DAG definition: %w", err)
}
if err := os.WriteFile(filepath.Join(dir, "dag.json"), dagJSON, 0600); err != nil {
return fmt.Errorf("failed to write DAG metadata: %w", err)
if err := os.WriteFile(filepath.Join(dir, DAGDefinition), dagJSON, 0600); err != nil {
return fmt.Errorf("failed to write DAG definition: %w", err)
}
}
@ -127,7 +167,7 @@ func (r *Record) Open(ctx context.Context) error {
// Write adds a new status record to the file. It returns an error if the file is not open
// or is currently being closed. The context can be used to cancel the operation.
func (r *Record) Write(ctx context.Context, status persistence.Status) error {
func (r *Record) Write(ctx context.Context, status runstore.Status) error {
// Check if we're closing before acquiring the mutex to reduce contention
if r.isClosing.Load() {
return fmt.Errorf("cannot write while file is closing: %w", ErrStatusFileNotOpen)
@ -296,25 +336,7 @@ func safeRename(source, target string) error {
// ReadStatus reads the latest status from the file, using cache if available.
// The context can be used to cancel the operation.
func (r *Record) ReadStatus(ctx context.Context) (*persistence.Status, error) {
// Check for context cancellation
select {
case <-ctx.Done():
return nil, fmt.Errorf("%w: %v", ErrContextCanceled, ctx.Err())
default:
// Continue with operation
}
run, err := r.ReadRun(ctx)
if err != nil {
return nil, err
}
return &run.Status, nil
}
// ReadRun returns the full status file information, including the file path.
// The context can be used to cancel the operation.
func (r *Record) ReadRun(ctx context.Context) (*persistence.Run, error) {
func (r *Record) ReadStatus(ctx context.Context) (*runstore.Status, error) {
// Check for context cancellation
select {
case <-ctx.Done():
@ -325,14 +347,14 @@ func (r *Record) ReadRun(ctx context.Context) (*persistence.Run, error) {
// Try to use cache first if available
if r.cache != nil {
status, cacheErr := r.cache.LoadLatest(r.file, func() (*persistence.Status, error) {
status, cacheErr := r.cache.LoadLatest(r.file, func() (*runstore.Status, error) {
r.mu.RLock()
defer r.mu.RUnlock()
return r.parseLocked()
})
if cacheErr == nil {
return persistence.NewRun(r.file, *status), nil
return status, nil
}
}
@ -345,18 +367,19 @@ func (r *Record) ReadRun(ctx context.Context) (*persistence.Run, error) {
return nil, fmt.Errorf("failed to parse status file: %w", parseErr)
}
return persistence.NewRun(r.file, *parsed), nil
return parsed, nil
}
// parseLocked reads the status file and returns the last valid status.
// Must be called with a lock (read or write) already held.
func (r *Record) parseLocked() (*persistence.Status, error) {
func (r *Record) parseLocked() (*runstore.Status, error) {
return ParseStatusFile(r.file)
}
// ParseStatusFile reads the status file and returns the last valid status.
// The bufferSize parameter controls the size of the read buffer.
func ParseStatusFile(file string) (*persistence.Status, error) {
func ParseStatusFile(file string) (*runstore.Status, error) {
f, err := os.Open(file) //nolint:gosec
if err != nil {
return nil, fmt.Errorf("%w: %v", ErrReadFailed, err)
@ -367,7 +390,7 @@ func ParseStatusFile(file string) (*persistence.Status, error) {
var (
offset int64
result *persistence.Status
result *runstore.Status
)
// Read append-only file from the beginning and find the last status
@ -384,7 +407,7 @@ func ParseStatusFile(file string) (*persistence.Status, error) {
offset = nextOffset
if len(line) > 0 {
status, err := persistence.StatusFromJSON(string(line))
status, err := runstore.StatusFromJSON(string(line))
if err == nil {
result = status
}

View File

@ -1,4 +1,4 @@
package jsondb
package filestore
import (
"context"
@ -11,7 +11,7 @@ import (
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/dagu-org/dagu/internal/stringutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -99,9 +99,9 @@ func TestHistoryRecord_Read(t *testing.T) {
hr := NewRecord(file, nil)
// Read status - should get the last entry (test2)
run, err := hr.ReadRun(context.Background())
status, err := hr.ReadStatus(context.Background())
assert.NoError(t, err)
assert.Equal(t, scheduler.StatusSuccess.String(), run.Status.Status.String())
assert.Equal(t, scheduler.StatusSuccess.String(), status.Status.String())
// Read using ReadStatus
latestStatus, err := hr.ReadStatus(context.Background())
@ -120,7 +120,6 @@ func TestHistoryRecord_Compact(t *testing.T) {
if i == 9 {
// Make some status changes to create different records
status.Status = scheduler.StatusSuccess
status.StatusText = scheduler.StatusSuccess.String()
}
if i == 0 {
@ -370,17 +369,16 @@ func createTestDAG() *digraph.DAG {
}
// createTestStatus creates a sample status for testing
func createTestStatus(status scheduler.Status) persistence.Status {
func createTestStatus(status scheduler.Status) runstore.Status {
dag := createTestDAG()
return persistence.Status{
RequestID: "test",
Name: dag.Name,
Status: status,
StatusText: status.String(),
PID: persistence.PID(12345),
StartedAt: stringutil.FormatTime(time.Now()),
Nodes: persistence.FromSteps(dag.Steps),
return runstore.Status{
RequestID: "test",
Name: dag.Name,
Status: status,
PID: runstore.PID(12345),
StartedAt: stringutil.FormatTime(time.Now()),
Nodes: runstore.FromSteps(dag.Steps),
}
}

View File

@ -1,7 +1,8 @@
package jsondb
package filestore
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
@ -9,15 +10,27 @@ import (
"sort"
"time"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/persistence/filecache"
"github.com/dagu-org/dagu/internal/fileutil"
"github.com/dagu-org/dagu/internal/runstore"
)
const (
// SubRunsDir is the name of the directory where sub-runs are stored.
SubRunsDir = "subs"
// Error definitions for directory structure validation
var (
ErrInvalidRunDir = errors.New("invalid run directory")
)
// SubRunsDir is the name of the directory where status files for sub DAGs are stored.
const SubRunsDir = "subs"
// SubRunsDirPrefix is the prefix for sub-run directories.
const SubRunsDirPrefix = "child_"
// JSONLStatusFile is the name of the status file for each execution attempt.
// It contains the status of the DAG run in JSON Lines format.
// While running the DAG, new lines are appended to this file on each status update.
// After finishing the run, this file will be compacted into a single JSON line file.
const JSONLStatusFile = "status.jsonl"
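A short sketch of the intended call pattern, assuming the caller has already resolved an attempt directory: because status.jsonl is append-only, the freshest state is the last decodable line, which is exactly what ParseStatusFile in record.go returns. The helper name is illustrative.
func readLatestFromAttempt(attemptDir string) (*runstore.Status, error) {
    return ParseStatusFile(filepath.Join(attemptDir, JSONLStatusFile))
}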
// Run represents a single run of a DAG with its associated timestamp and request ID.
type Run struct {
baseDir string // Base directory path for this run
@ -31,7 +44,6 @@ func NewRun(dir string) (*Run, error) {
// Determine if the run is a sub-run
parentDir := filepath.Dir(dir)
if filepath.Base(parentDir) == SubRunsDir {
// Sub-workflow run
matches := reRunSub.FindStringSubmatch(filepath.Base(dir))
if len(matches) != 2 {
return nil, ErrInvalidRunDir
@ -59,7 +71,7 @@ func NewRun(dir string) (*Run, error) {
// CreateRecord creates a new record for this run with the given timestamp.
// It creates a new attempt directory and initializes a record within it.
func (e Run) CreateRecord(_ context.Context, ts TimeInUTC, cache *filecache.Cache[*persistence.Status], opts ...RecordOption) (*Record, error) {
func (e Run) CreateRecord(_ context.Context, ts TimeInUTC, cache *fileutil.Cache[*runstore.Status], opts ...RecordOption) (*Record, error) {
dirName := "attempt_" + formatAttemptTimestamp(ts)
dir := filepath.Join(e.baseDir, dirName)
// Error if the directory already exists
@ -69,12 +81,12 @@ func (e Run) CreateRecord(_ context.Context, ts TimeInUTC, cache *filecache.Cach
if err := os.MkdirAll(dir, 0750); err != nil {
return nil, fmt.Errorf("failed to create attempt directory: %w", err)
}
return NewRecord(filepath.Join(dir, "status.json"), cache, opts...), nil
return NewRecord(filepath.Join(dir, JSONLStatusFile), cache, opts...), nil
}
// CreateSubRun creates a new sub-run with the given timestamp and request ID.
func (e Run) CreateSubRun(_ context.Context, reqID string) (*Run, error) {
dirName := "sub_" + reqID
dirName := "child_" + reqID
dir := filepath.Join(e.baseDir, SubRunsDir, dirName)
if err := os.MkdirAll(dir, 0750); err != nil {
return nil, fmt.Errorf("failed to create sub-run directory: %w", err)
@ -84,13 +96,13 @@ func (e Run) CreateSubRun(_ context.Context, reqID string) (*Run, error) {
// FindSubRun searches for a sub-run with the specified request ID.
func (e Run) FindSubRun(_ context.Context, reqID string) (*Run, error) {
globPattern := filepath.Join(e.baseDir, SubRunsDir, "sub_"+reqID)
globPattern := filepath.Join(e.baseDir, SubRunsDir, "child_"+reqID)
matches, err := filepath.Glob(globPattern)
if err != nil {
return nil, fmt.Errorf("failed to list sub-run directories: %w", err)
}
if len(matches) == 0 {
return nil, persistence.ErrRequestIDNotFound
return nil, runstore.ErrRequestIDNotFound
}
// Sort the matches by timestamp
sort.Slice(matches, func(i, j int) bool {
@ -101,19 +113,19 @@ func (e Run) FindSubRun(_ context.Context, reqID string) (*Run, error) {
// LatestRecord returns the most recent record for this run.
// It searches through all attempt directories and returns the first valid record found.
func (e Run) LatestRecord(_ context.Context, cache *filecache.Cache[*persistence.Status]) (*Record, error) {
func (e Run) LatestRecord(_ context.Context, cache *fileutil.Cache[*runstore.Status]) (*Record, error) {
attempts, err := listDirsSorted(e.baseDir, true, reAttempt)
if err != nil {
return nil, err
}
// Return the first valid attempt
for _, attempt := range attempts {
record := NewRecord(filepath.Join(e.baseDir, attempt, "status.json"), cache)
record := NewRecord(filepath.Join(e.baseDir, attempt, JSONLStatusFile), cache)
if record.Exists() {
return record, nil
}
}
return nil, persistence.ErrNoStatusData
return nil, runstore.ErrNoStatusData
}
// LastUpdated returns the last modification time of the latest record.
@ -134,7 +146,7 @@ func (e Run) Remove() error {
// Regular expressions for parsing directory names
var reRun = regexp.MustCompile(`^run_(\d{8}_\d{6}Z)_(.*)$`) // Matches runs directory names
var reAttempt = regexp.MustCompile(`^attempt_(\d{8}_\d{6}_\d{3}Z)$`) // Matches attempt directory names
var reRunSub = regexp.MustCompile(`^sub_(.*)$`) // Matches sub-run directory names
var reRunSub = regexp.MustCompile(`^child_(.*)$`) // Matches sub-run directory names
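A quick sketch of directory names the patterns above accept, mirroring how CreateRecord and CreateSubRun name directories; the sample values are illustrative.
func exampleDirNamePatterns() (run, attempt, sub bool) {
    run = reRun.MatchString("run_20210102_150405Z_req-1")
    attempt = reAttempt.MatchString("attempt_20210102_150405_000Z")
    sub = reRunSub.MatchString("child_req-2")
    return run, attempt, sub
}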
// formatRunTimestamp formats a TimeInUTC instance into a string representation (without milliseconds).
// The format is "YYYYMMDD_HHMMSSZ".

View File

@ -1,4 +1,4 @@
package jsondb
package filestore
import (
"os"
@ -7,7 +7,7 @@ import (
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/stretchr/testify/require"
)
@ -62,7 +62,7 @@ func (et ExecutionTest) WriteStatus(t *testing.T, ts TimeInUTC, s scheduler.Stat
t.Helper()
dag := &digraph.DAG{Name: "test-dag"}
status := persistence.NewStatusFactory(dag).Default()
status := runstore.InitialStatus(dag)
status.RequestID = "test-id-1"
status.Status = s

View File

@ -1,4 +1,4 @@
package jsondb
package filestore
import (
"context"
@ -9,14 +9,14 @@ import (
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type JSONDBTest struct {
Context context.Context
DB *JSONDB
DB *fileStore
tmpDir string
}
@ -40,7 +40,7 @@ func (th JSONDBTest) CreateRecord(t *testing.T, ts time.Time, requestID string,
t.Helper()
dag := th.DAG("test_DAG")
record, err := th.DB.NewRecord(th.Context, dag.DAG, ts, requestID, persistence.NewRecordOptions{})
record, err := th.DB.NewRecord(th.Context, dag.DAG, ts, requestID, runstore.NewRecordOptions{})
require.NoError(t, err)
err = record.Open(th.Context)
@ -50,7 +50,7 @@ func (th JSONDBTest) CreateRecord(t *testing.T, ts time.Time, requestID string,
_ = record.Close(th.Context)
}()
status := persistence.NewStatusFactory(dag.DAG).Default()
status := runstore.InitialStatus(dag.DAG)
status.RequestID = requestID
status.Status = s
@ -101,7 +101,7 @@ func (d DAGTest) Writer(t *testing.T, requestID string, startedAt time.Time) Wri
}
}
func (w WriterTest) Write(t *testing.T, status persistence.Status) {
func (w WriterTest) Write(t *testing.T, status runstore.Status) {
t.Helper()
err := w.Writer.write(status)

View File

@ -1,4 +1,4 @@
package jsondb
package filestore
import "time"

View File

@ -1,4 +1,4 @@
package jsondb
package filestore
import (
"bufio"
@ -12,7 +12,7 @@ import (
"github.com/dagu-org/dagu/internal/fileutil"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
)
// WriterState represents the current state of a writer
@ -88,7 +88,7 @@ func (w *Writer) Open() error {
// Write serializes the status to JSON and appends it to the file.
// It automatically flushes data to ensure durability.
func (w *Writer) Write(ctx context.Context, st persistence.Status) error {
func (w *Writer) Write(ctx context.Context, st runstore.Status) error {
// Add context info to logs if write fails
if err := w.write(st); err != nil {
logger.Errorf(ctx, "Failed to write status: %v", err)
@ -98,7 +98,7 @@ func (w *Writer) Write(ctx context.Context, st persistence.Status) error {
return nil
}
func (w *Writer) write(st persistence.Status) error {
func (w *Writer) write(st runstore.Status) error {
w.mu.Lock()
defer w.mu.Unlock()

View File

@ -1,4 +1,4 @@
package jsondb
package filestore
import (
"context"
@ -7,7 +7,7 @@ import (
"time"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -19,7 +19,7 @@ func TestWriter(t *testing.T) {
t.Run("WriteStatusToNewFile", func(t *testing.T) {
dag := th.DAG("test_write_status")
requestID := uuid.Must(uuid.NewV7()).String()
status := persistence.NewStatusFactory(dag.DAG).Create(
status := runstore.NewStatusBuilder(dag.DAG).Create(
requestID, scheduler.StatusRunning, 1, time.Now(),
)
writer := dag.Writer(t, requestID, time.Now())
@ -35,7 +35,7 @@ func TestWriter(t *testing.T) {
writer := dag.Writer(t, requestID, startedAt)
status := persistence.NewStatusFactory(dag.DAG).Create(
status := runstore.NewStatusBuilder(dag.DAG).Create(
requestID, scheduler.StatusCancel, 1, time.Now(),
)
@ -84,7 +84,7 @@ func TestWriterErrorHandling(t *testing.T) {
dag := th.DAG("test_write_to_closed_writer")
requestID := uuid.Must(uuid.NewV7()).String()
status := persistence.NewStatusFactory(dag.DAG).Create(requestID, scheduler.StatusRunning, 1, time.Now())
status := runstore.NewStatusBuilder(dag.DAG).Create(requestID, scheduler.StatusRunning, 1, time.Now())
assert.Error(t, writer.write(status))
})
@ -103,7 +103,7 @@ func TestWriterRename(t *testing.T) {
dag := th.DAG("test_rename_old")
writer := dag.Writer(t, "request-id-1", time.Now())
requestID := uuid.Must(uuid.NewV7()).String()
status := persistence.NewStatusFactory(dag.DAG).Create(requestID, scheduler.StatusRunning, 1, time.Now())
status := runstore.NewStatusBuilder(dag.DAG).Create(requestID, scheduler.StatusRunning, 1, time.Now())
writer.Write(t, status)
writer.Close(t)
require.FileExists(t, writer.FilePath)

View File

@ -1,8 +1,7 @@
package persistence
package runstore
import (
"errors"
"fmt"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
@ -29,34 +28,44 @@ func FromNodes(nodes []scheduler.NodeData) []*Node {
// FromNode converts a single scheduler NodeData to a persistence Node
func FromNode(node scheduler.NodeData) *Node {
subRuns := make([]SubRun, len(node.State.SubRuns))
for i, subRun := range node.State.SubRuns {
subRuns[i] = SubRun(subRun)
}
var errText string
if node.State.Error != nil {
errText = node.State.Error.Error()
}
return &Node{
Step: node.Step,
Log: node.State.Log,
StartedAt: stringutil.FormatTime(node.State.StartedAt),
FinishedAt: stringutil.FormatTime(node.State.FinishedAt),
Status: node.State.Status,
StatusText: node.State.Status.String(),
RetriedAt: stringutil.FormatTime(node.State.RetriedAt),
RetryCount: node.State.RetryCount,
DoneCount: node.State.DoneCount,
Error: errText(node.State.Error),
RequestID: node.State.RequestID,
Error: errText,
SubRuns: subRuns,
}
}
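A small sketch, assuming the same package: the new SubRuns field is how a node records the runs it spawned, so collecting their request IDs from a converted node looks like this. The helper name is illustrative.
func exampleSubRunIDs(data scheduler.NodeData) []string {
    n := FromNode(data)
    ids := make([]string, 0, len(n.SubRuns))
    for _, sub := range n.SubRuns {
        ids = append(ids, sub.RequestID)
    }
    return ids
}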
// Node represents a DAG step with its execution state for persistence
type Node struct {
Step digraph.Step `json:"Step"`
Log string `json:"Log"`
StartedAt string `json:"StartedAt"`
FinishedAt string `json:"FinishedAt"`
Status scheduler.NodeStatus `json:"Status"`
RetriedAt string `json:"RetriedAt,omitempty"`
RetryCount int `json:"RetryCount,omitempty"`
DoneCount int `json:"DoneCount,omitempty"`
Error string `json:"Error,omitempty"`
StatusText string `json:"StatusText"`
RequestID string `json:"RequestId,omitempty"`
Step digraph.Step `json:"step"`
Log string `json:"log"`
StartedAt string `json:"startedAt"`
FinishedAt string `json:"finishedAt"`
Status scheduler.NodeStatus `json:"status"`
RetriedAt string `json:"retriedAt,omitempty"`
RetryCount int `json:"retryCount,omitempty"`
DoneCount int `json:"doneCount,omitempty"`
Error string `json:"error,omitempty"`
SubRuns []SubRun `json:"subRuns,omitempty"`
}
// SubRun records the request ID of a run spawned by a node step.
type SubRun struct {
RequestID string `json:"requestId,omitempty"`
}
// ToNode converts a persistence Node back to a scheduler Node
@ -64,6 +73,10 @@ func (n *Node) ToNode() *scheduler.Node {
startedAt, _ := stringutil.ParseTime(n.StartedAt)
finishedAt, _ := stringutil.ParseTime(n.FinishedAt)
retriedAt, _ := stringutil.ParseTime(n.RetriedAt)
subRuns := make([]scheduler.SubRun, len(n.SubRuns))
for i, r := range n.SubRuns {
subRuns[i] = scheduler.SubRun(r)
}
return scheduler.NewNode(n.Step, scheduler.NodeState{
Status: n.Status,
Log: n.Log,
@ -72,8 +85,8 @@ func (n *Node) ToNode() *scheduler.Node {
RetriedAt: retriedAt,
RetryCount: n.RetryCount,
DoneCount: n.DoneCount,
Error: errFromText(n.Error),
RequestID: n.RequestID,
Error: errors.New(n.Error),
SubRuns: subRuns,
})
}
@ -84,24 +97,5 @@ func NewNode(step digraph.Step) *Node {
StartedAt: "-",
FinishedAt: "-",
Status: scheduler.NodeStatusNone,
StatusText: scheduler.NodeStatusNone.String(),
}
}
var errNodeProcessing = errors.New("node processing error")
// errFromText converts an error string to an error, wrapped with errNodeProcessing
func errFromText(err string) error {
if err == "" {
return nil
}
return fmt.Errorf("%w: %s", errNodeProcessing, err)
}
// errText extracts the error message from an error or returns empty string if nil
func errText(err error) string {
if err == nil {
return ""
}
return err.Error()
}

View File

@ -1,4 +1,4 @@
package persistence
package runstore
import (
"encoding/json"
@ -11,30 +11,29 @@ import (
"github.com/dagu-org/dagu/internal/stringutil"
)
// StatusFactory creates Status objects for a specific DAG
type StatusFactory struct {
// StatusBuilder creates Status objects for a specific DAG
type StatusBuilder struct {
dag *digraph.DAG // The DAG for which to create status objects
}
// NewStatusFactory creates a new StatusFactory for the specified DAG
func NewStatusFactory(dag *digraph.DAG) *StatusFactory {
return &StatusFactory{dag: dag}
// NewStatusBuilder creates a new StatusBuilder for the specified DAG
func NewStatusBuilder(dag *digraph.DAG) *StatusBuilder {
return &StatusBuilder{dag: dag}
}
// Default creates a default Status object for the DAG with initial values
func (f *StatusFactory) Default() Status {
// InitialStatus creates an initial Status object for the given DAG
func InitialStatus(dag *digraph.DAG) Status {
return Status{
Name: f.dag.GetName(),
Name: dag.GetName(),
Status: scheduler.StatusNone,
StatusText: scheduler.StatusNone.String(),
PID: PID(0),
Nodes: FromSteps(f.dag.Steps),
OnExit: nodeOrNil(f.dag.HandlerOn.Exit),
OnSuccess: nodeOrNil(f.dag.HandlerOn.Success),
OnFailure: nodeOrNil(f.dag.HandlerOn.Failure),
OnCancel: nodeOrNil(f.dag.HandlerOn.Cancel),
Params: strings.Join(f.dag.Params, " "),
ParamsList: f.dag.Params,
Nodes: FromSteps(dag.Steps),
OnExit: nodeOrNil(dag.HandlerOn.Exit),
OnSuccess: nodeOrNil(dag.HandlerOn.Success),
OnFailure: nodeOrNil(dag.HandlerOn.Failure),
OnCancel: nodeOrNil(dag.HandlerOn.Cancel),
Params: strings.Join(dag.Params, " "),
ParamsList: dag.Params,
StartedAt: stringutil.FormatTime(time.Time{}),
FinishedAt: stringutil.FormatTime(time.Time{}),
}
@ -61,7 +60,7 @@ func WithNodes(nodes []scheduler.NodeData) StatusOption {
// WithFinishedAt returns a StatusOption that sets the finished time
func WithFinishedAt(t time.Time) StatusOption {
return func(s *Status) {
s.FinishedAt = FormatTime(t)
s.FinishedAt = formatTime(t)
}
}
@ -109,19 +108,18 @@ func WithLogFilePath(logFilePath string) StatusOption {
}
// Create builds a Status object for a DAG run with the specified parameters
func (f *StatusFactory) Create(
func (f *StatusBuilder) Create(
requestID string,
status scheduler.Status,
pid int,
startedAt time.Time,
opts ...StatusOption,
) Status {
statusObj := f.Default()
statusObj := InitialStatus(f.dag)
statusObj.RequestID = requestID
statusObj.Status = status
statusObj.StatusText = status.String()
statusObj.PID = PID(pid)
statusObj.StartedAt = FormatTime(startedAt)
statusObj.StartedAt = formatTime(startedAt)
for _, opt := range opts {
opt(&statusObj)
@ -142,31 +140,54 @@ func StatusFromJSON(s string) (*Status, error) {
// Status represents the complete execution state of a DAG run
type Status struct {
RootDAGName string `json:"RootDAGName,omitempty"`
RootRequestID string `json:"RootRequestId,omitempty"`
RequestID string `json:"RequestId,omitempty"`
Name string `json:"Name,omitempty"`
Status scheduler.Status `json:"Status"`
StatusText string `json:"StatusText"`
PID PID `json:"Pid,omitempty"`
Nodes []*Node `json:"Nodes,omitempty"`
OnExit *Node `json:"OnExit,omitempty"`
OnSuccess *Node `json:"OnSuccess,omitempty"`
OnFailure *Node `json:"OnFailure,omitempty"`
OnCancel *Node `json:"OnCancel,omitempty"`
StartedAt string `json:"StartedAt,omitempty"`
FinishedAt string `json:"FinishedAt,omitempty"`
Log string `json:"Log,omitempty"`
Params string `json:"Params,omitempty"`
ParamsList []string `json:"ParamsList,omitempty"`
RootDAGName string `json:"rootDAGName,omitempty"`
RootRequestID string `json:"rootRequestId,omitempty"`
RequestID string `json:"requestId,omitempty"`
Name string `json:"name,omitempty"`
Status scheduler.Status `json:"status"`
PID PID `json:"pid,omitempty"`
Nodes []*Node `json:"nodes,omitempty"`
OnExit *Node `json:"onExit,omitempty"`
OnSuccess *Node `json:"onSuccess,omitempty"`
OnFailure *Node `json:"onFailure,omitempty"`
OnCancel *Node `json:"onCancel,omitempty"`
StartedAt string `json:"startedAt,omitempty"`
FinishedAt string `json:"finishedAt,omitempty"`
Log string `json:"log,omitempty"`
Params string `json:"params,omitempty"`
ParamsList []string `json:"paramsList,omitempty"`
}
// SetStatusToErrorIfRunning changes the status to Error if it is currently Running
func (st *Status) SetStatusToErrorIfRunning() {
if st.Status == scheduler.StatusRunning {
st.Status = scheduler.StatusError
st.StatusText = st.Status.String()
// Errors returns a slice of errors for the current status
func (st *Status) Errors() []error {
var errs []error
for _, node := range st.Nodes {
if node.Error != "" {
errs = append(errs, fmt.Errorf("node %s: %s", node.Step.Name, node.Error))
}
}
if st.OnExit != nil && st.OnExit.Error != "" {
errs = append(errs, fmt.Errorf("onExit: %s", st.OnExit.Error))
}
if st.OnSuccess != nil && st.OnSuccess.Error != "" {
errs = append(errs, fmt.Errorf("onSuccess: %s", st.OnSuccess.Error))
}
if st.OnFailure != nil && st.OnFailure.Error != "" {
errs = append(errs, fmt.Errorf("onFailure: %s", st.OnFailure.Error))
}
if st.OnCancel != nil && st.OnCancel.Error != "" {
errs = append(errs, fmt.Errorf("onCancel: %s", st.OnCancel.Error))
}
return errs
}
// RootDAG returns the root DAG object for the current status
func (st *Status) RootDAG() digraph.RootDAG {
if st.RootDAGName == "" || st.RootRequestID == "" {
// If the root DAG name and request ID are not set, it means this is the root DAG
return digraph.NewRootDAG(st.Name, st.RequestID)
}
return digraph.NewRootDAG(st.RootDAGName, st.RootRequestID)
}
// NodesByName returns a slice of nodes with the specified name
@ -202,8 +223,8 @@ func (p PID) String() string {
return fmt.Sprintf("%d", p)
}
// FormatTime formats a time.Time or returns empty string if it's the zero value
func FormatTime(val time.Time) string {
// formatTime formats a time.Time or returns empty string if it's the zero value
func formatTime(val time.Time) string {
if val.IsZero() {
return ""
}
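
A minimal usage sketch, assuming the import paths used elsewhere in this diff: build a run status snapshot with the new StatusBuilder and surface node and handler errors through Errors(). The function name and parameter values are illustrative only.

```go
package example

import (
	"fmt"
	"time"

	"github.com/dagu-org/dagu/internal/digraph"
	"github.com/dagu-org/dagu/internal/digraph/scheduler"
	"github.com/dagu-org/dagu/internal/runstore"
)

// reportRunErrors builds a status snapshot for a finished run and prints the
// per-node and lifecycle-handler errors aggregated by Errors().
func reportRunErrors(dag *digraph.DAG, requestID string) {
	status := runstore.NewStatusBuilder(dag).Create(
		requestID, scheduler.StatusError, 0, time.Now(),
		runstore.WithFinishedAt(time.Now()),
	)
	for _, err := range status.Errors() {
		fmt.Println(err)
	}
}
```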

View File

@ -1,4 +1,4 @@
package persistence_test
package runstore_test
import (
"encoding/json"
@ -7,7 +7,7 @@ import (
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
@ -32,14 +32,14 @@ func TestStatusSerialization(t *testing.T) {
SMTP: &digraph.SMTPConfig{},
}
requestID := uuid.Must(uuid.NewV7()).String()
statusToPersist := persistence.NewStatusFactory(dag).Create(
requestID, scheduler.StatusSuccess, 0, startedAt, persistence.WithFinishedAt(finishedAt),
statusToPersist := runstore.NewStatusBuilder(dag).Create(
requestID, scheduler.StatusSuccess, 0, startedAt, runstore.WithFinishedAt(finishedAt),
)
rawJSON, err := json.Marshal(statusToPersist)
require.NoError(t, err)
statusObject, err := persistence.StatusFromJSON(string(rawJSON))
statusObject, err := runstore.StatusFromJSON(string(rawJSON))
require.NoError(t, err)
require.Equal(t, statusToPersist.Name, statusObject.Name)
@ -47,14 +47,6 @@ func TestStatusSerialization(t *testing.T) {
require.Equal(t, dag.Steps[0].Name, statusObject.Nodes[0].Step.Name)
}
func TestCorrectRunningStatus(t *testing.T) {
dag := &digraph.DAG{Name: "test"}
requestID := uuid.Must(uuid.NewV7()).String()
status := persistence.NewStatusFactory(dag).Create(requestID, scheduler.StatusRunning, 0, time.Now())
status.SetStatusToErrorIfRunning()
require.Equal(t, scheduler.StatusError, status.Status)
}
func TestJsonMarshal(t *testing.T) {
step := digraph.Step{
OutputVariables: &digraph.SyncMap{},

View File

@ -0,0 +1,53 @@
package runstore
import (
"context"
"errors"
"time"
"github.com/dagu-org/dagu/internal/digraph"
)
// Error variables for history operations
var (
ErrRequestIDNotFound = errors.New("request id not found")
ErrNoStatusData = errors.New("no status data")
)
// Store provides an interface for managing the execution data of DAGs.
type Store interface {
// NewRecord creates a new history record for a DAG run
NewRecord(ctx context.Context, dag *digraph.DAG, timestamp time.Time, reqID string, opts NewRecordOptions) (Record, error)
// Recent returns the most recent history records for a DAG, limited by itemLimit
Recent(ctx context.Context, name string, itemLimit int) []Record
// Latest returns the most recent history record for a DAG
Latest(ctx context.Context, name string) (Record, error)
// FindByRequestID finds a history record by its request ID
FindByRequestID(ctx context.Context, name string, reqID string) (Record, error)
// FindBySubRunRequestID finds a sub-run record by its request ID
FindBySubRunRequestID(ctx context.Context, reqID string, rootDAG digraph.RootDAG) (Record, error)
// RemoveOld removes history records older than retentionDays
RemoveOld(ctx context.Context, name string, retentionDays int) error
// Rename renames all history records from oldName to newName
Rename(ctx context.Context, oldName, newName string) error
}
// NewRecordOptions contains options for creating a new history record
type NewRecordOptions struct {
Root *digraph.RootDAG
Retry bool
}
// Record represents a single execution history record that can be read and written
type Record interface {
// Open prepares the record for writing
Open(ctx context.Context) error
// Write updates the record with new status information
Write(ctx context.Context, status Status) error
// Close finalizes any pending operations on the record
Close(ctx context.Context) error
// ReadStatus retrieves the execution status for this record
ReadStatus(ctx context.Context) (*Status, error)
// ReadDAG retrieves the DAG definition for this record
ReadDAG(ctx context.Context) (*digraph.DAG, error)
}
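
A minimal sketch, assuming a concrete Store implementation is available, of how a caller might persist one run and look up a sub-run through these interfaces; the helper names are illustrative, not part of this diff.

```go
package example

import (
	"context"
	"time"

	"github.com/dagu-org/dagu/internal/digraph"
	"github.com/dagu-org/dagu/internal/runstore"
)

// writeOneRun creates a record for a run, writes one status snapshot, and closes it.
func writeOneRun(ctx context.Context, st runstore.Store, dag *digraph.DAG, reqID string, status runstore.Status) error {
	rec, err := st.NewRecord(ctx, dag, time.Now(), reqID, runstore.NewRecordOptions{})
	if err != nil {
		return err
	}
	if err := rec.Open(ctx); err != nil {
		return err
	}
	defer func() { _ = rec.Close(ctx) }()
	return rec.Write(ctx, status)
}

// findSubRunStatus resolves a sub-run record under its root DAG run and reads its status.
func findSubRunStatus(ctx context.Context, st runstore.Store, root digraph.RootDAG, reqID string) (*runstore.Status, error) {
	rec, err := st.FindBySubRunRequestID(ctx, reqID, root)
	if err != nil {
		return nil, err
	}
	return rec.ReadStatus(ctx)
}
```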

View File

@ -5,11 +5,10 @@ import (
"errors"
"time"
"github.com/dagu-org/dagu/internal/client"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/dagu-org/dagu/internal/stringutil"
"github.com/robfig/cron/v3"
)
@ -31,7 +30,7 @@ type DAG struct {
WorkDir string
Next time.Time
Schedule cron.Schedule
Client client.Client
Client runstore.Client
}
// GetDAG returns the DAG associated with this job.
@ -57,11 +56,11 @@ func (job *DAG) Start(ctx context.Context) error {
}
// Job is ready; proceed to start.
return job.Client.StartDAG(ctx, job.DAG, client.StartOptions{Quiet: true})
return job.Client.Start(ctx, job.DAG, runstore.StartOptions{Quiet: true})
}
// Ready checks whether the job can be safely started based on the latest status.
func (job *DAG) Ready(ctx context.Context, latestStatus persistence.Status) error {
func (job *DAG) Ready(ctx context.Context, latestStatus runstore.Status) error {
// Prevent starting if it's already running.
if latestStatus.Status == scheduler.StatusRunning {
return ErrJobRunning
@ -86,7 +85,7 @@ func (job *DAG) Ready(ctx context.Context, latestStatus persistence.Status) erro
// skipIfSuccessful checks if the DAG has already run successfully in the window since the last scheduled time.
// If so, the current run is skipped.
func (job *DAG) skipIfSuccessful(ctx context.Context, latestStatus persistence.Status, latestStartedAt time.Time) error {
func (job *DAG) skipIfSuccessful(ctx context.Context, latestStatus runstore.Status, latestStartedAt time.Time) error {
// If skip is not configured, or the DAG is not currently successful, do nothing.
if !job.DAG.SkipIfSuccessful || latestStatus.Status != scheduler.StatusSuccess {
return nil
@ -118,12 +117,12 @@ func (job *DAG) Stop(ctx context.Context) error {
if latestStatus.Status != scheduler.StatusRunning {
return ErrJobIsNotRunning
}
return job.Client.StopDAG(ctx, job.DAG)
return job.Client.Stop(ctx, job.DAG, "")
}
// Restart restarts the job unconditionally (quiet mode).
func (job *DAG) Restart(ctx context.Context) error {
return job.Client.RestartDAG(ctx, job.DAG, client.RestartOptions{Quiet: true})
return job.Client.Restart(ctx, job.DAG, runstore.RestartOptions{Quiet: true})
}
// String returns a string representation of the job, which is the DAG's name.
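
For orientation, a condensed sketch (written as if inside the scheduler package, with an illustrative helper name) of the gate the job applies before starting a run through the renamed runstore client:

```go
package scheduler

import (
	"context"

	"github.com/dagu-org/dagu/internal/runstore"
)

// startIfReady mirrors the flow used by Start: reject runs that are already
// running, already finished for this window, or skipped, then start quietly.
func startIfReady(ctx context.Context, job *DAG, latest runstore.Status) error {
	if err := job.Ready(ctx, latest); err != nil {
		return err
	}
	return job.Client.Start(ctx, job.DAG, runstore.StartOptions{Quiet: true})
}
```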

View File

@ -9,9 +9,10 @@ import (
"sync"
"time"
"github.com/dagu-org/dagu/internal/client"
"github.com/dagu-org/dagu/internal/dagstore"
"github.com/dagu-org/dagu/internal/fileutil"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/dagu-org/dagu/internal/scheduler/filenotify"
"github.com/robfig/cron/v3"
@ -46,18 +47,20 @@ type dagJobManager struct {
targetDir string
registry map[string]*digraph.DAG
lock sync.Mutex
client client.Client
dagClient dagstore.Client
runClient runstore.Client
executable string
workDir string
}
// NewDAGJobManager creates a new DAG manager with the given configuration.
func NewDAGJobManager(dir string, client client.Client, executable, workDir string) JobManager {
func NewDAGJobManager(dir string, dagCli dagstore.Client, runCli runstore.Client, executable, workDir string) JobManager {
return &dagJobManager{
targetDir: dir,
lock: sync.Mutex{},
registry: map[string]*digraph.DAG{},
client: client,
dagClient: dagCli,
runClient: runCli,
executable: executable,
workDir: workDir,
}
@ -81,7 +84,7 @@ func (m *dagJobManager) Next(ctx context.Context, now time.Time) ([]*ScheduledJo
for _, dag := range m.registry {
dagName := strings.TrimSuffix(filepath.Base(dag.Location), filepath.Ext(dag.Location))
if m.client.IsSuspended(ctx, dagName) {
if m.dagClient.IsSuspended(ctx, dagName) {
continue
}
@ -113,7 +116,7 @@ func (m *dagJobManager) createJob(dag *digraph.DAG, next time.Time, schedule cro
WorkDir: m.workDir,
Next: next,
Schedule: schedule,
Client: m.client,
Client: m.runClient,
}
}
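
A hedged wiring sketch showing how the split dagstore/runstore clients can be constructed and handed to the manager. Constructor names and config fields mirror the test setup later in this diff; everything else is illustrative.

```go
package example

import (
	"github.com/dagu-org/dagu/internal/config"
	"github.com/dagu-org/dagu/internal/dagstore"
	"github.com/dagu-org/dagu/internal/dagstore/filestore"
	"github.com/dagu-org/dagu/internal/runstore"
	runfs "github.com/dagu-org/dagu/internal/runstore/filestore"
	"github.com/dagu-org/dagu/internal/scheduler"
)

// newJobManager builds the DAG client and run client and passes both to the
// manager, which previously took a single client.Client.
func newJobManager(cfg *config.Config, dagsDir string) scheduler.JobManager {
	dagStore := filestore.New(cfg.Paths.DAGsDir, filestore.WithFlagsBaseDir(cfg.Paths.SuspendFlagsDir))
	runStore := runfs.New(cfg.Paths.DataDir)
	runCli := runstore.NewClient(runStore, cfg.Paths.Executable, cfg.Global.WorkDir)
	dagCli := dagstore.NewClient(runCli, dagStore)
	return scheduler.NewDAGJobManager(dagsDir, dagCli, runCli, cfg.Paths.Executable, cfg.Global.WorkDir)
}
```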

View File

@ -5,6 +5,8 @@ import (
"testing"
"time"
"github.com/dagu-org/dagu/internal/dagstore"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/dagu-org/dagu/internal/scheduler"
"github.com/stretchr/testify/require"
)
@ -14,7 +16,7 @@ func TestReadEntries(t *testing.T) {
now := expectedNext.Add(-time.Second)
t.Run("InvalidDirectory", func(t *testing.T) {
manager := scheduler.NewDAGJobManager("invalid_directory", nil, "", "")
manager := scheduler.NewDAGJobManager("invalid_directory", dagstore.Client{}, runstore.Client{}, "", "")
jobs, err := manager.Next(context.Background(), expectedNext)
require.NoError(t, err)
require.Len(t, jobs, 0)
@ -54,8 +56,8 @@ func TestReadEntries(t *testing.T) {
job := findJobByName(t, beforeSuspend, "scheduled_job").Job
dagJob, ok := job.(*scheduler.DAG)
require.True(t, ok)
dag := dagJob.DAG
err = th.client.ToggleSuspend(ctx, dag.Name, true)
err = th.dagClient.ToggleSuspend(ctx, dagJob.DAG.Name, true)
require.NoError(t, err)
// check if the job is suspended and not returned

View File

@ -7,7 +7,7 @@ import (
"github.com/dagu-org/dagu/internal/digraph"
pkgsc "github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/runstore"
"github.com/dagu-org/dagu/internal/scheduler"
"github.com/dagu-org/dagu/internal/stringutil"
"github.com/robfig/cron/v3"
@ -158,7 +158,7 @@ func TestJobReady(t *testing.T) {
Next: tt.now,
}
lastRunStatus := persistence.Status{
lastRunStatus := runstore.Status{
Status: tt.lastStatus,
StartedAt: stringutil.FormatTime(tt.lastRunTime),
}

View File

@ -6,12 +6,12 @@ import (
"testing"
"github.com/dagu-org/dagu/internal/build"
"github.com/dagu-org/dagu/internal/client"
"github.com/dagu-org/dagu/internal/config"
"github.com/dagu-org/dagu/internal/dagstore"
"github.com/dagu-org/dagu/internal/dagstore/filestore"
"github.com/dagu-org/dagu/internal/fileutil"
"github.com/dagu-org/dagu/internal/persistence/jsondb"
"github.com/dagu-org/dagu/internal/persistence/local"
"github.com/dagu-org/dagu/internal/persistence/local/storage"
"github.com/dagu-org/dagu/internal/runstore"
runfs "github.com/dagu-org/dagu/internal/runstore/filestore"
"github.com/dagu-org/dagu/internal/scheduler"
"github.com/dagu-org/dagu/internal/test"
"github.com/stretchr/testify/require"
@ -36,9 +36,10 @@ func TestMain(m *testing.M) {
}
type testHelper struct {
manager scheduler.JobManager
client client.Client
config *config.Config
manager scheduler.JobManager
runClient runstore.Client
dagClient dagstore.Client
config *config.Config
}
func setupTest(t *testing.T) testHelper {
@ -65,15 +66,16 @@ func setupTest(t *testing.T) testHelper {
},
}
dagStore := local.NewDAGStore(cfg.Paths.DAGsDir)
historyStore := jsondb.New(cfg.Paths.DataDir)
flagStore := local.NewFlagStore(storage.NewStorage(cfg.Paths.SuspendFlagsDir))
cli := client.New(dagStore, historyStore, flagStore, "", cfg.Global.WorkDir)
jobManager := scheduler.NewDAGJobManager(testdataDir, cli, "", "")
dagStore := filestore.New(cfg.Paths.DAGsDir, filestore.WithFlagsBaseDir(cfg.Paths.SuspendFlagsDir))
runStore := runfs.New(cfg.Paths.DataDir)
runCli := runstore.NewClient(runStore, "", cfg.Global.WorkDir)
dagCli := dagstore.NewClient(runCli, dagStore)
jobManager := scheduler.NewDAGJobManager(testdataDir, dagCli, runCli, "", "")
return testHelper{
manager: jobManager,
client: cli,
config: cfg,
manager: jobManager,
dagClient: dagCli,
runClient: runCli,
config: cfg,
}
}

View File

@ -47,7 +47,7 @@ func SetupServer(t *testing.T, opts ...HelperOption) Server {
func (srv *Server) runServer(t *testing.T) {
t.Helper()
server := frontend.NewServer(srv.Config, srv.Helper.Client)
server := frontend.NewServer(srv.Config, srv.DAGClient, srv.RunClient)
err := server.Serve(srv.Context)
require.NoError(t, err, "failed to start server")
}
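
The server now takes both clients instead of one. A minimal sketch of that wiring outside the test helper; the frontend import path and the use of *config.Config here are assumptions.

```go
package example

import (
	"context"

	"github.com/dagu-org/dagu/internal/config"
	"github.com/dagu-org/dagu/internal/dagstore"
	"github.com/dagu-org/dagu/internal/frontend"
	"github.com/dagu-org/dagu/internal/runstore"
)

// serveFrontend constructs the server with the DAG client and run client and runs it.
func serveFrontend(ctx context.Context, cfg *config.Config, dagCli dagstore.Client, runCli runstore.Client) error {
	server := frontend.NewServer(cfg, dagCli, runCli)
	return server.Serve(ctx)
}
```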

View File

@ -16,16 +16,15 @@ import (
"time"
"github.com/dagu-org/dagu/internal/agent"
"github.com/dagu-org/dagu/internal/client"
"github.com/dagu-org/dagu/internal/config"
"github.com/dagu-org/dagu/internal/dagstore"
"github.com/dagu-org/dagu/internal/dagstore/filestore"
"github.com/dagu-org/dagu/internal/digraph"
"github.com/dagu-org/dagu/internal/digraph/scheduler"
"github.com/dagu-org/dagu/internal/fileutil"
"github.com/dagu-org/dagu/internal/logger"
"github.com/dagu-org/dagu/internal/persistence"
"github.com/dagu-org/dagu/internal/persistence/jsondb"
"github.com/dagu-org/dagu/internal/persistence/local"
"github.com/dagu-org/dagu/internal/persistence/local/storage"
"github.com/dagu-org/dagu/internal/runstore"
runfs "github.com/dagu-org/dagu/internal/runstore/filestore"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -95,20 +94,19 @@ func Setup(t *testing.T, opts ...HelperOption) Helper {
cfg.Server = *options.ServerConfig
}
dagStore := local.NewDAGStore(cfg.Paths.DAGsDir)
historyStore := jsondb.New(cfg.Paths.DataDir)
flagStore := local.NewFlagStore(
storage.NewStorage(cfg.Paths.SuspendFlagsDir),
)
dagStore := filestore.New(cfg.Paths.DAGsDir, filestore.WithFlagsBaseDir(cfg.Paths.SuspendFlagsDir))
runStore := runfs.New(cfg.Paths.DataDir)
client := client.New(dagStore, historyStore, flagStore, cfg.Paths.Executable, cfg.Global.WorkDir)
runClient := runstore.NewClient(runStore, cfg.Paths.Executable, cfg.Global.WorkDir)
dagClient := dagstore.NewClient(runClient, dagStore)
helper := Helper{
Context: createDefaultContext(),
Config: cfg,
Client: client,
DAGStore: dagStore,
HistoryStore: historyStore,
Context: createDefaultContext(),
Config: cfg,
RunClient: runClient,
DAGClient: dagClient,
DAGStore: dagStore,
RunStore: runStore,
tmpDir: tmpDir,
}
@ -140,9 +138,10 @@ type Helper struct {
Cancel context.CancelFunc
Config *config.Config
LoggingOutput *SyncBuffer
Client client.Client
HistoryStore persistence.HistoryStore
DAGStore persistence.DAGStore
RunClient runstore.Client
DAGClient dagstore.Client
RunStore runstore.Store
DAGStore dagstore.Store
tmpDir string
}
@ -186,44 +185,36 @@ type DAG struct {
func (d *DAG) AssertLatestStatus(t *testing.T, expected scheduler.Status) {
t.Helper()
var status scheduler.Status
var lock sync.Mutex
require.Eventually(t, func() bool {
lock.Lock()
defer lock.Unlock()
latest, err := d.Client.GetLatestStatus(d.Context, d.DAG)
require.NoError(t, err)
status = latest.Status
latest, err := d.RunClient.GetLatestStatus(d.Context, d.DAG)
if err != nil {
return false
}
t.Logf("latest status=%s errors=%v", latest.Status.String(), latest.Errors())
return latest.Status == expected
}, time.Second*3, time.Millisecond*50, "expected latest status to be %q, got %q", expected, status)
}, time.Second*3, time.Millisecond*50)
}
func (d *DAG) AssertHistoryCount(t *testing.T, expected int) {
t.Helper()
// the +1 to the limit is needed to ensure that the number of the history
// the +1 to the limit is needed to ensure that the number of runstore
// entries is exactly the expected number
history := d.Client.GetRecentHistory(d.Context, d.Name, expected+1)
require.Len(t, history, expected)
runstore := d.RunClient.ListRecentHistory(d.Context, d.Name, expected+1)
require.Len(t, runstore, expected)
}
func (d *DAG) AssertCurrentStatus(t *testing.T, expected scheduler.Status) {
t.Helper()
var status scheduler.Status
var lock sync.Mutex
assert.Eventually(t, func() bool {
lock.Lock()
defer lock.Unlock()
curr, err := d.Client.GetCurrentStatus(d.Context, d.DAG)
require.NoError(t, err)
status = curr.Status
curr, _ := d.RunClient.GetRealtimeStatus(d.Context, d.DAG, "")
if curr == nil {
return false
}
t.Logf("current status=%s errors=%v", curr.Status.String(), curr.Errors())
return curr.Status == expected
}, time.Second*3, time.Millisecond*50, "expected current status to be %q, got %q", expected, status)
}, time.Second*3, time.Millisecond*50)
}
// AssertOutputs checks the given outputs against the actual outputs of the DAG
@ -232,7 +223,7 @@ func (d *DAG) AssertCurrentStatus(t *testing.T, expected scheduler.Status) {
func (d *DAG) AssertOutputs(t *testing.T, outputs map[string]any) {
t.Helper()
status, err := d.Client.GetLatestStatus(d.Context, d.DAG)
status, err := d.RunClient.GetLatestStatus(d.Context, d.DAG)
require.NoError(t, err)
// collect the actual outputs from the status
@ -321,9 +312,9 @@ func (d *DAG) Agent(opts ...AgentOption) *Agent {
d.DAG,
logDir,
logFile,
d.Client,
d.RunClient,
d.DAGStore,
d.HistoryStore,
d.RunStore,
rootDAG,
helper.opts,
)

View File

@ -1,3 +1,3 @@
steps:
- name: "1"
command: "sleep 3"
command: "sleep 3"

View File

@ -1,3 +1,3 @@
steps:
- name: "1"
command: "true"
command: "sleep 1"

View File

@ -0,0 +1,3 @@
steps:
- name: "1"
command: "true"

View File

@ -0,0 +1,3 @@
steps:
- name: "1"
run: tree_child

View File

@ -3,6 +3,6 @@ steps:
- name: "1"
script: "echo $1"
- name: "2"
script: "sleep 10"
script: "sleep 100"
depends:
- "1"

View File

@ -1 +1 @@
params: "x $1"
params: "TEST_PARAM $1"

View File

@ -84,7 +84,7 @@ isting, editor, visualization, logs, etc.
- Persistence:
All DAG definitions, run metadata, step logs, and history are stored on disk (by default via
`internal/persistence/jsondb` and friends). No reliance on external DB or services.
`internal/history/jsondb` and friends). No reliance on external DB or services.
- API + Web Server:

View File

@ -1,4 +0,0 @@
node_modules/*
./node_modules/**
**/node_modules/**
*.js

View File

@ -1,8 +0,0 @@
{
"parser": "@typescript-eslint/parser",
"plugins": ["@typescript-eslint"],
"extends": ["eslint:recommended", "plugin:@typescript-eslint/recommended"],
"rules": {
"@typescript-eslint/no-non-null-assertion": "off"
}
}

Some files were not shown because too many files have changed in this diff