feat: refactor error handling to use custom_errors.LoaderError for improved error management
This commit is contained in:
@@ -1,75 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"git.ksdemosapps.com/kylesoda/go-migrate/internal/app/config"
|
|
||||||
)
|
|
||||||
|
|
||||||
func buildExtractQueryMssql(sourceDbInfo config.SourceTableInfo, columns []ColumnType, includeRange bool, isMinInclusive bool) string {
|
|
||||||
var sbQuery strings.Builder
|
|
||||||
|
|
||||||
sbQuery.WriteString("SELECT ")
|
|
||||||
|
|
||||||
if len(columns) == 0 {
|
|
||||||
sbQuery.WriteString("*")
|
|
||||||
} else {
|
|
||||||
for i, col := range columns {
|
|
||||||
fmt.Fprintf(&sbQuery, "[%s]", col.name)
|
|
||||||
|
|
||||||
if col.unifiedType == "GEOMETRY" {
|
|
||||||
fmt.Fprintf(&sbQuery, ".STAsBinary() AS [%s]", col.name)
|
|
||||||
}
|
|
||||||
|
|
||||||
if i < len(columns)-1 {
|
|
||||||
sbQuery.WriteString(", ")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Fprintf(&sbQuery, " FROM [%s].[%s]", sourceDbInfo.Schema, sourceDbInfo.Table)
|
|
||||||
|
|
||||||
if includeRange {
|
|
||||||
fmt.Fprintf(&sbQuery, " WHERE [%s]", sourceDbInfo.PrimaryKey)
|
|
||||||
if isMinInclusive {
|
|
||||||
sbQuery.WriteString(" >=")
|
|
||||||
} else {
|
|
||||||
sbQuery.WriteString(" >")
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Fprintf(&sbQuery, " @min AND [%s] <= @max", sourceDbInfo.PrimaryKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Fprintf(&sbQuery, " ORDER BY [%s] ASC", sourceDbInfo.PrimaryKey)
|
|
||||||
|
|
||||||
return sbQuery.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildExtractQueryPostgres(sourceDbInfo config.SourceTableInfo, columns []ColumnType) string {
|
|
||||||
var sbColumns strings.Builder
|
|
||||||
|
|
||||||
if len(columns) == 0 {
|
|
||||||
sbColumns.WriteString("*")
|
|
||||||
} else {
|
|
||||||
for i, col := range columns {
|
|
||||||
if col.unifiedType == "GEOMETRY" {
|
|
||||||
sbColumns.WriteString(`ST_AsEWKB("`)
|
|
||||||
sbColumns.WriteString(col.name)
|
|
||||||
sbColumns.WriteString(`") AS "`)
|
|
||||||
sbColumns.WriteString(col.name)
|
|
||||||
sbColumns.WriteString(`"`)
|
|
||||||
} else {
|
|
||||||
sbColumns.WriteString(`"`)
|
|
||||||
sbColumns.WriteString(col.name)
|
|
||||||
sbColumns.WriteString(`"`)
|
|
||||||
}
|
|
||||||
|
|
||||||
if i < len(columns)-1 {
|
|
||||||
sbColumns.WriteString(", ")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf(`SELECT %s FROM "%s"."%s" ORDER BY "%s" ASC`, sbColumns.String(), sourceDbInfo.Schema, sourceDbInfo.Table, sourceDbInfo.PrimaryKey)
|
|
||||||
}
|
|
||||||
@@ -1,47 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
// JobError describes a failure encountered while running a migration job,
// optionally wrapping the underlying error that caused it.
type JobError struct {
	// ShouldCancelJob, when true, tells the error handler to abort the
	// entire job instead of continuing with the remaining work.
	ShouldCancelJob bool
	// Msg is a human-readable description of what failed.
	Msg string
	// Prev is the underlying error that caused this one; may be nil.
	Prev error
}
|
|
||||||
|
|
||||||
func (e *JobError) Error() string {
|
|
||||||
if e.Prev != nil {
|
|
||||||
return fmt.Sprintf("%s: %v", e.Msg, e.Prev)
|
|
||||||
}
|
|
||||||
|
|
||||||
return e.Msg
|
|
||||||
}
|
|
||||||
|
|
||||||
func jobErrorHandler(ctx context.Context, chErrorsIn <-chan JobError) error {
|
|
||||||
for {
|
|
||||||
if ctx.Err() != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return nil
|
|
||||||
|
|
||||||
case err, ok := <-chErrorsIn:
|
|
||||||
if !ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err.ShouldCancelJob {
|
|
||||||
log.Error(err.Msg, " - ", err.Prev)
|
|
||||||
return &err
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Error(err.Msg, " - ", err.Prev)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -25,7 +25,7 @@ func loadRowsPostgres(
|
|||||||
tableInfo config.TargetTableInfo,
|
tableInfo config.TargetTableInfo,
|
||||||
columns []models.ColumnType,
|
columns []models.ColumnType,
|
||||||
chChunksIn <-chan models.Chunk,
|
chChunksIn <-chan models.Chunk,
|
||||||
chErrorsOut chan<- LoaderError,
|
chErrorsOut chan<- custom_errors.LoaderError,
|
||||||
chJobErrorsOut chan<- custom_errors.JobError,
|
chJobErrorsOut chan<- custom_errors.JobError,
|
||||||
wgActiveChunks *sync.WaitGroup,
|
wgActiveChunks *sync.WaitGroup,
|
||||||
rowsLoaded *int64,
|
rowsLoaded *int64,
|
||||||
@@ -61,7 +61,7 @@ func loadChunkPostgres(
|
|||||||
identifier pgx.Identifier,
|
identifier pgx.Identifier,
|
||||||
colNames []string,
|
colNames []string,
|
||||||
chunk models.Chunk,
|
chunk models.Chunk,
|
||||||
chErrorsOut chan<- LoaderError,
|
chErrorsOut chan<- custom_errors.LoaderError,
|
||||||
chJobErrorsOut chan<- custom_errors.JobError,
|
chJobErrorsOut chan<- custom_errors.JobError,
|
||||||
wgActiveChunks *sync.WaitGroup,
|
wgActiveChunks *sync.WaitGroup,
|
||||||
rowsLoaded *int64,
|
rowsLoaded *int64,
|
||||||
@@ -92,7 +92,7 @@ func loadChunkPostgres(
|
|||||||
}
|
}
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case chErrorsOut <- LoaderError{Chunk: chunk, Msg: err.Error()}:
|
case chErrorsOut <- custom_errors.LoaderError{Chunk: chunk, Msg: err.Error()}:
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -52,7 +52,7 @@ func processMigrationJob(
|
|||||||
chExtractorErrors := make(chan custom_errors.ExtractorError, job.QueueSize)
|
chExtractorErrors := make(chan custom_errors.ExtractorError, job.QueueSize)
|
||||||
chChunksRaw := make(chan models.Chunk, job.QueueSize)
|
chChunksRaw := make(chan models.Chunk, job.QueueSize)
|
||||||
chChunksTransformed := make(chan models.Chunk, job.QueueSize)
|
chChunksTransformed := make(chan models.Chunk, job.QueueSize)
|
||||||
chLoadersErrors := make(chan LoaderError, job.QueueSize)
|
chLoadersErrors := make(chan custom_errors.LoaderError, job.QueueSize)
|
||||||
|
|
||||||
var wgActiveBatches sync.WaitGroup
|
var wgActiveBatches sync.WaitGroup
|
||||||
var wgActiveChunks sync.WaitGroup
|
var wgActiveChunks sync.WaitGroup
|
||||||
@@ -68,7 +68,7 @@ func processMigrationJob(
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
go custom_errors.ExtractorErrorHandler(jobCtx, job.Retry.Attempts, chExtractorErrors, chBatches, chJobErrors, &wgActiveBatches)
|
go custom_errors.ExtractorErrorHandler(jobCtx, job.Retry.Attempts, chExtractorErrors, chBatches, chJobErrors, &wgActiveBatches)
|
||||||
go loaderErrorHandler(jobCtx, job.Retry.Attempts, chLoadersErrors, chChunksTransformed, chJobErrors, &wgActiveChunks)
|
go custom_errors.LoaderErrorHandler(jobCtx, job.Retry.Attempts, chLoadersErrors, chChunksTransformed, chJobErrors, &wgActiveChunks)
|
||||||
|
|
||||||
maxExtractors := min(job.MaxExtractors, len(batches))
|
maxExtractors := min(job.MaxExtractors, len(batches))
|
||||||
log.Infof("Starting %d extractor(s)...", maxExtractors)
|
log.Infof("Starting %d extractor(s)...", maxExtractors)
|
||||||
|
|||||||
@@ -1,11 +1,10 @@
|
|||||||
package main
|
package custom_errors
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"git.ksdemosapps.com/kylesoda/go-migrate/internal/app/custom_errors"
|
|
||||||
"git.ksdemosapps.com/kylesoda/go-migrate/internal/app/models"
|
"git.ksdemosapps.com/kylesoda/go-migrate/internal/app/models"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -18,12 +17,12 @@ func (e *LoaderError) Error() string {
|
|||||||
return e.Msg
|
return e.Msg
|
||||||
}
|
}
|
||||||
|
|
||||||
func loaderErrorHandler(
|
func LoaderErrorHandler(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
maxRetryAttempts int,
|
maxRetryAttempts int,
|
||||||
chErrorsIn <-chan LoaderError,
|
chErrorsIn <-chan LoaderError,
|
||||||
chChunksOut chan<- models.Chunk,
|
chChunksOut chan<- models.Chunk,
|
||||||
chJobErrorsOut chan<- custom_errors.JobError,
|
chJobErrorsOut chan<- JobError,
|
||||||
wgActiveChunks *sync.WaitGroup,
|
wgActiveChunks *sync.WaitGroup,
|
||||||
) {
|
) {
|
||||||
for {
|
for {
|
||||||
@@ -41,7 +40,7 @@ func loaderErrorHandler(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err.RetryCounter >= maxRetryAttempts {
|
if err.RetryCounter >= maxRetryAttempts {
|
||||||
jobError := custom_errors.JobError{
|
jobError := JobError{
|
||||||
ShouldCancelJob: false,
|
ShouldCancelJob: false,
|
||||||
Msg: fmt.Sprintf("chunk %v reached max retries (%d)", err.Id, maxRetryAttempts),
|
Msg: fmt.Sprintf("chunk %v reached max retries (%d)", err.Id, maxRetryAttempts),
|
||||||
Prev: &err,
|
Prev: &err,
|
||||||
Reference in New Issue
Block a user