feat: refactor error handling to use custom_errors.LoaderError for improved error management
This commit is contained in:
@@ -1,75 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"git.ksdemosapps.com/kylesoda/go-migrate/internal/app/config"
|
||||
)
|
||||
|
||||
func buildExtractQueryMssql(sourceDbInfo config.SourceTableInfo, columns []ColumnType, includeRange bool, isMinInclusive bool) string {
|
||||
var sbQuery strings.Builder
|
||||
|
||||
sbQuery.WriteString("SELECT ")
|
||||
|
||||
if len(columns) == 0 {
|
||||
sbQuery.WriteString("*")
|
||||
} else {
|
||||
for i, col := range columns {
|
||||
fmt.Fprintf(&sbQuery, "[%s]", col.name)
|
||||
|
||||
if col.unifiedType == "GEOMETRY" {
|
||||
fmt.Fprintf(&sbQuery, ".STAsBinary() AS [%s]", col.name)
|
||||
}
|
||||
|
||||
if i < len(columns)-1 {
|
||||
sbQuery.WriteString(", ")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(&sbQuery, " FROM [%s].[%s]", sourceDbInfo.Schema, sourceDbInfo.Table)
|
||||
|
||||
if includeRange {
|
||||
fmt.Fprintf(&sbQuery, " WHERE [%s]", sourceDbInfo.PrimaryKey)
|
||||
if isMinInclusive {
|
||||
sbQuery.WriteString(" >=")
|
||||
} else {
|
||||
sbQuery.WriteString(" >")
|
||||
}
|
||||
|
||||
fmt.Fprintf(&sbQuery, " @min AND [%s] <= @max", sourceDbInfo.PrimaryKey)
|
||||
}
|
||||
|
||||
fmt.Fprintf(&sbQuery, " ORDER BY [%s] ASC", sourceDbInfo.PrimaryKey)
|
||||
|
||||
return sbQuery.String()
|
||||
}
|
||||
|
||||
func buildExtractQueryPostgres(sourceDbInfo config.SourceTableInfo, columns []ColumnType) string {
|
||||
var sbColumns strings.Builder
|
||||
|
||||
if len(columns) == 0 {
|
||||
sbColumns.WriteString("*")
|
||||
} else {
|
||||
for i, col := range columns {
|
||||
if col.unifiedType == "GEOMETRY" {
|
||||
sbColumns.WriteString(`ST_AsEWKB("`)
|
||||
sbColumns.WriteString(col.name)
|
||||
sbColumns.WriteString(`") AS "`)
|
||||
sbColumns.WriteString(col.name)
|
||||
sbColumns.WriteString(`"`)
|
||||
} else {
|
||||
sbColumns.WriteString(`"`)
|
||||
sbColumns.WriteString(col.name)
|
||||
sbColumns.WriteString(`"`)
|
||||
}
|
||||
|
||||
if i < len(columns)-1 {
|
||||
sbColumns.WriteString(", ")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Sprintf(`SELECT %s FROM "%s"."%s" ORDER BY "%s" ASC`, sbColumns.String(), sourceDbInfo.Schema, sourceDbInfo.Table, sourceDbInfo.PrimaryKey)
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// JobError is an error raised while processing a migration job.
// ShouldCancelJob tells the job-level error handler whether the whole job
// must be aborted; Prev optionally carries the underlying cause.
type JobError struct {
	ShouldCancelJob bool
	Msg             string
	Prev            error
}

// Error implements the error interface, appending the wrapped cause
// when one is present.
func (e *JobError) Error() string {
	if e.Prev != nil {
		return fmt.Sprintf("%s: %v", e.Msg, e.Prev)
	}

	return e.Msg
}

// Unwrap exposes the underlying cause so errors.Is / errors.As can walk
// the error chain. Returns nil when there is no wrapped error.
func (e *JobError) Unwrap() error {
	return e.Prev
}
|
||||
|
||||
func jobErrorHandler(ctx context.Context, chErrorsIn <-chan JobError) error {
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
|
||||
case err, ok := <-chErrorsIn:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err.ShouldCancelJob {
|
||||
log.Error(err.Msg, " - ", err.Prev)
|
||||
return &err
|
||||
}
|
||||
|
||||
log.Error(err.Msg, " - ", err.Prev)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,69 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"git.ksdemosapps.com/kylesoda/go-migrate/internal/app/custom_errors"
|
||||
"git.ksdemosapps.com/kylesoda/go-migrate/internal/app/models"
|
||||
)
|
||||
|
||||
type LoaderError struct {
|
||||
models.Chunk
|
||||
Msg string
|
||||
}
|
||||
|
||||
func (e *LoaderError) Error() string {
|
||||
return e.Msg
|
||||
}
|
||||
|
||||
func loaderErrorHandler(
|
||||
ctx context.Context,
|
||||
maxRetryAttempts int,
|
||||
chErrorsIn <-chan LoaderError,
|
||||
chChunksOut chan<- models.Chunk,
|
||||
chJobErrorsOut chan<- custom_errors.JobError,
|
||||
wgActiveChunks *sync.WaitGroup,
|
||||
) {
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
|
||||
case err, ok := <-chErrorsIn:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if err.RetryCounter >= maxRetryAttempts {
|
||||
jobError := custom_errors.JobError{
|
||||
ShouldCancelJob: false,
|
||||
Msg: fmt.Sprintf("chunk %v reached max retries (%d)", err.Id, maxRetryAttempts),
|
||||
Prev: &err,
|
||||
}
|
||||
|
||||
select {
|
||||
case chJobErrorsOut <- jobError:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
wgActiveChunks.Done()
|
||||
continue
|
||||
}
|
||||
|
||||
err.RetryCounter++
|
||||
|
||||
select {
|
||||
case chChunksOut <- err.Chunk:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -25,7 +25,7 @@ func loadRowsPostgres(
|
||||
tableInfo config.TargetTableInfo,
|
||||
columns []models.ColumnType,
|
||||
chChunksIn <-chan models.Chunk,
|
||||
chErrorsOut chan<- LoaderError,
|
||||
chErrorsOut chan<- custom_errors.LoaderError,
|
||||
chJobErrorsOut chan<- custom_errors.JobError,
|
||||
wgActiveChunks *sync.WaitGroup,
|
||||
rowsLoaded *int64,
|
||||
@@ -61,7 +61,7 @@ func loadChunkPostgres(
|
||||
identifier pgx.Identifier,
|
||||
colNames []string,
|
||||
chunk models.Chunk,
|
||||
chErrorsOut chan<- LoaderError,
|
||||
chErrorsOut chan<- custom_errors.LoaderError,
|
||||
chJobErrorsOut chan<- custom_errors.JobError,
|
||||
wgActiveChunks *sync.WaitGroup,
|
||||
rowsLoaded *int64,
|
||||
@@ -92,7 +92,7 @@ func loadChunkPostgres(
|
||||
}
|
||||
|
||||
select {
|
||||
case chErrorsOut <- LoaderError{Chunk: chunk, Msg: err.Error()}:
|
||||
case chErrorsOut <- custom_errors.LoaderError{Chunk: chunk, Msg: err.Error()}:
|
||||
case <-ctx.Done():
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -52,7 +52,7 @@ func processMigrationJob(
|
||||
chExtractorErrors := make(chan custom_errors.ExtractorError, job.QueueSize)
|
||||
chChunksRaw := make(chan models.Chunk, job.QueueSize)
|
||||
chChunksTransformed := make(chan models.Chunk, job.QueueSize)
|
||||
chLoadersErrors := make(chan LoaderError, job.QueueSize)
|
||||
chLoadersErrors := make(chan custom_errors.LoaderError, job.QueueSize)
|
||||
|
||||
var wgActiveBatches sync.WaitGroup
|
||||
var wgActiveChunks sync.WaitGroup
|
||||
@@ -68,7 +68,7 @@ func processMigrationJob(
|
||||
}()
|
||||
|
||||
go custom_errors.ExtractorErrorHandler(jobCtx, job.Retry.Attempts, chExtractorErrors, chBatches, chJobErrors, &wgActiveBatches)
|
||||
go loaderErrorHandler(jobCtx, job.Retry.Attempts, chLoadersErrors, chChunksTransformed, chJobErrors, &wgActiveChunks)
|
||||
go custom_errors.LoaderErrorHandler(jobCtx, job.Retry.Attempts, chLoadersErrors, chChunksTransformed, chJobErrors, &wgActiveChunks)
|
||||
|
||||
maxExtractors := min(job.MaxExtractors, len(batches))
|
||||
log.Infof("Starting %d extractor(s)...", maxExtractors)
|
||||
|
||||
Reference in New Issue
Block a user