feat: implement exponential backoff strategy for error handling in extractor and loader processes; enhance retry configuration options

2026-04-12 20:35:29 -05:00
parent 5633dc98d0
commit f126d5bbd0
7 changed files with 122 additions and 23 deletions


@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"sync"
+	"git.ksdemosapps.com/kylesoda/go-migrate/internal/app/config"
 	"git.ksdemosapps.com/kylesoda/go-migrate/internal/app/models"
 	"github.com/google/uuid"
 )
@@ -22,7 +23,7 @@ func (e *ExtractorError) Error() string {
 func ExtractorErrorHandler(
 	ctx context.Context,
-	maxRetryAttempts int,
+	retryConfig config.RetryConfig,
 	chErrorsIn <-chan ExtractorError,
 	chPartitionsOut chan<- models.Partition,
 	chJobErrorsOut chan<- JobError,
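
The signature change above swaps the bare maxRetryAttempts count for the whole retry configuration. The concrete definition of config.RetryConfig lives in one of the other changed files and is not shown on this page; a minimal sketch consistent with the fields this diff reads (Attempts, BaseDelayMs, MaxDelayMs, MaxJitterMs) could look like the following, where the field types and comments are assumptions:

	// Hypothetical reconstruction of config.RetryConfig. Field names are taken
	// from the usage in this diff; the types and meanings are assumptions.
	type RetryConfig struct {
		Attempts    int   // retry attempts before a partition is abandoned
		BaseDelayMs int64 // delay before the first retry, in milliseconds
		MaxDelayMs  int64 // cap on the exponentially growing delay
		MaxJitterMs int64 // upper bound on the random jitter added per retry
	}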
@@ -42,11 +43,11 @@ func ExtractorErrorHandler(
 				return
 			}
-			if err.Partition.RetryCounter >= maxRetryAttempts {
+			if err.Partition.RetryCounter >= retryConfig.Attempts {
 				wgActivePartitions.Done()
 				jobError := JobError{
 					ShouldCancelJob: false,
-					Msg:             fmt.Sprintf("Partition %v reached max retries (%d)", err.Partition.Id, maxRetryAttempts),
+					Msg:             fmt.Sprintf("Partition %v reached max retries (%d)", err.Partition.Id, retryConfig.Attempts),
 					Prev:            &err,
 				}
@@ -74,6 +75,13 @@ func ExtractorErrorHandler(
 			newPartition := err.Partition
 			newPartition.RetryCounter++
+			delay := computeBackoffDelay(
+				newPartition.RetryCounter,
+				retryConfig.BaseDelayMs,
+				retryConfig.MaxDelayMs,
+				retryConfig.MaxJitterMs,
+			)
 			if err.HasLastId {
 				newPartition.ParentId = err.Partition.Id
 				newPartition.Id = uuid.New()
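
computeBackoffDelay is introduced elsewhere in this commit, so its body does not appear in this hunk. Assuming the *Ms fields are millisecond counts, a sketch of the exponential-backoff-with-jitter computation the name suggests (not the committed implementation) might be:

	import (
		"math/rand"
		"time"
	)

	// Sketch only: double the base delay for each retry, cap it at maxDelayMs,
	// and add random jitter so partitions that failed together do not all
	// retry at the same instant.
	func computeBackoffDelay(attempt int, baseDelayMs, maxDelayMs, maxJitterMs int64) time.Duration {
		delayMs := baseDelayMs
		for i := 1; i < attempt && delayMs < maxDelayMs; i++ {
			delayMs *= 2 // exponential growth: base * 2^(attempt-1)
		}
		if delayMs > maxDelayMs {
			delayMs = maxDelayMs
		}
		if maxJitterMs > 0 {
			delayMs += rand.Int63n(maxJitterMs) // uniform jitter in [0, maxJitterMs)
		}
		return time.Duration(delayMs) * time.Millisecond
	}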
@@ -81,11 +89,13 @@
 				newPartition.IsLowerLimitInclusive = false
 			}
-			select {
-			case chPartitionsOut <- newPartition:
-			case <-ctx.Done():
-				return
-			}
+			requeueWithBackoff(ctx, delay, func() {
+				select {
+				case chPartitionsOut <- newPartition:
+				case <-ctx.Done():
+					return
+				}
+			})
 		}
 	}
 }
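
requeueWithBackoff is likewise defined in one of the other changed files. The call site implies it waits out the computed delay before running the requeue callback, abandoning the retry if the context is cancelled first. Whether the wait happens on a separate goroutine is not visible here, but a blocking wait would stall the drain of chErrorsIn, so this sketch assumes asynchronous behavior:

	import (
		"context"
		"time"
	)

	// Assumed behavior, not the committed code: wait for the backoff delay on
	// its own goroutine, then invoke the requeue callback; give up if the
	// job's context is cancelled while waiting.
	func requeueWithBackoff(ctx context.Context, delay time.Duration, requeue func()) {
		go func() {
			timer := time.NewTimer(delay)
			defer timer.Stop()
			select {
			case <-timer.C:
				requeue()
			case <-ctx.Done():
			}
		}()
	}

Note that wgActivePartitions.Done() is only called on the give-up path, so under this reading the partition stays counted as in flight while its backoff delay elapses.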