feat: implement batch processing for MSSQL with improved structure and logging
This commit is contained in:
110
cmd/go_migrate/batch-generator.go
Normal file
110
cmd/go_migrate/batch-generator.go
Normal file
@@ -0,0 +1,110 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// Batch describes one unit of migration work: either a contiguous primary-key
// range of the source table, or (when ShouldUseRange is false) the whole table.
type Batch struct {
	// Id uniquely identifies this batch.
	Id uuid.UUID
	// ParentId links a retried batch back to the batch it was split from.
	ParentId uuid.UUID
	// LowerLimit and UpperLimit bound the primary-key range covered by this
	// batch. Only meaningful when ShouldUseRange is true.
	LowerLimit int64
	UpperLimit int64
	// IsLowerLimitInclusive reports whether LowerLimit itself belongs to the
	// range (true for the first batch; subsequent batches start just above the
	// previous upper limit).
	IsLowerLimitInclusive bool
	// ShouldUseRange is false for small tables that are migrated in a single
	// pass without a key range.
	ShouldUseRange bool
	// RetryCounter counts how many times this batch has been re-attempted.
	RetryCounter int
}
|
||||
|
||||
func estimateTotalRowsMssql(ctx context.Context, db *sql.DB, job MigrationJob) (int64, error) {
|
||||
query := `
|
||||
SELECT
|
||||
SUM(p.rows) AS count
|
||||
FROM sys.tables t
|
||||
JOIN sys.schemas s ON t.schema_id = s.schema_id
|
||||
JOIN sys.partitions p ON t.object_id = p.object_id
|
||||
WHERE s.name = @schema AND t.name = @table AND p.index_id IN (0, 1)
|
||||
GROUP BY t.name`
|
||||
|
||||
ctxTimeout, cancel := context.WithTimeout(ctx, time.Second*20)
|
||||
defer cancel()
|
||||
|
||||
var rowsCount int64
|
||||
err := db.QueryRowContext(ctxTimeout, query, sql.Named("schema", job.Schema), sql.Named("table", job.Table)).Scan(&rowsCount)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return rowsCount, nil
|
||||
}
|
||||
|
||||
func calculateBatchesMssql(ctx context.Context, db *sql.DB, job MigrationJob, batchCount int64) ([]Batch, error) {
|
||||
query := fmt.Sprintf(`
|
||||
SELECT
|
||||
MIN([%s]) AS lower_limit,
|
||||
MAX([%s]) AS upper_limit
|
||||
FROM
|
||||
(SELECT [%s], NTILE(@batchCount) OVER (ORDER BY [%s]) AS batch_id FROM [%s].[%s]) AS T
|
||||
GROUP BY batch_id
|
||||
ORDER BY batch_id`, job.PrimaryKey, job.PrimaryKey, job.PrimaryKey, job.PrimaryKey, job.Schema, job.Table)
|
||||
|
||||
ctxTimeout, cancel := context.WithTimeout(ctx, time.Second*20)
|
||||
defer cancel()
|
||||
|
||||
rows, err := db.QueryContext(ctxTimeout, query, sql.Named("batchCount", batchCount))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
batches := make([]Batch, 0, batchCount)
|
||||
|
||||
for rows.Next() {
|
||||
batch := Batch{
|
||||
Id: uuid.New(),
|
||||
ShouldUseRange: true,
|
||||
RetryCounter: 0,
|
||||
IsLowerLimitInclusive: true,
|
||||
}
|
||||
|
||||
if err := rows.Scan(&batch.LowerLimit, &batch.UpperLimit); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
batches = append(batches, batch)
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return batches, nil
|
||||
}
|
||||
|
||||
func batchGeneratorMssql(ctx context.Context, db *sql.DB, job MigrationJob) ([]Batch, error) {
|
||||
rowsCount, err := estimateTotalRowsMssql(ctx, db, job)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var batchCount int64 = 1
|
||||
if rowsCount > RowsPerBatch {
|
||||
batchCount = rowsCount / RowsPerBatch
|
||||
} else {
|
||||
return []Batch{{
|
||||
Id: uuid.New(),
|
||||
ShouldUseRange: false,
|
||||
RetryCounter: 0,
|
||||
}}, nil
|
||||
}
|
||||
|
||||
batches, err := calculateBatchesMssql(ctx, db, job, batchCount)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return batches, nil
|
||||
}
|
||||
Reference in New Issue
Block a user