diff --git a/cmd/go_migrate/log.go b/cmd/go_migrate/log.go new file mode 100644 index 0000000..0aff8e9 --- /dev/null +++ b/cmd/go_migrate/log.go @@ -0,0 +1,15 @@ +package main + +import ( + "time" + + log "github.com/sirupsen/logrus" +) + +func configureLog() { + log.SetFormatter(&log.TextFormatter{ + FullTimestamp: true, + TimestampFormat: time.StampMilli, + }) + log.SetLevel(log.DebugLevel) +} diff --git a/cmd/go_migrate/main.go b/cmd/go_migrate/main.go index c69c726..6b76845 100644 --- a/cmd/go_migrate/main.go +++ b/cmd/go_migrate/main.go @@ -1,213 +1,12 @@ package main import ( - "context" - "errors" - "fmt" - "strings" - "time" - - "git.ksdemosapps.com/kylesoda/go-migrate/internal/app/config" - "git.ksdemosapps.com/kylesoda/go-migrate/internal/app/db" - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgtype" - "github.com/jackc/pgx/v5/pgxpool" log "github.com/sirupsen/logrus" ) func main() { - log.SetFormatter(&log.TextFormatter{ - FullTimestamp: true, - TimestampFormat: time.StampMilli, - }) - log.SetLevel(log.DebugLevel) - + configureLog() log.Info("Starting migration...") - ctxSource, cancel := context.WithTimeout(context.Background(), 20*time.Second) - defer cancel() - - sourcePool, err := db.Connect(ctxSource, config.App.SourceDbUrl) - defer db.Close(sourcePool) - if err != nil { - log.Fatal(err) - } - log.Info("Successfully connected to from_db") - - ctxTarget, cancel := context.WithTimeout(context.Background(), 20*time.Second) - defer cancel() - - targetPool, err := db.Connect(ctxTarget, config.App.TargetDbUrl) - defer db.Close(targetPool) - if err != nil { - log.Fatal(err) - } - log.Info("Successfully connected to to_db") - - schema := "test" - table := "migration_test" - colNames := []string{"id", "nombre_producto", "descripcion", "stock", "precio", "es_activo", "fecha_creacion", "ultima_actualizacion", "configuracion_json", "etiquetas", "binario_test", "ip_servidor", "rango_prueba"} - - rowValues, err := extractData(ctxSource, sourcePool, 
// buildExtractSqlSentence builds a SELECT statement that reads the given
// columns from schema.table, with a $1 placeholder for the row LIMIT.
//
// All identifiers (schema, table, and column names) are double-quoted and any
// embedded double quote is doubled, per SQL identifier-escaping rules. The
// original wrapped identifiers in quotes without escaping, so a name
// containing `"` produced broken (and potentially injectable) SQL.
func buildExtractSqlSentence(schema, table string, colNames []string) string {
	quoted := make([]string, len(colNames))
	for i, col := range colNames {
		quoted[i] = quoteIdent(col)
	}
	return fmt.Sprintf(`SELECT %s FROM %s.%s LIMIT $1`,
		strings.Join(quoted, ", "), quoteIdent(schema), quoteIdent(table))
}

// quoteIdent wraps a SQL identifier in double quotes, doubling any embedded
// double quotes so the identifier cannot terminate the quoting early.
func quoteIdent(ident string) string {
	return `"` + strings.ReplaceAll(ident, `"`, `""`) + `"`
}
- } - - switch oid { - case 3904: - if r, ok := val.(pgtype.Range[any]); ok { - newRange := pgtype.Range[int32]{ - LowerType: r.LowerType, - UpperType: r.UpperType, - Valid: r.Valid, - } - if r.Lower != nil { - newRange.Lower = anyToInt32(r.Lower) - } - if r.Upper != nil { - newRange.Upper = anyToInt32(r.Upper) - } - return newRange - } - - case 3906: - if r, ok := val.(pgtype.Range[any]); ok { - newRange := pgtype.Range[pgtype.Numeric]{ - LowerType: r.LowerType, - UpperType: r.UpperType, - Valid: r.Valid, - } - if r.Lower != nil { - newRange.Lower = r.Lower.(pgtype.Numeric) - } - if r.Upper != nil { - newRange.Upper = r.Upper.(pgtype.Numeric) - } - return newRange - } - } - - return val -} - -func anyToInt32(v any) int32 { - switch t := v.(type) { - case int32: - return t - case int64: - return int32(t) - case int: - return int32(t) - case float64: - return int32(t) - default: - log.Warnf("Valor inesperado en rango: %T", v) - return 0 - } -} - -func insertData(ctx context.Context, targetPool *pgxpool.Pool, schema string, table string, colNames []string, rowValues [][]any) (int64, error) { - identifier := pgx.Identifier{schema, table} - - count, err := targetPool.CopyFrom( - ctx, - identifier, - colNames, - pgx.CopyFromRows(rowValues), - ) - - if err != nil { - return 0, fmt.Errorf("error en CopyFrom: %w", err) - } - - return count, nil -} - -func Map[T any, V any](input []T, mapper func(T) V) []V { - result := make([]V, len(input)) - - for i, v := range input { - result[i] = mapper(v) - } - - return result -} diff --git a/scripts/pg-info-test/main.go b/scripts/pg-info-test/main.go new file mode 100644 index 0000000..c69c726 --- /dev/null +++ b/scripts/pg-info-test/main.go @@ -0,0 +1,213 @@ +package main + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "git.ksdemosapps.com/kylesoda/go-migrate/internal/app/config" + "git.ksdemosapps.com/kylesoda/go-migrate/internal/app/db" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgtype" + 
"github.com/jackc/pgx/v5/pgxpool" + log "github.com/sirupsen/logrus" +) + +func main() { + log.SetFormatter(&log.TextFormatter{ + FullTimestamp: true, + TimestampFormat: time.StampMilli, + }) + log.SetLevel(log.DebugLevel) + + log.Info("Starting migration...") + + ctxSource, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + sourcePool, err := db.Connect(ctxSource, config.App.SourceDbUrl) + defer db.Close(sourcePool) + if err != nil { + log.Fatal(err) + } + log.Info("Successfully connected to from_db") + + ctxTarget, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + targetPool, err := db.Connect(ctxTarget, config.App.TargetDbUrl) + defer db.Close(targetPool) + if err != nil { + log.Fatal(err) + } + log.Info("Successfully connected to to_db") + + schema := "test" + table := "migration_test" + colNames := []string{"id", "nombre_producto", "descripcion", "stock", "precio", "es_activo", "fecha_creacion", "ultima_actualizacion", "configuracion_json", "etiquetas", "binario_test", "ip_servidor", "rango_prueba"} + + rowValues, err := extractData(ctxSource, sourcePool, schema, table, colNames, 10000) + + if err != nil { + log.Fatal("Unexpected error when extracting data", err) + } + + // for index, row := range rowValues { + // log.Debugf("Values for row %d", index+1) + // for i, v := range row { + // log.Debugf("%s: %v", colNames[i], v) + // } + // } + + insertedRows, err := insertData(ctxTarget, targetPool, schema, table, colNames, rowValues) + if err != nil { + log.Fatal("Unexpected error when inserting rows: ", err) + } + + log.Infof("Inserted rows: %d", insertedRows) + + log.Info("Migration completed successfully!") +} + +func buildExtractSqlSentence(schema, table string, colNames []string) string { + var sbColumns strings.Builder + + for i, col := range colNames { + sbColumns.WriteString(`"`) + sbColumns.WriteString(col) + sbColumns.WriteString(`"`) + if i < len(colNames)-1 { + 
sbColumns.WriteString(", ") + } + } + + return fmt.Sprintf(`SELECT %s FROM "%s"."%s" LIMIT $1`, sbColumns.String(), schema, table) +} + +func extractData(ctx context.Context, sourcePool *pgxpool.Pool, schema string, table string, colNames []string, limit int) ([][]any, error) { + if len(colNames) == 0 { + return nil, errors.New("Empty column names received") + } + + sql := buildExtractSqlSentence(schema, table, colNames) + + log.Debug("Executing query: ", sql) + + rows, err := sourcePool.Query(ctx, sql, limit) + if err != nil { + if !errors.Is(err, pgx.ErrNoRows) { + return nil, fmt.Errorf("Unexpected error: %w", err) + } + + log.Warn("Unexpected error", err) + return [][]any{}, nil + } + defer rows.Close() + + cols := rows.FieldDescriptions() + oids := make([]uint32, len(cols)) + for i, c := range cols { + oids[i] = c.DataTypeOID + } + + rowValues := make([][]any, 0, limit) + + for rows.Next() { + values, _ := rows.Values() + + for i, v := range values { + values[i] = castValueByOID(v, oids[i]) + } + + rowValues = append(rowValues, values) + } + + return rowValues, nil +} + +func castValueByOID(val any, oid uint32) any { + if val == nil { + return nil + } + + switch oid { + case 3904: + if r, ok := val.(pgtype.Range[any]); ok { + newRange := pgtype.Range[int32]{ + LowerType: r.LowerType, + UpperType: r.UpperType, + Valid: r.Valid, + } + if r.Lower != nil { + newRange.Lower = anyToInt32(r.Lower) + } + if r.Upper != nil { + newRange.Upper = anyToInt32(r.Upper) + } + return newRange + } + + case 3906: + if r, ok := val.(pgtype.Range[any]); ok { + newRange := pgtype.Range[pgtype.Numeric]{ + LowerType: r.LowerType, + UpperType: r.UpperType, + Valid: r.Valid, + } + if r.Lower != nil { + newRange.Lower = r.Lower.(pgtype.Numeric) + } + if r.Upper != nil { + newRange.Upper = r.Upper.(pgtype.Numeric) + } + return newRange + } + } + + return val +} + +func anyToInt32(v any) int32 { + switch t := v.(type) { + case int32: + return t + case int64: + return int32(t) + case int: 
+ return int32(t) + case float64: + return int32(t) + default: + log.Warnf("Valor inesperado en rango: %T", v) + return 0 + } +} + +func insertData(ctx context.Context, targetPool *pgxpool.Pool, schema string, table string, colNames []string, rowValues [][]any) (int64, error) { + identifier := pgx.Identifier{schema, table} + + count, err := targetPool.CopyFrom( + ctx, + identifier, + colNames, + pgx.CopyFromRows(rowValues), + ) + + if err != nil { + return 0, fmt.Errorf("error en CopyFrom: %w", err) + } + + return count, nil +} + +func Map[T any, V any](input []T, mapper func(T) V) []V { + result := make([]V, len(input)) + + for i, v := range input { + result[i] = mapper(v) + } + + return result +}