diff --git a/batch.go b/batch.go
index d5e7dc8ec..fbd41cfc5 100644
--- a/batch.go
+++ b/batch.go
@@ -493,14 +493,14 @@ func (br *emptyBatchResults) Close() error {
func invalidateCachesOnBatchResultsError(conn *Conn, b *Batch, err error) {
if err != nil && conn != nil && b != nil {
if sc := conn.statementCache; sc != nil {
- for _, bi := range b.QueuedQueries {
- sc.Invalidate(bi.SQL)
+ for i := range b.QueuedQueries {
+ sc.Invalidate(b.QueuedQueries[i].SQL)
}
}
if sc := conn.descriptionCache; sc != nil {
- for _, bi := range b.QueuedQueries {
- sc.Invalidate(bi.SQL)
+ for i := range b.QueuedQueries {
+ sc.Invalidate(b.QueuedQueries[i].SQL)
}
}
}
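
For reference, a minimal sketch of the two range forms this refactor moves between, using a stand-in struct rather than pgx's own queued-query type:

package main

import "fmt"

// query is a stand-in; pgx's own QueuedQueries element type may differ.
type query struct{ SQL string }

func main() {
	queries := []query{{SQL: "select 1"}, {SQL: "select 2"}}

	// Value form: each element is copied into bi before use.
	for _, bi := range queries {
		fmt.Println(bi.SQL)
	}

	// Index form (the style adopted above): the element is accessed in
	// place, which also lets queries[i] be mutated directly.
	for i := range queries {
		queries[i].SQL += " -- touched"
		fmt.Println(queries[i].SQL)
	}
}
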
diff --git a/bench_test.go b/bench_test.go
index 3acb2d948..4a2267024 100644
--- a/bench_test.go
+++ b/bench_test.go
@@ -43,7 +43,7 @@ func BenchmarkMinimalUnpreparedSelectWithoutStatementCache(b *testing.B) {
var n int64
- for i := 0; b.Loop(); i++ {
+ for i := range b.N {
err := conn.QueryRow(context.Background(), "select $1::int8", i).Scan(&n)
if err != nil {
b.Fatal(err)
@@ -66,7 +66,7 @@ func BenchmarkMinimalUnpreparedSelectWithStatementCacheModeDescribe(b *testing.B
var n int64
- for i := 0; b.Loop(); i++ {
+ for i := range b.N {
err := conn.QueryRow(context.Background(), "select $1::int8", i).Scan(&n)
if err != nil {
b.Fatal(err)
@@ -89,7 +89,7 @@ func BenchmarkMinimalUnpreparedSelectWithStatementCacheModePrepare(b *testing.B)
var n int64
- for i := 0; b.Loop(); i++ {
+ for i := range b.N {
err := conn.QueryRow(context.Background(), "select $1::int8", i).Scan(&n)
if err != nil {
b.Fatal(err)
@@ -112,7 +112,7 @@ func BenchmarkMinimalPreparedSelect(b *testing.B) {
var n int64
- for i := 0; b.Loop(); i++ {
+ for i := range b.N {
err = conn.QueryRow(context.Background(), "ps1", i).Scan(&n)
if err != nil {
b.Fatal(err)
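
For context, the two benchmark loop idioms involved here: range over b.N needs Go 1.22+, while testing.B.Loop was added in Go 1.24. A minimal sketch of both:

package bench_test

import "testing"

func BenchmarkRangeOverN(b *testing.B) {
	// The form these hunks switch to: b.N is the iteration count chosen by
	// the benchmark framework, and range over an int counts 0..b.N-1.
	for i := range b.N {
		_ = i
	}
}

func BenchmarkLoopMethod(b *testing.B) {
	// The form being removed: b.Loop reports whether to keep iterating and
	// keeps setup done before the loop out of the measured time.
	for b.Loop() {
	}
}
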
diff --git a/conn.go b/conn.go
index 42e955fcc..c0ed90df3 100644
--- a/conn.go
+++ b/conn.go
@@ -314,7 +314,7 @@ func (c *Conn) Close(ctx context.Context) error {
//
// Prepare is idempotent; i.e. it is safe to call Prepare multiple times with the same name and sql arguments. This
// allows a code path to Prepare and Query/Exec without concern for if the statement has already been prepared.
-func (c *Conn) Prepare(ctx context.Context, name, sql string) (sd *pgconn.StatementDescription, err error) {
+func (c *Conn) Prepare(ctx context.Context, name, sqlVar string) (sd *pgconn.StatementDescription, err error) {
if c.failedDescribeStatement != "" {
err = c.Deallocate(ctx, c.failedDescribeStatement)
if err != nil {
@@ -324,12 +324,12 @@ func (c *Conn) Prepare(ctx context.Context, name, sql string) (sd *pgconn.Statem
}
if c.prepareTracer != nil {
- ctx = c.prepareTracer.TracePrepareStart(ctx, c, TracePrepareStartData{Name: name, SQL: sql})
+ ctx = c.prepareTracer.TracePrepareStart(ctx, c, TracePrepareStartData{Name: name, SQL: sqlVar})
}
if name != "" {
var ok bool
- if sd, ok = c.preparedStatements[name]; ok && sd.SQL == sql {
+ if sd, ok = c.preparedStatements[name]; ok && sd.SQL == sqlVar {
if c.prepareTracer != nil {
c.prepareTracer.TracePrepareEnd(ctx, c, TracePrepareEndData{AlreadyPrepared: true})
}
@@ -344,16 +344,16 @@ func (c *Conn) Prepare(ctx context.Context, name, sql string) (sd *pgconn.Statem
}
var psName, psKey string
- if name == sql {
- digest := sha256.Sum256([]byte(sql))
+ if name == sqlVar {
+ digest := sha256.Sum256([]byte(sqlVar))
psName = "stmt_" + hex.EncodeToString(digest[0:24])
- psKey = sql
+ psKey = sqlVar
} else {
psName = name
psKey = name
}
- sd, err = c.pgConn.Prepare(ctx, psName, sql, nil)
+ sd, err = c.pgConn.Prepare(ctx, psName, sqlVar, nil)
if err != nil {
var pErr *pgconn.PrepareError
if errors.As(err, &pErr) {
@@ -467,16 +467,16 @@ func (c *Conn) Config() *ConnConfig { return c.config.Copy() }
// Exec executes sql. sql can be either a prepared statement name or an SQL string. arguments should be referenced
// positionally from the sql string as $1, $2, etc.
-func (c *Conn) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.CommandTag, error) {
+func (c *Conn) Exec(ctx context.Context, sqlVar string, arguments ...any) (pgconn.CommandTag, error) {
if c.queryTracer != nil {
- ctx = c.queryTracer.TraceQueryStart(ctx, c, TraceQueryStartData{SQL: sql, Args: arguments})
+ ctx = c.queryTracer.TraceQueryStart(ctx, c, TraceQueryStartData{SQL: sqlVar, Args: arguments})
}
if err := c.deallocateInvalidatedCachedStatements(ctx); err != nil {
return pgconn.CommandTag{}, err
}
- commandTag, err := c.exec(ctx, sql, arguments...)
+ commandTag, err := c.exec(ctx, sqlVar, arguments...)
if c.queryTracer != nil {
c.queryTracer.TraceQueryEnd(ctx, c, TraceQueryEndData{CommandTag: commandTag, Err: err})
@@ -485,9 +485,12 @@ func (c *Conn) Exec(ctx context.Context, sql string, arguments ...any) (pgconn.C
return commandTag, err
}
-func (c *Conn) exec(ctx context.Context, sql string, arguments ...any) (commandTag pgconn.CommandTag, err error) {
- mode := c.config.DefaultQueryExecMode
- var queryRewriter QueryRewriter
+func (c *Conn) exec(ctx context.Context, sqlVar string, arguments ...any) (commandTag pgconn.CommandTag, err error) {
+
+ var (
+ mode = c.config.DefaultQueryExecMode
+ queryRewriter QueryRewriter
+ )
optionLoop:
for len(arguments) > 0 {
@@ -495,16 +498,19 @@ optionLoop:
case QueryExecMode:
mode = arg
arguments = arguments[1:]
+
case QueryRewriter:
queryRewriter = arg
arguments = arguments[1:]
+
default:
break optionLoop
+
}
}
if queryRewriter != nil {
- sql, arguments, err = queryRewriter.RewriteQuery(ctx, c, sql, arguments)
+ sqlVar, arguments, err = queryRewriter.RewriteQuery(ctx, c, sqlVar, arguments)
if err != nil {
return pgconn.CommandTag{}, fmt.Errorf("rewrite query failed: %w", err)
}
@@ -515,7 +521,7 @@ optionLoop:
mode = QueryExecModeSimpleProtocol
}
- if sd, ok := c.preparedStatements[sql]; ok {
+ if sd, ok := c.preparedStatements[sqlVar]; ok {
return c.execPrepared(ctx, sd, arguments)
}
@@ -524,9 +530,9 @@ optionLoop:
if c.statementCache == nil {
return pgconn.CommandTag{}, errDisabledStatementCache
}
- sd := c.statementCache.Get(sql)
+ sd := c.statementCache.Get(sqlVar)
if sd == nil {
- sd, err = c.Prepare(ctx, stmtcache.StatementName(sql), sql)
+ sd, err = c.Prepare(ctx, stmtcache.StatementName(sqlVar), sqlVar)
if err != nil {
return pgconn.CommandTag{}, err
}
@@ -534,13 +540,14 @@ optionLoop:
}
return c.execPrepared(ctx, sd, arguments)
+
case QueryExecModeCacheDescribe:
if c.descriptionCache == nil {
return pgconn.CommandTag{}, errDisabledDescriptionCache
}
- sd := c.descriptionCache.Get(sql)
+ sd := c.descriptionCache.Get(sqlVar)
if sd == nil {
- sd, err = c.Prepare(ctx, "", sql)
+ sd, err = c.Prepare(ctx, "", sqlVar)
if err != nil {
return pgconn.CommandTag{}, err
}
@@ -548,30 +555,35 @@ optionLoop:
}
return c.execParams(ctx, sd, arguments)
+
case QueryExecModeDescribeExec:
- sd, err := c.Prepare(ctx, "", sql)
+ sd, err := c.Prepare(ctx, "", sqlVar)
if err != nil {
return pgconn.CommandTag{}, err
}
return c.execPrepared(ctx, sd, arguments)
+
case QueryExecModeExec:
- return c.execSQLParams(ctx, sql, arguments)
+ return c.execSQLParams(ctx, sqlVar, arguments)
+
case QueryExecModeSimpleProtocol:
- return c.execSimpleProtocol(ctx, sql, arguments)
+ return c.execSimpleProtocol(ctx, sqlVar, arguments)
+
default:
return pgconn.CommandTag{}, fmt.Errorf("unknown QueryExecMode: %v", mode)
+
}
}
-func (c *Conn) execSimpleProtocol(ctx context.Context, sql string, arguments []any) (commandTag pgconn.CommandTag, err error) {
+func (c *Conn) execSimpleProtocol(ctx context.Context, sqlVar string, arguments []any) (commandTag pgconn.CommandTag, err error) {
if len(arguments) > 0 {
- sql, err = c.sanitizeForSimpleQuery(sql, arguments...)
+ sqlVar, err = c.sanitizeForSimpleQuery(sqlVar, arguments...)
if err != nil {
return pgconn.CommandTag{}, err
}
}
- mrr := c.pgConn.Exec(ctx, sql)
+ mrr := c.pgConn.Exec(ctx, sqlVar)
for mrr.NextResult() {
commandTag, _ = mrr.ResultReader().Close()
}
@@ -601,25 +613,25 @@ func (c *Conn) execPrepared(ctx context.Context, sd *pgconn.StatementDescription
return result.CommandTag, result.Err
}
-func (c *Conn) execSQLParams(ctx context.Context, sql string, args []any) (pgconn.CommandTag, error) {
+func (c *Conn) execSQLParams(ctx context.Context, sqlVar string, args []any) (pgconn.CommandTag, error) {
err := c.eqb.Build(c.typeMap, nil, args)
if err != nil {
return pgconn.CommandTag{}, err
}
- result := c.pgConn.ExecParams(ctx, sql, c.eqb.ParamValues, nil, c.eqb.ParamFormats, c.eqb.ResultFormats).Read()
+ result := c.pgConn.ExecParams(ctx, sqlVar, c.eqb.ParamValues, nil, c.eqb.ParamFormats, c.eqb.ResultFormats).Read()
c.eqb.reset() // Allow c.eqb internal memory to be GC'ed as soon as possible.
return result.CommandTag, result.Err
}
-func (c *Conn) getRows(ctx context.Context, sql string, args []any) *baseRows {
+func (c *Conn) getRows(ctx context.Context, sqlVar string, args []any) *baseRows {
r := &baseRows{}
r.ctx = ctx
r.queryTracer = c.queryTracer
r.typeMap = c.typeMap
r.startTime = time.Now()
- r.sql = sql
+ r.sql = sqlVar
r.args = args
r.conn = c
@@ -710,7 +722,7 @@ type QueryResultFormatsByOID map[uint32]int16
// QueryRewriter rewrites a query when used as the first arguments to a query method.
type QueryRewriter interface {
- RewriteQuery(ctx context.Context, conn *Conn, sql string, args []any) (newSQL string, newArgs []any, err error)
+ RewriteQuery(ctx context.Context, conn *Conn, sqlVar string, args []any) (newSQL string, newArgs []any, err error)
}
// Query sends a query to the server and returns a Rows to read the results. Only errors encountered sending the query
@@ -734,9 +746,9 @@ type QueryRewriter interface {
// For extra control over how the query is executed, the types QueryExecMode, QueryResultFormats, and
// QueryResultFormatsByOID may be used as the first args to control exactly how the query is executed. This is rarely
// needed. See the documentation for those types for details.
-func (c *Conn) Query(ctx context.Context, sql string, args ...any) (Rows, error) {
+func (c *Conn) Query(ctx context.Context, sqlVar string, args ...any) (Rows, error) {
if c.queryTracer != nil {
- ctx = c.queryTracer.TraceQueryStart(ctx, c, TraceQueryStartData{SQL: sql, Args: args})
+ ctx = c.queryTracer.TraceQueryStart(ctx, c, TraceQueryStartData{SQL: sqlVar, Args: args})
}
if err := c.deallocateInvalidatedCachedStatements(ctx); err != nil {
@@ -753,29 +765,37 @@ func (c *Conn) Query(ctx context.Context, sql string, args ...any) (Rows, error)
optionLoop:
for len(args) > 0 {
+
switch arg := args[0].(type) {
case QueryResultFormats:
resultFormats = arg
args = args[1:]
+
case QueryResultFormatsByOID:
resultFormatsByOID = arg
args = args[1:]
+
case QueryExecMode:
mode = arg
args = args[1:]
+
case QueryRewriter:
queryRewriter = arg
args = args[1:]
+
default:
break optionLoop
}
+
}
if queryRewriter != nil {
- var err error
- originalSQL := sql
- originalArgs := args
- sql, args, err = queryRewriter.RewriteQuery(ctx, c, sql, args)
+ var (
+ err error
+ originalSQL = sqlVar
+ originalArgs = args
+ )
+ sqlVar, args, err = queryRewriter.RewriteQuery(ctx, c, sqlVar, args)
if err != nil {
rows := c.getRows(ctx, originalSQL, originalArgs)
err = fmt.Errorf("rewrite query failed: %w", err)
@@ -785,18 +805,21 @@ optionLoop:
}
// Bypass any statement caching.
- if sql == "" {
+ if sqlVar == "" {
mode = QueryExecModeSimpleProtocol
}
c.eqb.reset()
- rows := c.getRows(ctx, sql, args)
- var err error
- sd, explicitPreparedStatement := c.preparedStatements[sql]
- if sd != nil || mode == QueryExecModeCacheStatement || mode == QueryExecModeCacheDescribe || mode == QueryExecModeDescribeExec {
+ var (
+ rows = c.getRows(ctx, sqlVar, args)
+ err error
+ sd, explicitPreparedStatement = c.preparedStatements[sqlVar]
+ )
+ switch {
+ case sd != nil || mode == QueryExecModeCacheStatement || mode == QueryExecModeCacheDescribe || mode == QueryExecModeDescribeExec:
if sd == nil {
- sd, err = c.getStatementDescription(ctx, mode, sql)
+ sd, err = c.getStatementDescription(ctx, mode, sqlVar)
if err != nil {
rows.fatal(err)
return rows, err
@@ -828,26 +851,28 @@ optionLoop:
}
if !explicitPreparedStatement && mode == QueryExecModeCacheDescribe {
- rows.resultReader = c.pgConn.ExecParams(ctx, sql, c.eqb.ParamValues, sd.ParamOIDs, c.eqb.ParamFormats, resultFormats)
+ rows.resultReader = c.pgConn.ExecParams(ctx, sqlVar, c.eqb.ParamValues, sd.ParamOIDs, c.eqb.ParamFormats, resultFormats)
} else {
rows.resultReader = c.pgConn.ExecPrepared(ctx, sd.Name, c.eqb.ParamValues, c.eqb.ParamFormats, resultFormats)
}
- } else if mode == QueryExecModeExec {
+
+ case mode == QueryExecModeExec:
err := c.eqb.Build(c.typeMap, nil, args)
if err != nil {
rows.fatal(err)
return rows, rows.err
}
- rows.resultReader = c.pgConn.ExecParams(ctx, sql, c.eqb.ParamValues, nil, c.eqb.ParamFormats, c.eqb.ResultFormats)
- } else if mode == QueryExecModeSimpleProtocol {
- sql, err = c.sanitizeForSimpleQuery(sql, args...)
+ rows.resultReader = c.pgConn.ExecParams(ctx, sqlVar, c.eqb.ParamValues, nil, c.eqb.ParamFormats, c.eqb.ResultFormats)
+
+ case mode == QueryExecModeSimpleProtocol:
+ sqlVar, err = c.sanitizeForSimpleQuery(sqlVar, args...)
if err != nil {
rows.fatal(err)
return rows, err
}
- mrr := c.pgConn.Exec(ctx, sql)
+ mrr := c.pgConn.Exec(ctx, sqlVar)
if mrr.NextResult() {
rows.resultReader = mrr.ResultReader()
rows.multiResultReader = mrr
@@ -858,10 +883,12 @@ optionLoop:
}
return rows, nil
- } else {
+
+ default:
err = fmt.Errorf("unknown QueryExecMode: %v", mode)
rows.fatal(err)
return rows, rows.err
+
}
c.eqb.reset() // Allow c.eqb internal memory to be GC'ed as soon as possible.
@@ -877,16 +904,16 @@ optionLoop:
func (c *Conn) getStatementDescription(
ctx context.Context,
mode QueryExecMode,
- sql string,
+ sqlVar string,
) (sd *pgconn.StatementDescription, err error) {
switch mode {
case QueryExecModeCacheStatement:
if c.statementCache == nil {
return nil, errDisabledStatementCache
}
- sd = c.statementCache.Get(sql)
+ sd = c.statementCache.Get(sqlVar)
if sd == nil {
- sd, err = c.Prepare(ctx, stmtcache.StatementName(sql), sql)
+ sd, err = c.Prepare(ctx, stmtcache.StatementName(sqlVar), sqlVar)
if err != nil {
return nil, err
}
@@ -896,16 +923,16 @@ func (c *Conn) getStatementDescription(
if c.descriptionCache == nil {
return nil, errDisabledDescriptionCache
}
- sd = c.descriptionCache.Get(sql)
+ sd = c.descriptionCache.Get(sqlVar)
if sd == nil {
- sd, err = c.Prepare(ctx, "", sql)
+ sd, err = c.Prepare(ctx, "", sqlVar)
if err != nil {
return nil, err
}
c.descriptionCache.Put(sd)
}
case QueryExecModeDescribeExec:
- return c.Prepare(ctx, "", sql)
+ return c.Prepare(ctx, "", sqlVar)
}
return sd, err
}
@@ -913,8 +940,8 @@ func (c *Conn) getStatementDescription(
// QueryRow is a convenience wrapper over Query. Any error that occurs while
// querying is deferred until calling Scan on the returned Row. That Row will
// error with ErrNoRows if no rows are returned.
-func (c *Conn) QueryRow(ctx context.Context, sql string, args ...any) Row {
- rows, _ := c.Query(ctx, sql, args...)
+func (c *Conn) QueryRow(ctx context.Context, sqlVar string, args ...any) Row {
+ rows, _ := c.Query(ctx, sqlVar, args...)
return (*connRow)(rows.(*baseRows))
}
@@ -943,10 +970,12 @@ func (c *Conn) SendBatch(ctx context.Context, b *Batch) (br BatchResults) {
return &batchResults{ctx: ctx, conn: c, err: err}
}
- for _, bi := range b.QueuedQueries {
- var queryRewriter QueryRewriter
- sql := bi.SQL
- arguments := bi.Arguments
+ for i := range b.QueuedQueries {
+ var (
+ queryRewriter QueryRewriter
+ sqlVar = b.QueuedQueries[i].SQL
+ arguments = b.QueuedQueries[i].Arguments
+ )
optionLoop:
for len(arguments) > 0 {
@@ -962,14 +991,14 @@ func (c *Conn) SendBatch(ctx context.Context, b *Batch) (br BatchResults) {
if queryRewriter != nil {
var err error
- sql, arguments, err = queryRewriter.RewriteQuery(ctx, c, sql, arguments)
+ sqlVar, arguments, err = queryRewriter.RewriteQuery(ctx, c, sqlVar, arguments)
if err != nil {
return &batchResults{ctx: ctx, conn: c, err: fmt.Errorf("rewrite query failed: %w", err)}
}
}
- bi.SQL = sql
- bi.Arguments = arguments
+ b.QueuedQueries[i].SQL = sqlVar
+ b.QueuedQueries[i].Arguments = arguments
}
// TODO: changing mode per batch? Update Batch.Queue function comment when implemented
@@ -979,9 +1008,9 @@ func (c *Conn) SendBatch(ctx context.Context, b *Batch) (br BatchResults) {
}
// All other modes use extended protocol and thus can use prepared statements.
- for _, bi := range b.QueuedQueries {
- if sd, ok := c.preparedStatements[bi.SQL]; ok {
- bi.sd = sd
+ for i := range b.QueuedQueries {
+ if sd, ok := c.preparedStatements[b.QueuedQueries[i].SQL]; ok {
+ b.QueuedQueries[i].sd = sd
}
}
@@ -1001,15 +1030,15 @@ func (c *Conn) SendBatch(ctx context.Context, b *Batch) (br BatchResults) {
func (c *Conn) sendBatchQueryExecModeSimpleProtocol(ctx context.Context, b *Batch) *batchResults {
var sb strings.Builder
- for i, bi := range b.QueuedQueries {
+ for i := range b.QueuedQueries {
if i > 0 {
sb.WriteByte(';')
}
- sql, err := c.sanitizeForSimpleQuery(bi.SQL, bi.Arguments...)
+ sqlVar, err := c.sanitizeForSimpleQuery(b.QueuedQueries[i].SQL, b.QueuedQueries[i].Arguments...)
if err != nil {
return &batchResults{ctx: ctx, conn: c, err: err}
}
- sb.WriteString(sql)
+ sb.WriteString(sqlVar)
}
mrr := c.pgConn.Exec(ctx, sb.String())
return &batchResults{
@@ -1024,21 +1053,21 @@ func (c *Conn) sendBatchQueryExecModeSimpleProtocol(ctx context.Context, b *Batc
func (c *Conn) sendBatchQueryExecModeExec(ctx context.Context, b *Batch) *batchResults {
batch := &pgconn.Batch{}
- for _, bi := range b.QueuedQueries {
- sd := bi.sd
+ for i := range b.QueuedQueries {
+ sd := b.QueuedQueries[i].sd
if sd != nil {
- err := c.eqb.Build(c.typeMap, sd, bi.Arguments)
+ err := c.eqb.Build(c.typeMap, sd, b.QueuedQueries[i].Arguments)
if err != nil {
return &batchResults{ctx: ctx, conn: c, err: err}
}
batch.ExecPrepared(sd.Name, c.eqb.ParamValues, c.eqb.ParamFormats, c.eqb.ResultFormats)
} else {
- err := c.eqb.Build(c.typeMap, nil, bi.Arguments)
+ err := c.eqb.Build(c.typeMap, nil, b.QueuedQueries[i].Arguments)
if err != nil {
return &batchResults{ctx: ctx, conn: c, err: err}
}
- batch.ExecParams(bi.SQL, c.eqb.ParamValues, nil, c.eqb.ParamFormats, c.eqb.ResultFormats)
+ batch.ExecParams(b.QueuedQueries[i].SQL, c.eqb.ParamValues, nil, c.eqb.ParamFormats, c.eqb.ResultFormats)
}
}
@@ -1063,22 +1092,22 @@ func (c *Conn) sendBatchQueryExecModeCacheStatement(ctx context.Context, b *Batc
distinctNewQueries := []*pgconn.StatementDescription{}
distinctNewQueriesIdxMap := make(map[string]int)
- for _, bi := range b.QueuedQueries {
- if bi.sd == nil {
- sd := c.statementCache.Get(bi.SQL)
+ for i := range b.QueuedQueries {
+ if b.QueuedQueries[i].sd == nil {
+ sd := c.statementCache.Get(b.QueuedQueries[i].SQL)
if sd != nil {
- bi.sd = sd
+ b.QueuedQueries[i].sd = sd
} else {
- if idx, present := distinctNewQueriesIdxMap[bi.SQL]; present {
- bi.sd = distinctNewQueries[idx]
+ if idx, present := distinctNewQueriesIdxMap[b.QueuedQueries[i].SQL]; present {
+ b.QueuedQueries[i].sd = distinctNewQueries[idx]
} else {
sd = &pgconn.StatementDescription{
- Name: stmtcache.StatementName(bi.SQL),
- SQL: bi.SQL,
+ Name: stmtcache.StatementName(b.QueuedQueries[i].SQL),
+ SQL: b.QueuedQueries[i].SQL,
}
distinctNewQueriesIdxMap[sd.SQL] = len(distinctNewQueries)
distinctNewQueries = append(distinctNewQueries, sd)
- bi.sd = sd
+ b.QueuedQueries[i].sd = sd
}
}
}
@@ -1095,21 +1124,21 @@ func (c *Conn) sendBatchQueryExecModeCacheDescribe(ctx context.Context, b *Batch
distinctNewQueries := []*pgconn.StatementDescription{}
distinctNewQueriesIdxMap := make(map[string]int)
- for _, bi := range b.QueuedQueries {
- if bi.sd == nil {
- sd := c.descriptionCache.Get(bi.SQL)
+ for i := range b.QueuedQueries {
+ if b.QueuedQueries[i].sd == nil {
+ sd := c.descriptionCache.Get(b.QueuedQueries[i].SQL)
if sd != nil {
- bi.sd = sd
+ b.QueuedQueries[i].sd = sd
} else {
- if idx, present := distinctNewQueriesIdxMap[bi.SQL]; present {
- bi.sd = distinctNewQueries[idx]
+ if idx, present := distinctNewQueriesIdxMap[b.QueuedQueries[i].SQL]; present {
+ b.QueuedQueries[i].sd = distinctNewQueries[idx]
} else {
sd = &pgconn.StatementDescription{
- SQL: bi.SQL,
+ SQL: b.QueuedQueries[i].SQL,
}
distinctNewQueriesIdxMap[sd.SQL] = len(distinctNewQueries)
distinctNewQueries = append(distinctNewQueries, sd)
- bi.sd = sd
+ b.QueuedQueries[i].sd = sd
}
}
}
@@ -1122,17 +1151,17 @@ func (c *Conn) sendBatchQueryExecModeDescribeExec(ctx context.Context, b *Batch)
distinctNewQueries := []*pgconn.StatementDescription{}
distinctNewQueriesIdxMap := make(map[string]int)
- for _, bi := range b.QueuedQueries {
- if bi.sd == nil {
- if idx, present := distinctNewQueriesIdxMap[bi.SQL]; present {
- bi.sd = distinctNewQueries[idx]
+ for i := range b.QueuedQueries {
+ if b.QueuedQueries[i].sd == nil {
+ if idx, present := distinctNewQueriesIdxMap[b.QueuedQueries[i].SQL]; present {
+ b.QueuedQueries[i].sd = distinctNewQueries[idx]
} else {
sd := &pgconn.StatementDescription{
- SQL: bi.SQL,
+ SQL: b.QueuedQueries[i].SQL,
}
distinctNewQueriesIdxMap[sd.SQL] = len(distinctNewQueries)
distinctNewQueries = append(distinctNewQueries, sd)
- bi.sd = sd
+ b.QueuedQueries[i].sd = sd
}
}
}
@@ -1151,23 +1180,23 @@ func (c *Conn) sendBatchExtendedWithDescription(ctx context.Context, b *Batch, d
// Prepare any needed queries
if len(distinctNewQueries) > 0 {
err := func() (err error) {
- for _, sd := range distinctNewQueries {
- pipeline.SendPrepare(sd.Name, sd.SQL, nil)
+ for i := range distinctNewQueries {
+ pipeline.SendPrepare(distinctNewQueries[i].Name, distinctNewQueries[i].SQL, nil)
}
// Store all statements we are preparing into the cache. It's fine if it overflows because HandleInvalidated will
// clean them up later.
if sdCache != nil {
- for _, sd := range distinctNewQueries {
- sdCache.Put(sd)
+ for i := range distinctNewQueries {
+ sdCache.Put(distinctNewQueries[i])
}
}
// If something goes wrong preparing the statements, we need to invalidate the cache entries we just added.
defer func() {
if err != nil && sdCache != nil {
- for _, sd := range distinctNewQueries {
- sdCache.Invalidate(sd.SQL)
+ for i := range distinctNewQueries {
+ sdCache.Invalidate(distinctNewQueries[i].SQL)
}
}
}()
@@ -1177,7 +1206,7 @@ func (c *Conn) sendBatchExtendedWithDescription(ctx context.Context, b *Batch, d
return err
}
- for _, sd := range distinctNewQueries {
+ for i := range distinctNewQueries {
results, err := pipeline.GetResults()
if err != nil {
return err
@@ -1189,8 +1218,8 @@ func (c *Conn) sendBatchExtendedWithDescription(ctx context.Context, b *Batch, d
}
// Fill in the previously empty / pending statement descriptions.
- sd.ParamOIDs = resultSD.ParamOIDs
- sd.Fields = resultSD.Fields
+ distinctNewQueries[i].ParamOIDs = resultSD.ParamOIDs
+ distinctNewQueries[i].Fields = resultSD.Fields
}
results, err := pipeline.GetResults()
@@ -1211,18 +1240,18 @@ func (c *Conn) sendBatchExtendedWithDescription(ctx context.Context, b *Batch, d
}
// Queue the queries.
- for _, bi := range b.QueuedQueries {
- err := c.eqb.Build(c.typeMap, bi.sd, bi.Arguments)
+ for i := range b.QueuedQueries {
+ err := c.eqb.Build(c.typeMap, b.QueuedQueries[i].sd, b.QueuedQueries[i].Arguments)
if err != nil {
// we wrap the error so we the user can understand which query failed inside the batch
- err = fmt.Errorf("error building query %s: %w", bi.SQL, err)
+ err = fmt.Errorf("error building query %s: %w", b.QueuedQueries[i].SQL, err)
return &pipelineBatchResults{ctx: ctx, conn: c, err: err, closed: true}
}
- if bi.sd.Name == "" {
- pipeline.SendQueryParams(bi.sd.SQL, c.eqb.ParamValues, bi.sd.ParamOIDs, c.eqb.ParamFormats, c.eqb.ResultFormats)
+ if b.QueuedQueries[i].sd.Name == "" {
+ pipeline.SendQueryParams(b.QueuedQueries[i].sd.SQL, c.eqb.ParamValues, b.QueuedQueries[i].sd.ParamOIDs, c.eqb.ParamFormats, c.eqb.ResultFormats)
} else {
- pipeline.SendQueryPrepared(bi.sd.Name, c.eqb.ParamValues, c.eqb.ParamFormats, c.eqb.ResultFormats)
+ pipeline.SendQueryPrepared(b.QueuedQueries[i].sd.Name, c.eqb.ParamValues, c.eqb.ParamFormats, c.eqb.ResultFormats)
}
}
@@ -1239,7 +1268,7 @@ func (c *Conn) sendBatchExtendedWithDescription(ctx context.Context, b *Batch, d
}
}
-func (c *Conn) sanitizeForSimpleQuery(sql string, args ...any) (string, error) {
+func (c *Conn) sanitizeForSimpleQuery(sqlVar string, args ...any) (string, error) {
if c.pgConn.ParameterStatus("standard_conforming_strings") != "on" {
return "", errors.New("simple protocol queries must be run with standard_conforming_strings=on")
}
@@ -1248,16 +1277,18 @@ func (c *Conn) sanitizeForSimpleQuery(sql string, args ...any) (string, error) {
return "", errors.New("simple protocol queries must be run with client_encoding=UTF8")
}
- var err error
- valueArgs := make([]any, len(args))
- for i, a := range args {
- valueArgs[i], err = convertSimpleArgument(c.typeMap, a)
+ var (
+ err error
+ valueArgs = make([]any, len(args))
+ )
+ for i := range args {
+ valueArgs[i], err = convertSimpleArgument(c.typeMap, args[i])
if err != nil {
return "", err
}
}
- return sanitize.SanitizeSQL(sql, valueArgs...)
+ return sanitize.SanitizeSQL(sqlVar, valueArgs...)
}
// LoadType inspects the database for typeName and produces a pgtype.Type suitable for registration. typeName must be
@@ -1431,8 +1462,8 @@ func (c *Conn) deallocateInvalidatedCachedStatements(ctx context.Context) error
pipeline := c.pgConn.StartPipeline(ctx)
defer pipeline.Close()
- for _, sd := range invalidatedStatements {
- pipeline.SendDeallocate(sd.Name)
+ for i := range invalidatedStatements {
+ pipeline.SendDeallocate(invalidatedStatements[i].Name)
}
err := pipeline.Sync()
@@ -1446,8 +1477,8 @@ func (c *Conn) deallocateInvalidatedCachedStatements(ctx context.Context) error
}
c.statementCache.RemoveInvalidated()
- for _, sd := range invalidatedStatements {
- delete(c.preparedStatements, sd.Name)
+ for i := range invalidatedStatements {
+ delete(c.preparedStatements, invalidatedStatements[i].Name)
}
return nil
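
The optionLoop blocks above peel QueryExecMode, QueryResultFormats, and QueryRewriter values off the front of the argument list before the ordinary arguments. A small usage sketch (the DSN is a placeholder):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://user:pass@localhost:5432/db")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// A QueryExecMode passed as the first argument is consumed by the
	// option loop; here the query is forced onto the simple protocol and
	// bypasses statement caching entirely.
	var n int64
	err = conn.QueryRow(ctx, "select $1::int8", pgx.QueryExecModeSimpleProtocol, int64(42)).Scan(&n)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(n)
}
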
diff --git a/conn_test.go b/conn_test.go
index 8c3e1f1e6..7e6c3d2c6 100644
--- a/conn_test.go
+++ b/conn_test.go
@@ -259,7 +259,7 @@ type testQueryRewriter struct {
args []any
}
-func (qr *testQueryRewriter) RewriteQuery(ctx context.Context, conn *pgx.Conn, sql string, args []any) (newSQL string, newArgs []any, err error) {
+func (qr *testQueryRewriter) RewriteQuery(ctx context.Context, conn *pgx.Conn, sqlVar string, args []any) (newSQL string, newArgs []any, err error) {
return qr.sql, qr.args, nil
}
@@ -601,17 +601,17 @@ func TestPrepareWithDigestedName(t *testing.T) {
defer cancel()
pgxtest.RunWithQueryExecModes(ctx, t, defaultConnTestRunner, nil, func(ctx context.Context, t testing.TB, conn *pgx.Conn) {
- sql := "select $1::text"
- sd, err := conn.Prepare(ctx, sql, sql)
+ sqlVar := "select $1::text"
+ sd, err := conn.Prepare(ctx, sqlVar, sqlVar)
require.NoError(t, err)
require.Equal(t, "stmt_2510cc7db17de3f42758a2a29c8b9ef8305d007b997ebdd6", sd.Name)
var s string
- err = conn.QueryRow(ctx, sql, "hello").Scan(&s)
+ err = conn.QueryRow(ctx, sqlVar, "hello").Scan(&s)
require.NoError(t, err)
require.Equal(t, "hello", s)
- err = conn.Deallocate(ctx, sql)
+ err = conn.Deallocate(ctx, sqlVar)
require.NoError(t, err)
})
}
@@ -627,26 +627,26 @@ func TestDeallocateInAbortedTransaction(t *testing.T) {
tx, err := conn.Begin(ctx)
require.NoError(t, err)
- sql := "select $1::text"
- sd, err := tx.Prepare(ctx, sql, sql)
+ sqlVar := "select $1::text"
+ sd, err := tx.Prepare(ctx, sqlVar, sqlVar)
require.NoError(t, err)
require.Equal(t, "stmt_2510cc7db17de3f42758a2a29c8b9ef8305d007b997ebdd6", sd.Name)
var s string
- err = tx.QueryRow(ctx, sql, "hello").Scan(&s)
+ err = tx.QueryRow(ctx, sqlVar, "hello").Scan(&s)
require.NoError(t, err)
require.Equal(t, "hello", s)
_, err = tx.Exec(ctx, "select 1/0") // abort transaction with divide by zero error
require.Error(t, err)
- err = conn.Deallocate(ctx, sql)
+ err = conn.Deallocate(ctx, sqlVar)
require.NoError(t, err)
err = tx.Rollback(ctx)
require.NoError(t, err)
- sd, err = conn.Prepare(ctx, sql, sql)
+ sd, err = conn.Prepare(ctx, sqlVar, sqlVar)
require.NoError(t, err)
require.Equal(t, "stmt_2510cc7db17de3f42758a2a29c8b9ef8305d007b997ebdd6", sd.Name)
})
diff --git a/copy_from.go b/copy_from.go
index abcd22396..2dc0b4da9 100644
--- a/copy_from.go
+++ b/copy_from.go
@@ -123,17 +123,21 @@ func (ct *copyFrom) run(ctx context.Context) (int64, error) {
})
}
- quotedTableName := ct.tableName.Sanitize()
- cbuf := &bytes.Buffer{}
- for i, cn := range ct.columnNames {
+ var (
+ cbuf = &bytes.Buffer{}
+ )
+ for i := range ct.columnNames {
if i != 0 {
cbuf.WriteString(", ")
}
- cbuf.WriteString(quoteIdentifier(cn))
+ cbuf.WriteString(quoteIdentifier(ct.columnNames[i]))
}
- quotedColumnNames := cbuf.String()
- var sd *pgconn.StatementDescription
+ var (
+ quotedTableName = ct.tableName.Sanitize()
+ quotedColumnNames = cbuf.String()
+ sd *pgconn.StatementDescription
+ )
switch ct.mode {
case QueryExecModeExec, QueryExecModeSimpleProtocol:
// These modes don't support the binary format. Before the inclusion of the
@@ -144,6 +148,7 @@ func (ct *copyFrom) run(ctx context.Context) (int64, error) {
// we'll default to that mode.
ct.mode = QueryExecModeDescribeExec
fallthrough
+
case QueryExecModeCacheStatement, QueryExecModeCacheDescribe, QueryExecModeDescribeExec:
var err error
sd, err = ct.conn.getStatementDescription(
@@ -154,8 +159,10 @@ func (ct *copyFrom) run(ctx context.Context) (int64, error) {
if err != nil {
return 0, fmt.Errorf("statement description failed: %w", err)
}
+
default:
return 0, fmt.Errorf("unknown QueryExecMode: %v", ct.mode)
+
}
r, w := io.Pipe()
@@ -214,10 +221,14 @@ func (ct *copyFrom) run(ctx context.Context) (int64, error) {
return commandTag.RowsAffected(), err
}
-func (ct *copyFrom) buildCopyBuf(buf []byte, sd *pgconn.StatementDescription) (bool, []byte, error) {
+func (ct *copyFrom) buildCopyBuf(buf []byte, sd *pgconn.StatementDescription) (success bool, data []byte, err error) {
+
const sendBufSize = 65536 - 5 // The packet has a 5-byte header
- lastBufLen := 0
- largestRowLen := 0
+
+ var (
+ lastBufLen = 0
+ largestRowLen = 0
+ )
for ct.rowSrc.Next() {
lastBufLen = len(buf)
@@ -231,8 +242,8 @@ func (ct *copyFrom) buildCopyBuf(buf []byte, sd *pgconn.StatementDescription) (b
}
buf = pgio.AppendInt16(buf, int16(len(ct.columnNames)))
- for i, val := range values {
- buf, err = encodeCopyValue(ct.conn.typeMap, buf, sd.Fields[i].DataTypeOID, val)
+ for i := range values {
+ buf, err = encodeCopyValue(ct.conn.typeMap, buf, sd.Fields[i].DataTypeOID, values[i])
if err != nil {
return false, nil, err
}
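
For reference, how the rewritten buildCopyBuf path is reached from the public API; the table and columns are placeholders:

package example

import (
	"context"
	"log"

	"github.com/jackc/pgx/v5"
)

func copyPeople(ctx context.Context, conn *pgx.Conn) error {
	rows := [][]any{
		{"alice", int32(30)},
		{"bob", int32(25)},
	}
	// CopyFrom resolves a statement description (per the mode handling in
	// run above) and then encodes each row via buildCopyBuf.
	n, err := conn.CopyFrom(ctx, pgx.Identifier{"people"}, []string{"name", "age"}, pgx.CopyFromRows(rows))
	if err != nil {
		return err
	}
	log.Printf("copied %d rows", n)
	return nil
}
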
diff --git a/derived_types.go b/derived_types.go
index 72c0a2423..a7f2d251a 100644
--- a/derived_types.go
+++ b/derived_types.go
@@ -162,7 +162,7 @@ type derivedTypeInfo struct {
func (c *Conn) LoadTypes(ctx context.Context, typeNames []string) ([]*pgtype.Type, error) {
m := c.TypeMap()
if len(typeNames) == 0 {
- return nil, fmt.Errorf("No type names were supplied.")
+ return nil, fmt.Errorf("no type names were supplied.")
}
// Disregard server version errors. This will result in
@@ -171,7 +171,7 @@ func (c *Conn) LoadTypes(ctx context.Context, typeNames []string) ([]*pgtype.Typ
sql := buildLoadDerivedTypesSQL(serverVersion, typeNames)
rows, err := c.Query(ctx, sql, QueryExecModeSimpleProtocol, typeNames)
if err != nil {
- return nil, fmt.Errorf("While generating load types query: %w", err)
+ return nil, fmt.Errorf("while generating load types query: %w", err)
}
defer rows.Close()
result := make([]*pgtype.Type, 0, 100)
@@ -179,31 +179,31 @@ func (c *Conn) LoadTypes(ctx context.Context, typeNames []string) ([]*pgtype.Typ
ti := derivedTypeInfo{}
err = rows.Scan(&ti.TypeName, &ti.NspName, &ti.Typtype, &ti.Typbasetype, &ti.Typelem, &ti.Oid, &ti.Rngtypid, &ti.Rngsubtype, &ti.Attnames, &ti.Atttypids)
if err != nil {
- return nil, fmt.Errorf("While scanning type information: %w", err)
+ return nil, fmt.Errorf("while scanning type information: %w", err)
}
var type_ *pgtype.Type
switch ti.Typtype {
case "b": // array
dt, ok := m.TypeForOID(ti.Typelem)
if !ok {
- return nil, fmt.Errorf("Array element OID %v not registered while loading pgtype %q", ti.Typelem, ti.TypeName)
+ return nil, fmt.Errorf("array element OID %v not registered while loading pgtype %q", ti.Typelem, ti.TypeName)
}
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.ArrayCodec{ElementType: dt}}
case "c": // composite
var fields []pgtype.CompositeCodecField
- for i, fieldName := range ti.Attnames {
+ for i := range ti.Attnames {
dt, ok := m.TypeForOID(ti.Atttypids[i])
if !ok {
- return nil, fmt.Errorf("Unknown field for composite type %q: field %q (OID %v) is not already registered.", ti.TypeName, fieldName, ti.Atttypids[i])
+ return nil, fmt.Errorf("unknown field for composite type %q: field %q (OID %v) is not already registered.", ti.TypeName, ti.Attnames[i], ti.Atttypids[i])
}
- fields = append(fields, pgtype.CompositeCodecField{Name: fieldName, Type: dt})
+ fields = append(fields, pgtype.CompositeCodecField{Name: ti.Attnames[i], Type: dt})
}
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.CompositeCodec{Fields: fields}}
case "d": // domain
dt, ok := m.TypeForOID(ti.Typbasetype)
if !ok {
- return nil, fmt.Errorf("Domain base type OID %v was not already registered, needed for %q", ti.Typbasetype, ti.TypeName)
+ return nil, fmt.Errorf("domain base type OID %v was not already registered, needed for %q", ti.Typbasetype, ti.TypeName)
}
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: dt.Codec}
@@ -212,19 +212,19 @@ func (c *Conn) LoadTypes(ctx context.Context, typeNames []string) ([]*pgtype.Typ
case "r": // range
dt, ok := m.TypeForOID(ti.Rngsubtype)
if !ok {
- return nil, fmt.Errorf("Range element OID %v was not already registered, needed for %q", ti.Rngsubtype, ti.TypeName)
+ return nil, fmt.Errorf("range element OID %v was not already registered, needed for %q", ti.Rngsubtype, ti.TypeName)
}
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.RangeCodec{ElementType: dt}}
case "m": // multirange
dt, ok := m.TypeForOID(ti.Rngtypid)
if !ok {
- return nil, fmt.Errorf("Multirange element OID %v was not already registered, needed for %q", ti.Rngtypid, ti.TypeName)
+ return nil, fmt.Errorf("multirange element OID %v was not already registered, needed for %q", ti.Rngtypid, ti.TypeName)
}
type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.MultirangeCodec{ElementType: dt}}
default:
- return nil, fmt.Errorf("Unknown typtype %q was found while registering %q", ti.Typtype, ti.TypeName)
+ return nil, fmt.Errorf("unknown typtype %q was found while registering %q", ti.Typtype, ti.TypeName)
}
// the type_ is imposible to be null
@@ -242,10 +242,10 @@ func (c *Conn) LoadTypes(ctx context.Context, typeNames []string) ([]*pgtype.Typ
// serverVersion returns the postgresql server version.
func serverVersion(c *Conn) (int64, error) {
serverVersionStr := c.PgConn().ParameterStatus("server_version")
- serverVersionStr = regexp.MustCompile(`^[0-9]+`).FindString(serverVersionStr)
+ serverVersionStr = regexp.MustCompile(`^\d+`).FindString(serverVersionStr)
// if not PostgreSQL do nothing
if serverVersionStr == "" {
- return 0, fmt.Errorf("Cannot identify server version in %q", serverVersionStr)
+ return 0, fmt.Errorf("cannot identify server version in %q", serverVersionStr)
}
version, err := strconv.ParseInt(serverVersionStr, 10, 64)
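
For reference, a sketch of how LoadTypes is typically consumed in recent pgx v5 releases (the type name is a placeholder):

package example

import (
	"context"

	"github.com/jackc/pgx/v5"
)

func registerDerivedTypes(ctx context.Context, conn *pgx.Conn) error {
	// LoadTypes queries the catalog for the named types and returns
	// pgtype.Type values ready for registration on the connection's map.
	types, err := conn.LoadTypes(ctx, []string{"my_composite"})
	if err != nil {
		return err
	}
	conn.TypeMap().RegisterTypes(types)
	return nil
}
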
diff --git a/internal/pgmock/pgmock.go b/internal/pgmock/pgmock.go
index c82d7ffc8..21c78ec51 100644
--- a/internal/pgmock/pgmock.go
+++ b/internal/pgmock/pgmock.go
@@ -18,8 +18,8 @@ type Script struct {
}
func (s *Script) Run(backend *pgproto3.Backend) error {
- for _, step := range s.Steps {
- err := step.Step(backend)
+ for i := range s.Steps {
+ err := s.Steps[i].Step(backend)
if err != nil {
return err
}
@@ -33,8 +33,8 @@ func (s *Script) Step(backend *pgproto3.Backend) error {
}
type expectMessageStep struct {
- want pgproto3.FrontendMessage
- any bool
+ want pgproto3.FrontendMessage
+ anyVar bool
}
func (e *expectMessageStep) Step(backend *pgproto3.Backend) error {
@@ -43,7 +43,7 @@ func (e *expectMessageStep) Step(backend *pgproto3.Backend) error {
return err
}
- if e.any && reflect.TypeOf(msg) == reflect.TypeOf(e.want) {
+ if e.anyVar && reflect.TypeOf(msg) == reflect.TypeOf(e.want) {
return nil
}
@@ -84,12 +84,12 @@ func ExpectAnyMessage(want pgproto3.FrontendMessage) Step {
return expectMessage(want, true)
}
-func expectMessage(want pgproto3.FrontendMessage, any bool) Step {
+func expectMessage(want pgproto3.FrontendMessage, anyVar bool) Step {
if want, ok := want.(*pgproto3.StartupMessage); ok {
- return &expectStartupMessageStep{want: want, any: any}
+ return &expectStartupMessageStep{want: want, any: anyVar}
}
- return &expectMessageStep{want: want, any: any}
+ return &expectMessageStep{want: want, anyVar: anyVar}
}
type sendMessageStep struct {
diff --git a/internal/sanitize/sanitize.go b/internal/sanitize/sanitize.go
index b516817cb..d42bc58c0 100644
--- a/internal/sanitize/sanitize.go
+++ b/internal/sanitize/sanitize.go
@@ -46,8 +46,8 @@ func (q *Query) Sanitize(args ...any) (string, error) {
buf := bufPool.get()
defer bufPool.put(buf)
- for _, part := range q.Parts {
- switch part := part.(type) {
+ for i := range q.Parts {
+ switch part := q.Parts[i].(type) {
case string:
buf.WriteString(part)
case int:
@@ -97,8 +97,8 @@ func (q *Query) Sanitize(args ...any) (string, error) {
}
}
- for i, used := range argUse {
- if !used {
+ for i := range argUse {
+ if !argUse[i] {
return "", fmt.Errorf("unused argument: %d", i)
}
}
@@ -154,7 +154,7 @@ func QuoteString(dst []byte, str string) []byte {
dst = append(dst, quote)
// Iterate through the string without allocating
- for i := 0; i < len(str); i++ {
+ for i := range len(str) {
if str[i] == quote {
dst = append(dst, quote, quote)
} else {
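
A standalone sketch of the quoting loop shown above (same idea, outside the internal package): wrap the string in single quotes and double any embedded quote characters.

package example

func quoteString(dst []byte, str string) []byte {
	const quote = '\''
	dst = append(dst, quote)
	// Iterate by index to avoid allocating; range over an int requires
	// Go 1.22+, matching the hunk above.
	for i := range len(str) {
		if str[i] == quote {
			dst = append(dst, quote, quote)
		} else {
			dst = append(dst, str[i])
		}
	}
	return append(dst, quote)
}
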
diff --git a/internal/stmtcache/lru_cache.go b/internal/stmtcache/lru_cache.go
index 52b479ad7..4101f9a33 100644
--- a/internal/stmtcache/lru_cache.go
+++ b/internal/stmtcache/lru_cache.go
@@ -16,9 +16,9 @@ type LRUCache struct {
}
// NewLRUCache creates a new LRUCache. cap is the maximum size of the cache.
-func NewLRUCache(cap int) *LRUCache {
+func NewLRUCache(capVar int) *LRUCache {
return &LRUCache{
- cap: cap,
+ cap: capVar,
m: make(map[string]*list.Element),
l: list.New(),
invalidSet: make(map[string]struct{}),
diff --git a/log/testingadapter/adapter.go b/log/testingadapter/adapter.go
index c901a6a65..de66428f9 100644
--- a/log/testingadapter/adapter.go
+++ b/log/testingadapter/adapter.go
@@ -26,8 +26,8 @@ func NewLogger(l TestingLogger) *Logger {
func (l *Logger) Log(ctx context.Context, level tracelog.LogLevel, msg string, data map[string]any) {
logArgs := make([]any, 0, 2+len(data))
logArgs = append(logArgs, level, msg)
- for k, v := range data {
- logArgs = append(logArgs, fmt.Sprintf("%s=%v", k, v))
+ for i := range data {
+ logArgs = append(logArgs, fmt.Sprintf("%s=%v", i, data[i]))
}
l.l.Log(logArgs...)
}
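
For context, a sketch of how this adapter is usually wired into a test connection via tracelog (the DSN is a placeholder):

package example

import (
	"context"
	"testing"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/log/testingadapter"
	"github.com/jackc/pgx/v5/tracelog"
)

func connectWithTestLogging(ctx context.Context, t *testing.T) (*pgx.Conn, error) {
	cfg, err := pgx.ParseConfig("postgres://user:pass@localhost:5432/db")
	if err != nil {
		return nil, err
	}
	// Each traced event ends up in t.Log via the Logger.Log method above.
	cfg.Tracer = &tracelog.TraceLog{
		Logger:   testingadapter.NewLogger(t),
		LogLevel: tracelog.LogLevelTrace,
	}
	return pgx.ConnectConfig(ctx, cfg)
}
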
diff --git a/multitracer/tracer.go b/multitracer/tracer.go
index acff17398..7ed84121a 100644
--- a/multitracer/tracer.go
+++ b/multitracer/tracer.go
@@ -24,30 +24,30 @@ type Tracer struct {
func New(tracers ...pgx.QueryTracer) *Tracer {
var t Tracer
- for _, tracer := range tracers {
- t.QueryTracers = append(t.QueryTracers, tracer)
+ for i := range tracers {
+ t.QueryTracers = append(t.QueryTracers, tracers[i])
- if batchTracer, ok := tracer.(pgx.BatchTracer); ok {
+ if batchTracer, ok := tracers[i].(pgx.BatchTracer); ok {
t.BatchTracers = append(t.BatchTracers, batchTracer)
}
- if copyFromTracer, ok := tracer.(pgx.CopyFromTracer); ok {
+ if copyFromTracer, ok := tracers[i].(pgx.CopyFromTracer); ok {
t.CopyFromTracers = append(t.CopyFromTracers, copyFromTracer)
}
- if prepareTracer, ok := tracer.(pgx.PrepareTracer); ok {
+ if prepareTracer, ok := tracers[i].(pgx.PrepareTracer); ok {
t.PrepareTracers = append(t.PrepareTracers, prepareTracer)
}
- if connectTracer, ok := tracer.(pgx.ConnectTracer); ok {
+ if connectTracer, ok := tracers[i].(pgx.ConnectTracer); ok {
t.ConnectTracers = append(t.ConnectTracers, connectTracer)
}
- if poolAcquireTracer, ok := tracer.(pgxpool.AcquireTracer); ok {
+ if poolAcquireTracer, ok := tracers[i].(pgxpool.AcquireTracer); ok {
t.PoolAcquireTracers = append(t.PoolAcquireTracers, poolAcquireTracer)
}
- if poolReleaseTracer, ok := tracer.(pgxpool.ReleaseTracer); ok {
+ if poolReleaseTracer, ok := tracers[i].(pgxpool.ReleaseTracer); ok {
t.PoolReleaseTracers = append(t.PoolReleaseTracers, poolReleaseTracer)
}
}
@@ -56,97 +56,97 @@ func New(tracers ...pgx.QueryTracer) *Tracer {
}
func (t *Tracer) TraceQueryStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryStartData) context.Context {
- for _, tracer := range t.QueryTracers {
- ctx = tracer.TraceQueryStart(ctx, conn, data)
+ for i := range t.QueryTracers {
+ ctx = t.QueryTracers[i].TraceQueryStart(ctx, conn, data)
}
return ctx
}
func (t *Tracer) TraceQueryEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceQueryEndData) {
- for _, tracer := range t.QueryTracers {
- tracer.TraceQueryEnd(ctx, conn, data)
+ for i := range t.QueryTracers {
+ t.QueryTracers[i].TraceQueryEnd(ctx, conn, data)
}
}
func (t *Tracer) TraceBatchStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceBatchStartData) context.Context {
- for _, tracer := range t.BatchTracers {
- ctx = tracer.TraceBatchStart(ctx, conn, data)
+ for i := range t.BatchTracers {
+ ctx = t.BatchTracers[i].TraceBatchStart(ctx, conn, data)
}
return ctx
}
func (t *Tracer) TraceBatchQuery(ctx context.Context, conn *pgx.Conn, data pgx.TraceBatchQueryData) {
- for _, tracer := range t.BatchTracers {
- tracer.TraceBatchQuery(ctx, conn, data)
+ for i := range t.BatchTracers {
+ t.BatchTracers[i].TraceBatchQuery(ctx, conn, data)
}
}
func (t *Tracer) TraceBatchEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceBatchEndData) {
- for _, tracer := range t.BatchTracers {
- tracer.TraceBatchEnd(ctx, conn, data)
+ for i := range t.BatchTracers {
+ t.BatchTracers[i].TraceBatchEnd(ctx, conn, data)
}
}
func (t *Tracer) TraceCopyFromStart(ctx context.Context, conn *pgx.Conn, data pgx.TraceCopyFromStartData) context.Context {
- for _, tracer := range t.CopyFromTracers {
- ctx = tracer.TraceCopyFromStart(ctx, conn, data)
+ for i := range t.CopyFromTracers {
+ ctx = t.CopyFromTracers[i].TraceCopyFromStart(ctx, conn, data)
}
return ctx
}
func (t *Tracer) TraceCopyFromEnd(ctx context.Context, conn *pgx.Conn, data pgx.TraceCopyFromEndData) {
- for _, tracer := range t.CopyFromTracers {
- tracer.TraceCopyFromEnd(ctx, conn, data)
+ for i := range t.CopyFromTracers {
+ t.CopyFromTracers[i].TraceCopyFromEnd(ctx, conn, data)
}
}
func (t *Tracer) TracePrepareStart(ctx context.Context, conn *pgx.Conn, data pgx.TracePrepareStartData) context.Context {
- for _, tracer := range t.PrepareTracers {
- ctx = tracer.TracePrepareStart(ctx, conn, data)
+ for i := range t.PrepareTracers {
+ ctx = t.PrepareTracers[i].TracePrepareStart(ctx, conn, data)
}
return ctx
}
func (t *Tracer) TracePrepareEnd(ctx context.Context, conn *pgx.Conn, data pgx.TracePrepareEndData) {
- for _, tracer := range t.PrepareTracers {
- tracer.TracePrepareEnd(ctx, conn, data)
+ for i := range t.PrepareTracers {
+ t.PrepareTracers[i].TracePrepareEnd(ctx, conn, data)
}
}
func (t *Tracer) TraceConnectStart(ctx context.Context, data pgx.TraceConnectStartData) context.Context {
- for _, tracer := range t.ConnectTracers {
- ctx = tracer.TraceConnectStart(ctx, data)
+ for i := range t.ConnectTracers {
+ ctx = t.ConnectTracers[i].TraceConnectStart(ctx, data)
}
return ctx
}
func (t *Tracer) TraceConnectEnd(ctx context.Context, data pgx.TraceConnectEndData) {
- for _, tracer := range t.ConnectTracers {
- tracer.TraceConnectEnd(ctx, data)
+ for i := range t.ConnectTracers {
+ t.ConnectTracers[i].TraceConnectEnd(ctx, data)
}
}
func (t *Tracer) TraceAcquireStart(ctx context.Context, pool *pgxpool.Pool, data pgxpool.TraceAcquireStartData) context.Context {
- for _, tracer := range t.PoolAcquireTracers {
- ctx = tracer.TraceAcquireStart(ctx, pool, data)
+ for i := range t.PoolAcquireTracers {
+ ctx = t.PoolAcquireTracers[i].TraceAcquireStart(ctx, pool, data)
}
return ctx
}
func (t *Tracer) TraceAcquireEnd(ctx context.Context, pool *pgxpool.Pool, data pgxpool.TraceAcquireEndData) {
- for _, tracer := range t.PoolAcquireTracers {
- tracer.TraceAcquireEnd(ctx, pool, data)
+ for i := range t.PoolAcquireTracers {
+ t.PoolAcquireTracers[i].TraceAcquireEnd(ctx, pool, data)
}
}
func (t *Tracer) TraceRelease(pool *pgxpool.Pool, data pgxpool.TraceReleaseData) {
- for _, tracer := range t.PoolReleaseTracers {
- tracer.TraceRelease(pool, data)
+ for i := range t.PoolReleaseTracers {
+ t.PoolReleaseTracers[i].TraceRelease(pool, data)
}
}
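
For context, a minimal sketch of composing tracers with this package; otherTracer stands in for any additional pgx.QueryTracer implementation:

package example

import (
	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/multitracer"
	"github.com/jackc/pgx/v5/tracelog"
)

func newCombinedTracer(logTracer *tracelog.TraceLog, otherTracer pgx.QueryTracer) *multitracer.Tracer {
	// New keeps every tracer in QueryTracers and additionally sorts each
	// one into the batch/copy/prepare/connect/pool lists it implements.
	return multitracer.New(logTracer, otherTracer)
}
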
diff --git a/named_args.go b/named_args.go
index c88991ee4..b9a397e3c 100644
--- a/named_args.go
+++ b/named_args.go
@@ -61,8 +61,8 @@ func rewriteQuery(na map[string]any, sql string, isStrict bool) (newSQL string,
}
sb := strings.Builder{}
- for _, p := range l.parts {
- switch p := p.(type) {
+ for i := range l.parts {
+ switch p := l.parts[i].(type) {
case string:
sb.WriteString(p)
case namedArg:
@@ -72,9 +72,9 @@ func rewriteQuery(na map[string]any, sql string, isStrict bool) (newSQL string,
}
newArgs = make([]any, len(l.nameToOrdinal))
- for name, ordinal := range l.nameToOrdinal {
+ for name := range l.nameToOrdinal {
var found bool
- newArgs[ordinal-1], found = na[string(name)]
+ newArgs[l.nameToOrdinal[name]-1], found = na[string(name)]
if isStrict && !found {
return "", nil, fmt.Errorf("argument %s found in sql query but not present in StrictNamedArgs", name)
}
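
For reference, the rewriter above in use; the table and column names are placeholders:

package example

import (
	"context"

	"github.com/jackc/pgx/v5"
)

func findUserName(ctx context.Context, conn *pgx.Conn) (string, error) {
	// NamedArgs is a QueryRewriter: @id and @active are rewritten to $1/$2
	// and the values are reordered to match the ordinals assigned above.
	var name string
	err := conn.QueryRow(ctx,
		"select name from users where id = @id and active = @active",
		pgx.NamedArgs{"id": int64(1), "active": true},
	).Scan(&name)
	return name, err
}
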
diff --git a/pgconn/benchmark_test.go b/pgconn/benchmark_test.go
index dad0d57d6..3f2c20b29 100644
--- a/pgconn/benchmark_test.go
+++ b/pgconn/benchmark_test.go
@@ -234,7 +234,7 @@ func BenchmarkExecPreparedPossibleToCancel(b *testing.B) {
// b.ResetTimer()
-// for i := 0; i < b.N; i++ {
+// for b.Loop() {
// conn.ChanToSetDeadline().Watch(ctx)
// conn.ChanToSetDeadline().Ignore()
// }
diff --git a/pgconn/config.go b/pgconn/config.go
index d5914aad9..ed33f1996 100644
--- a/pgconn/config.go
+++ b/pgconn/config.go
@@ -7,7 +7,6 @@ import (
"encoding/pem"
"errors"
"fmt"
- "io"
"maps"
"math"
"net"
@@ -108,11 +107,11 @@ func (c *Config) Copy() *Config {
}
if newConf.Fallbacks != nil {
newConf.Fallbacks = make([]*FallbackConfig, len(c.Fallbacks))
- for i, fallback := range c.Fallbacks {
+ for i := range c.Fallbacks {
newFallback := new(FallbackConfig)
- *newFallback = *fallback
+ *newFallback = *c.Fallbacks[i]
if newFallback.TLSConfig != nil {
- newFallback.TLSConfig = fallback.TLSConfig.Clone()
+ newFallback.TLSConfig = c.Fallbacks[i].TLSConfig.Clone()
}
newConf.Fallbacks[i] = newFallback
}
@@ -289,9 +288,7 @@ func ParseConfigWithOptions(connString string, options ParseConfigOptions) (*Con
User: settings["user"],
Password: settings["password"],
RuntimeParams: make(map[string]string),
- BuildFrontend: func(r io.Reader, w io.Writer) *pgproto3.Frontend {
- return pgproto3.NewFrontend(r, w)
- },
+ BuildFrontend: pgproto3.NewFrontend,
BuildContextWatcherHandler: func(pgConn *PgConn) ctxwatch.Handler {
return &DeadlineContextWatcherHandler{Conn: pgConn.conn}
},
@@ -348,19 +345,20 @@ func ParseConfigWithOptions(connString string, options ParseConfigOptions) (*Con
config.KerberosSpn = settings["krbspn"]
}
- for k, v := range settings {
+ for k := range settings {
if _, present := notRuntimeParams[k]; present {
continue
}
- config.RuntimeParams[k] = v
+ config.RuntimeParams[k] = settings[k]
}
- fallbacks := []*FallbackConfig{}
-
- hosts := strings.Split(settings["host"], ",")
- ports := strings.Split(settings["port"], ",")
+ var (
+ fallbacks = []*FallbackConfig{}
+ hosts = strings.Split(settings["host"], ",")
+ ports = strings.Split(settings["port"], ",")
+ )
- for i, host := range hosts {
+ for i := range hosts {
var portStr string
if i < len(ports) {
portStr = ports[i]
@@ -376,21 +374,21 @@ func ParseConfigWithOptions(connString string, options ParseConfigOptions) (*Con
var tlsConfigs []*tls.Config
// Ignore TLS settings if Unix domain socket like libpq
- if network, _ := NetworkAddress(host, port); network == "unix" {
+ if network, _ := NetworkAddress(hosts[i], port); network == "unix" {
tlsConfigs = append(tlsConfigs, nil)
} else {
var err error
- tlsConfigs, err = configTLS(settings, host, options)
+ tlsConfigs, err = configTLS(settings, hosts[i], options)
if err != nil {
return nil, &ParseConfigError{ConnString: connString, msg: "failed to configure TLS", err: err}
}
}
- for _, tlsConfig := range tlsConfigs {
+ for j := range tlsConfigs {
fallbacks = append(fallbacks, &FallbackConfig{
- Host: host,
+ Host: hosts[i],
Port: port,
- TLSConfig: tlsConfig,
+ TLSConfig: tlsConfigs[j],
})
}
}
@@ -436,8 +434,8 @@ func ParseConfigWithOptions(connString string, options ParseConfigOptions) (*Con
func mergeSettings(settingSets ...map[string]string) map[string]string {
settings := make(map[string]string)
- for _, s2 := range settingSets {
- maps.Copy(settings, s2)
+ for i := range settingSets {
+ maps.Copy(settings, settingSets[i])
}
return settings
@@ -469,10 +467,10 @@ func parseEnvSettings() map[string]string {
"PGOPTIONS": "options",
}
- for envname, realname := range nameMap {
+ for envname := range nameMap {
value := os.Getenv(envname)
if value != "" {
- settings[realname] = value
+ settings[nameMap[envname]] = value
}
}
@@ -553,22 +551,24 @@ func isIPOnly(host string) bool {
var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1}
func parseKeywordValueSettings(s string) (map[string]string, error) {
- settings := make(map[string]string)
- nameMap := map[string]string{
- "dbname": "database",
- }
+ var (
+ settings = make(map[string]string)
+ nameMap = map[string]string{"dbname": "database"}
+ )
- for len(s) > 0 {
- var key, val string
- eqIdx := strings.IndexRune(s, '=')
+ for s != "" {
+ var (
+ key, val string
+ eqIdx = strings.IndexRune(s, '=')
+ )
if eqIdx < 0 {
return nil, errors.New("invalid keyword/value")
}
key = strings.Trim(s[:eqIdx], " \t\n\r\v\f")
s = strings.TrimLeft(s[eqIdx+1:], " \t\n\r\v\f")
- if len(s) == 0 {
+ if s == "" {
} else if s[0] != '\'' {
end := 0
for ; end < len(s); end++ {
@@ -582,7 +582,7 @@ func parseKeywordValueSettings(s string) (map[string]string, error) {
}
}
}
- val = strings.Replace(strings.Replace(s[:end], "\\\\", "\\", -1), "\\'", "'", -1)
+ val = strings.ReplaceAll(strings.ReplaceAll(s[:end], "\\\\", "\\"), "\\'", "'")
if end == len(s) {
s = ""
} else {
@@ -602,7 +602,7 @@ func parseKeywordValueSettings(s string) (map[string]string, error) {
if end == len(s) {
return nil, errors.New("unterminated quoted string in connection info string")
}
- val = strings.Replace(strings.Replace(s[:end], "\\\\", "\\", -1), "\\'", "'", -1)
+ val = strings.ReplaceAll(strings.ReplaceAll(s[:end], "\\\\", "\\"), "\\'", "'")
if end == len(s) {
s = ""
} else {
@@ -654,14 +654,17 @@ func parseServiceSettings(servicefilePath, serviceName string) (map[string]strin
// necessary to allow returning multiple TLS configs as sslmode "allow" and
// "prefer" allow fallback.
func configTLS(settings map[string]string, thisHost string, parseConfigOptions ParseConfigOptions) ([]*tls.Config, error) {
- host := thisHost
- sslmode := settings["sslmode"]
- sslrootcert := settings["sslrootcert"]
- sslcert := settings["sslcert"]
- sslkey := settings["sslkey"]
- sslpassword := settings["sslpassword"]
- sslsni := settings["sslsni"]
- sslnegotiation := settings["sslnegotiation"]
+
+ var (
+ host = thisHost
+ sslmode = settings["sslmode"]
+ sslrootcert = settings["sslrootcert"]
+ sslcert = settings["sslcert"]
+ sslkey = settings["sslkey"]
+ sslpassword = settings["sslpassword"]
+ sslsni = settings["sslsni"]
+ sslnegotiation = settings["sslnegotiation"]
+ )
// Match libpq default behavior
if sslmode == "" {
@@ -740,8 +743,8 @@ func configTLS(settings map[string]string, thisHost string, parseConfigOptions P
tlsConfig.InsecureSkipVerify = true
tlsConfig.VerifyPeerCertificate = func(certificates [][]byte, _ [][]*x509.Certificate) error {
certs := make([]*x509.Certificate, len(certificates))
- for i, asn1Data := range certificates {
- cert, err := x509.ParseCertificate(asn1Data)
+ for i := range certificates {
+ cert, err := x509.ParseCertificate(certificates[i])
if err != nil {
return errors.New("failed to parse certificate from server: " + err.Error())
}
@@ -755,8 +758,8 @@ func configTLS(settings map[string]string, thisHost string, parseConfigOptions P
}
// Skip the first cert because it's the leaf. All others
// are intermediates.
- for _, cert := range certs[1:] {
- opts.Intermediates.AddCert(cert)
+ for i := range certs[1:] {
+ opts.Intermediates.AddCert(certs[1:][i])
}
_, err := certs[0].Verify(opts)
return err
@@ -780,9 +783,11 @@ func configTLS(settings map[string]string, thisHost string, parseConfigOptions P
if block == nil {
return nil, errors.New("failed to decode sslkey")
}
- var pemKey []byte
- var decryptedKey []byte
- var decryptedError error
+ var (
+ pemKey []byte
+ decryptedKey []byte
+ decryptedError error
+ )
// If PEM is encrypted, attempt to decrypt using pass phrase
if x509.IsEncryptedPEMBlock(block) {
// Attempt decryption with pass phrase
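
For context, the fallback expansion above as seen from the public API; hosts and credentials are placeholders:

package example

import "github.com/jackc/pgx/v5/pgconn"

func parseMultiHost() (*pgconn.Config, error) {
	// Each host/port pair becomes a FallbackConfig, and with sslmode=prefer
	// configTLS returns both a TLS and a plaintext attempt per host.
	return pgconn.ParseConfig("host=db1.example.com,db2.example.com port=5432,5433 user=app dbname=appdb sslmode=prefer")
}
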
diff --git a/pgconn/defaults.go b/pgconn/defaults.go
index 1dd514ff4..c5c4dd87f 100644
--- a/pgconn/defaults.go
+++ b/pgconn/defaults.go
@@ -18,13 +18,13 @@ func defaultSettings() map[string]string {
// Default to the OS user name. Purposely ignoring err getting user name from
// OS. The client application will simply have to specify the user in that
// case (which they typically will be doing anyway).
- user, err := user.Current()
+ userVar, err := user.Current()
if err == nil {
- settings["user"] = user.Username
- settings["passfile"] = filepath.Join(user.HomeDir, ".pgpass")
- settings["servicefile"] = filepath.Join(user.HomeDir, ".pg_service.conf")
- sslcert := filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt")
- sslkey := filepath.Join(user.HomeDir, ".postgresql", "postgresql.key")
+ settings["user"] = userVar.Username
+ settings["passfile"] = filepath.Join(userVar.HomeDir, ".pgpass")
+ settings["servicefile"] = filepath.Join(userVar.HomeDir, ".pg_service.conf")
+ sslcert := filepath.Join(userVar.HomeDir, ".postgresql", "postgresql.crt")
+ sslkey := filepath.Join(userVar.HomeDir, ".postgresql", "postgresql.key")
if _, err := os.Stat(sslcert); err == nil {
if _, err := os.Stat(sslkey); err == nil {
// Both the cert and key must be present to use them, or do not use either
@@ -32,7 +32,7 @@ func defaultSettings() map[string]string {
settings["sslkey"] = sslkey
}
}
- sslrootcert := filepath.Join(user.HomeDir, ".postgresql", "root.crt")
+ sslrootcert := filepath.Join(userVar.HomeDir, ".postgresql", "root.crt")
if _, err := os.Stat(sslrootcert); err == nil {
settings["sslrootcert"] = sslrootcert
}
@@ -53,9 +53,9 @@ func defaultHost() string {
"/tmp", // standard PostgreSQL
}
- for _, path := range candidatePaths {
- if _, err := os.Stat(path); err == nil {
- return path
+ for i := range candidatePaths {
+ if _, err := os.Stat(candidatePaths[i]); err == nil {
+ return candidatePaths[i]
}
}
diff --git a/pgconn/errors.go b/pgconn/errors.go
index bc1e31e31..a72542408 100644
--- a/pgconn/errors.go
+++ b/pgconn/errors.go
@@ -138,13 +138,17 @@ func (e *ParseConfigError) Unwrap() error {
func normalizeTimeoutError(ctx context.Context, err error) error {
var netErr net.Error
if errors.As(err, &netErr) && netErr.Timeout() {
- if ctx.Err() == context.Canceled {
+ switch {
+ case ctx.Err() == context.Canceled:
// Since the timeout was caused by a context cancellation, the actual error is context.Canceled not the timeout error.
return context.Canceled
- } else if ctx.Err() == context.DeadlineExceeded {
+
+ case ctx.Err() == context.DeadlineExceeded:
return &errTimeout{err: ctx.Err()}
- } else {
+
+ default:
return &errTimeout{err: netErr}
+
}
}
return err
diff --git a/pgconn/internal/bgreader/bgreader_test.go b/pgconn/internal/bgreader/bgreader_test.go
index 3a8aa00f6..303502135 100644
--- a/pgconn/internal/bgreader/bgreader_test.go
+++ b/pgconn/internal/bgreader/bgreader_test.go
@@ -49,9 +49,9 @@ func (r *mockReader) Read(p []byte) (int, error) {
func TestBGReaderReadWaitsForBackgroundRead(t *testing.T) {
rr := &mockReader{
readFuncs: []mockReadFunc{
- func(p []byte) (int, error) { time.Sleep(1 * time.Second); return copy(p, []byte("foo")), nil },
- func(p []byte) (int, error) { return copy(p, []byte("bar")), nil },
- func(p []byte) (int, error) { return copy(p, []byte("baz")), nil },
+			func(p []byte) (int, error) { time.Sleep(1 * time.Second); return copy(p, "foo"), nil },
+			func(p []byte) (int, error) { return copy(p, "bar"), nil },
+			func(p []byte) (int, error) { return copy(p, "baz"), nil },
},
}
bgr := bgreader.New(rr)
@@ -60,15 +60,15 @@ func TestBGReaderReadWaitsForBackgroundRead(t *testing.T) {
n, err := bgr.Read(buf)
require.NoError(t, err)
require.EqualValues(t, 3, n)
- require.Equal(t, []byte("foo"), buf)
+ require.Equal(t, "foo", string(buf))
}
func TestBGReaderErrorWhenStarted(t *testing.T) {
rr := &mockReader{
readFuncs: []mockReadFunc{
- func(p []byte) (int, error) { return copy(p, []byte("foo")), nil },
- func(p []byte) (int, error) { return copy(p, []byte("bar")), nil },
- func(p []byte) (int, error) { return copy(p, []byte("baz")), errors.New("oops") },
+ func(p []byte) (int, error) { return copy(p, "foo"), nil },
+ func(p []byte) (int, error) { return copy(p, "bar"), nil },
+ func(p []byte) (int, error) { return copy(p, "baz"), errors.New("oops") },
},
}
@@ -82,9 +82,9 @@ func TestBGReaderErrorWhenStarted(t *testing.T) {
func TestBGReaderErrorWhenStopped(t *testing.T) {
rr := &mockReader{
readFuncs: []mockReadFunc{
- func(p []byte) (int, error) { return copy(p, []byte("foo")), nil },
- func(p []byte) (int, error) { return copy(p, []byte("bar")), nil },
- func(p []byte) (int, error) { return copy(p, []byte("baz")), errors.New("oops") },
+ func(p []byte) (int, error) { return copy(p, "foo"), nil },
+ func(p []byte) (int, error) { return copy(p, "bar"), nil },
+ func(p []byte) (int, error) { return copy(p, "baz"), errors.New("oops") },
},
}
diff --git a/pgconn/pgconn.go b/pgconn/pgconn.go
index 081e20578..1dddcdf81 100644
--- a/pgconn/pgconn.go
+++ b/pgconn/pgconn.go
@@ -190,28 +190,28 @@ func buildConnectOneConfigs(ctx context.Context, config *Config) ([]*connectOneC
var allErrors []error
- for _, fb := range fallbackConfigs {
+ for i := range fallbackConfigs {
// skip resolve for unix sockets
- if isAbsolutePath(fb.Host) {
- network, address := NetworkAddress(fb.Host, fb.Port)
+ if isAbsolutePath(fallbackConfigs[i].Host) {
+ network, address := NetworkAddress(fallbackConfigs[i].Host, fallbackConfigs[i].Port)
configs = append(configs, &connectOneConfig{
network: network,
address: address,
- originalHostname: fb.Host,
- tlsConfig: fb.TLSConfig,
+ originalHostname: fallbackConfigs[i].Host,
+ tlsConfig: fallbackConfigs[i].TLSConfig,
})
continue
}
- ips, err := config.LookupFunc(ctx, fb.Host)
+ ips, err := config.LookupFunc(ctx, fallbackConfigs[i].Host)
if err != nil {
allErrors = append(allErrors, err)
continue
}
- for _, ip := range ips {
- splitIP, splitPort, err := net.SplitHostPort(ip)
+ for j := range ips {
+ splitIP, splitPort, err := net.SplitHostPort(ips[j])
if err == nil {
port, err := strconv.ParseUint(splitPort, 10, 16)
if err != nil {
@@ -221,16 +221,16 @@ func buildConnectOneConfigs(ctx context.Context, config *Config) ([]*connectOneC
configs = append(configs, &connectOneConfig{
network: network,
address: address,
- originalHostname: fb.Host,
- tlsConfig: fb.TLSConfig,
+ originalHostname: fallbackConfigs[i].Host,
+ tlsConfig: fallbackConfigs[i].TLSConfig,
})
} else {
- network, address := NetworkAddress(ip, fb.Port)
+ network, address := NetworkAddress(ips[j], fallbackConfigs[i].Port)
configs = append(configs, &connectOneConfig{
network: network,
address: address,
- originalHostname: fb.Host,
- tlsConfig: fb.TLSConfig,
+ originalHostname: fallbackConfigs[i].Host,
+ tlsConfig: fallbackConfigs[i].TLSConfig,
})
}
}
@@ -243,11 +243,14 @@ func buildConnectOneConfigs(ctx context.Context, config *Config) ([]*connectOneC
// order. If a connection is successful it is returned. If no connection is successful then all errors are returned. If
// a connection attempt returns a [NotPreferredError], then that host will be used if no other hosts are successful.
func connectPreferred(ctx context.Context, config *Config, connectOneConfigs []*connectOneConfig) (*PgConn, []error) {
- octx := ctx
- var allErrors []error
- var fallbackConnectOneConfig *connectOneConfig
- for i, c := range connectOneConfigs {
+ var (
+ octx = ctx
+ allErrors []error
+ fallbackConnectOneConfig *connectOneConfig
+ )
+
+ for i := range connectOneConfigs {
// ConnectTimeout restricts the whole connection process.
if config.ConnectTimeout != 0 {
// create new context first time or when previous host was different
@@ -260,7 +263,7 @@ func connectPreferred(ctx context.Context, config *Config, connectOneConfigs []*
ctx = octx
}
- pgConn, err := connectOne(ctx, config, c, false)
+ pgConn, err := connectOne(ctx, config, connectOneConfigs[i], false)
if pgConn != nil {
return pgConn, nil
}
@@ -272,9 +275,11 @@ func connectPreferred(ctx context.Context, config *Config, connectOneConfigs []*
// pgx will try next host even if libpq does not in certain cases (see #2246)
// consider change for the next major version
- const ERRCODE_INVALID_PASSWORD = "28P01"
- const ERRCODE_INVALID_CATALOG_NAME = "3D000" // db does not exist
- const ERRCODE_INSUFFICIENT_PRIVILEGE = "42501" // missing connect privilege
+ const (
+ ERRCODE_INVALID_PASSWORD = "28P01"
+ ERRCODE_INVALID_CATALOG_NAME = "3D000" // db does not exist
+ ERRCODE_INSUFFICIENT_PRIVILEGE = "42501" // missing connect privilege
+ )
// auth failed due to invalid password, db does not exist or user has no permission
if pgErr.Code == ERRCODE_INVALID_PASSWORD ||
@@ -286,7 +291,7 @@ func connectPreferred(ctx context.Context, config *Config, connectOneConfigs []*
var npErr *NotPreferredError
if errors.As(err, &npErr) {
- fallbackConnectOneConfig = c
+ fallbackConnectOneConfig = connectOneConfigs[i]
}
}
@@ -718,7 +723,7 @@ func (pgConn *PgConn) asyncClose() {
//
// This is only likely to be useful to connection pools. It gives them a way avoid establishing a new connection while
// an old connection is still being cleaned up and thereby exceeding the maximum pool size.
-func (pgConn *PgConn) CleanupDone() chan (struct{}) {
+func (pgConn *PgConn) CleanupDone() chan struct{} {
return pgConn.cleanupDone
}
@@ -792,8 +797,8 @@ func (ct CommandTag) RowsAffected() int64 {
}
var n int64
- for _, b := range ct.s[idx:] {
- n = n*10 + int64(b-'0')
+ for i := range ct.s[idx:] {
+ n = n*10 + int64(ct.s[idx+i]-'0')
}
return n
@@ -1846,7 +1851,7 @@ func (pgConn *PgConn) EscapeString(s string) (string, error) {
return "", errors.New("EscapeString must be run with client_encoding=UTF8")
}
- return strings.Replace(s, "'", "''", -1), nil
+ return strings.ReplaceAll(s, "'", "''"), nil
}
// CheckConn checks the underlying connection without writing any bytes. This is currently implemented by doing a read
diff --git a/pgconn/pgconn_stress_test.go b/pgconn/pgconn_stress_test.go
index 5cbc7c2d4..d2e2b6305 100644
--- a/pgconn/pgconn_stress_test.go
+++ b/pgconn/pgconn_stress_test.go
@@ -36,7 +36,7 @@ func TestConnStress(t *testing.T) {
{"Batch", stressBatch},
}
- for i := 0; i < actionCount; i++ {
+ for i := range actionCount {
action := actions[rand.IntN(len(actions))]
err := action.fn(pgConn)
require.Nilf(t, err, "%d: %s", i, action.name)
diff --git a/pgproto3/authentication_sasl.go b/pgproto3/authentication_sasl.go
index e66580f44..93cb8e822 100644
--- a/pgproto3/authentication_sasl.go
+++ b/pgproto3/authentication_sasl.go
@@ -51,8 +51,8 @@ func (src *AuthenticationSASL) Encode(dst []byte) ([]byte, error) {
dst, sp := beginMessage(dst, 'R')
dst = pgio.AppendUint32(dst, AuthTypeSASL)
- for _, s := range src.AuthMechanisms {
- dst = append(dst, []byte(s)...)
+ for i := range src.AuthMechanisms {
+ dst = append(dst, []byte(src.AuthMechanisms[i])...)
dst = append(dst, 0)
}
dst = append(dst, 0)
diff --git a/pgproto3/bind.go b/pgproto3/bind.go
index 13700c39c..158725562 100644
--- a/pgproto3/bind.go
+++ b/pgproto3/bind.go
@@ -122,40 +122,40 @@ func (src *Bind) Encode(dst []byte) ([]byte, error) {
return nil, errors.New("too many parameter format codes")
}
dst = pgio.AppendUint16(dst, uint16(len(src.ParameterFormatCodes)))
- for _, fc := range src.ParameterFormatCodes {
- dst = pgio.AppendInt16(dst, fc)
+ for i := range src.ParameterFormatCodes {
+ dst = pgio.AppendInt16(dst, src.ParameterFormatCodes[i])
}
if len(src.Parameters) > math.MaxUint16 {
return nil, errors.New("too many parameters")
}
dst = pgio.AppendUint16(dst, uint16(len(src.Parameters)))
- for _, p := range src.Parameters {
- if p == nil {
+ for i := range src.Parameters {
+ if src.Parameters[i] == nil {
dst = pgio.AppendInt32(dst, -1)
continue
}
- dst = pgio.AppendInt32(dst, int32(len(p)))
- dst = append(dst, p...)
+ dst = pgio.AppendInt32(dst, int32(len(src.Parameters[i])))
+ dst = append(dst, src.Parameters[i]...)
}
if len(src.ResultFormatCodes) > math.MaxUint16 {
return nil, errors.New("too many result format codes")
}
dst = pgio.AppendUint16(dst, uint16(len(src.ResultFormatCodes)))
- for _, fc := range src.ResultFormatCodes {
- dst = pgio.AppendInt16(dst, fc)
+ for i := range src.ResultFormatCodes {
+ dst = pgio.AppendInt16(dst, src.ResultFormatCodes[i])
}
return finishMessage(dst, sp)
}
// MarshalJSON implements encoding/json.Marshaler.
-func (src Bind) MarshalJSON() ([]byte, error) {
+func (src *Bind) MarshalJSON() ([]byte, error) {
formattedParameters := make([]map[string]string, len(src.Parameters))
- for i, p := range src.Parameters {
- if p == nil {
+ for i := range src.Parameters {
+ if src.Parameters[i] == nil {
continue
}
@@ -167,9 +167,9 @@ func (src Bind) MarshalJSON() ([]byte, error) {
}
if textFormat {
- formattedParameters[i] = map[string]string{"text": string(p)}
+ formattedParameters[i] = map[string]string{"text": string(src.Parameters[i])}
} else {
- formattedParameters[i] = map[string]string{"binary": hex.EncodeToString(p)}
+ formattedParameters[i] = map[string]string{"binary": hex.EncodeToString(src.Parameters[i])}
}
}
@@ -213,10 +213,10 @@ func (dst *Bind) UnmarshalJSON(data []byte) error {
dst.ParameterFormatCodes = msg.ParameterFormatCodes
dst.Parameters = make([][]byte, len(msg.Parameters))
dst.ResultFormatCodes = msg.ResultFormatCodes
- for n, parameter := range msg.Parameters {
- dst.Parameters[n], err = getValueFromJSON(parameter)
+ for i := range msg.Parameters {
+ dst.Parameters[i], err = getValueFromJSON(msg.Parameters[i])
if err != nil {
- return fmt.Errorf("cannot get param %d: %w", n, err)
+ return fmt.Errorf("cannot get param %d: %w", i, err)
}
}
return nil
diff --git a/pgproto3/chunkreader_test.go b/pgproto3/chunkreader_test.go
index cb8b2e459..f877a0b86 100644
--- a/pgproto3/chunkreader_test.go
+++ b/pgproto3/chunkreader_test.go
@@ -53,7 +53,7 @@ type randomReader struct {
// Read reads a random number of random bytes.
func (r *randomReader) Read(p []byte) (n int, err error) {
n = r.rnd.IntN(len(p) + 1)
- for i := 0; i < n; i++ {
+ for i := range n {
p[i] = byte(r.rnd.Uint64())
}
return n, nil
diff --git a/pgproto3/copy_both_response.go b/pgproto3/copy_both_response.go
index e2a402f9a..ed36a1931 100644
--- a/pgproto3/copy_both_response.go
+++ b/pgproto3/copy_both_response.go
@@ -52,8 +52,8 @@ func (src *CopyBothResponse) Encode(dst []byte) ([]byte, error) {
return nil, errors.New("too many column format codes")
}
dst = pgio.AppendUint16(dst, uint16(len(src.ColumnFormatCodes)))
- for _, fc := range src.ColumnFormatCodes {
- dst = pgio.AppendUint16(dst, fc)
+ for i := range src.ColumnFormatCodes {
+ dst = pgio.AppendUint16(dst, src.ColumnFormatCodes[i])
}
return finishMessage(dst, sp)
diff --git a/pgproto3/copy_in_response.go b/pgproto3/copy_in_response.go
index 0633935b9..8ff06830b 100644
--- a/pgproto3/copy_in_response.go
+++ b/pgproto3/copy_in_response.go
@@ -53,8 +53,8 @@ func (src *CopyInResponse) Encode(dst []byte) ([]byte, error) {
return nil, errors.New("too many column format codes")
}
dst = pgio.AppendUint16(dst, uint16(len(src.ColumnFormatCodes)))
- for _, fc := range src.ColumnFormatCodes {
- dst = pgio.AppendUint16(dst, fc)
+ for i := range src.ColumnFormatCodes {
+ dst = pgio.AppendUint16(dst, src.ColumnFormatCodes[i])
}
return finishMessage(dst, sp)
diff --git a/pgproto3/copy_out_response.go b/pgproto3/copy_out_response.go
index 006864ac8..cfd0fcebc 100644
--- a/pgproto3/copy_out_response.go
+++ b/pgproto3/copy_out_response.go
@@ -53,8 +53,8 @@ func (src *CopyOutResponse) Encode(dst []byte) ([]byte, error) {
return nil, errors.New("too many column format codes")
}
dst = pgio.AppendUint16(dst, uint16(len(src.ColumnFormatCodes)))
- for _, fc := range src.ColumnFormatCodes {
- dst = pgio.AppendUint16(dst, fc)
+ for i := range src.ColumnFormatCodes {
+ dst = pgio.AppendUint16(dst, src.ColumnFormatCodes[i])
}
return finishMessage(dst, sp)
diff --git a/pgproto3/data_row.go b/pgproto3/data_row.go
index 54418d58c..ea07d3615 100644
--- a/pgproto3/data_row.go
+++ b/pgproto3/data_row.go
@@ -69,14 +69,14 @@ func (src *DataRow) Encode(dst []byte) ([]byte, error) {
return nil, errors.New("too many values")
}
dst = pgio.AppendUint16(dst, uint16(len(src.Values)))
- for _, v := range src.Values {
- if v == nil {
+ for i := range src.Values {
+ if src.Values[i] == nil {
dst = pgio.AppendInt32(dst, -1)
continue
}
- dst = pgio.AppendInt32(dst, int32(len(v)))
- dst = append(dst, v...)
+ dst = pgio.AppendInt32(dst, int32(len(src.Values[i])))
+ dst = append(dst, src.Values[i]...)
}
return finishMessage(dst, sp)
@@ -85,23 +85,23 @@ func (src *DataRow) Encode(dst []byte) ([]byte, error) {
// MarshalJSON implements encoding/json.Marshaler.
func (src DataRow) MarshalJSON() ([]byte, error) {
formattedValues := make([]map[string]string, len(src.Values))
- for i, v := range src.Values {
- if v == nil {
+ for i := range src.Values {
+ if src.Values[i] == nil {
continue
}
var hasNonPrintable bool
- for _, b := range v {
- if b < 32 {
+ for j := range src.Values[i] {
+ if src.Values[i][j] < 32 {
hasNonPrintable = true
break
}
}
if hasNonPrintable {
- formattedValues[i] = map[string]string{"binary": hex.EncodeToString(v)}
+ formattedValues[i] = map[string]string{"binary": hex.EncodeToString(src.Values[i])}
} else {
- formattedValues[i] = map[string]string{"text": string(v)}
+ formattedValues[i] = map[string]string{"text": string(src.Values[i])}
}
}
@@ -129,9 +129,9 @@ func (dst *DataRow) UnmarshalJSON(data []byte) error {
}
dst.Values = make([][]byte, len(msg.Values))
- for n, parameter := range msg.Values {
+ for i := range msg.Values {
var err error
- dst.Values[n], err = getValueFromJSON(parameter)
+ dst.Values[i], err = getValueFromJSON(msg.Values[i])
if err != nil {
return err
}
diff --git a/pgproto3/error_response.go b/pgproto3/error_response.go
index 6ef9bd061..033b2e01a 100644
--- a/pgproto3/error_response.go
+++ b/pgproto3/error_response.go
@@ -208,9 +208,9 @@ func (src *ErrorResponse) appendFields(dst []byte) []byte {
dst = append(dst, 0)
}
- for k, v := range src.UnknownFields {
- dst = append(dst, k)
- dst = append(dst, v...)
+ for k := range src.UnknownFields {
+ dst = append(dst, k)
+ dst = append(dst, src.UnknownFields[k]...)
dst = append(dst, 0)
}
@@ -220,7 +220,7 @@ func (src *ErrorResponse) appendFields(dst []byte) []byte {
}
// MarshalJSON implements encoding/json.Marshaler.
-func (src ErrorResponse) MarshalJSON() ([]byte, error) {
+func (src *ErrorResponse) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Type string
Severity string
diff --git a/pgproto3/function_call.go b/pgproto3/function_call.go
index affb713f5..bd7f295a5 100644
--- a/pgproto3/function_call.go
+++ b/pgproto3/function_call.go
@@ -81,20 +81,20 @@ func (src *FunctionCall) Encode(dst []byte) ([]byte, error) {
return nil, errors.New("too many arg format codes")
}
dst = pgio.AppendUint16(dst, uint16(len(src.ArgFormatCodes)))
- for _, argFormatCode := range src.ArgFormatCodes {
- dst = pgio.AppendUint16(dst, argFormatCode)
+ for i := range src.ArgFormatCodes {
+ dst = pgio.AppendUint16(dst, src.ArgFormatCodes[i])
}
if len(src.Arguments) > math.MaxUint16 {
return nil, errors.New("too many arguments")
}
dst = pgio.AppendUint16(dst, uint16(len(src.Arguments)))
- for _, argument := range src.Arguments {
- if argument == nil {
+ for i := range src.Arguments {
+ if src.Arguments[i] == nil {
dst = pgio.AppendInt32(dst, -1)
} else {
- dst = pgio.AppendInt32(dst, int32(len(argument)))
- dst = append(dst, argument...)
+ dst = pgio.AppendInt32(dst, int32(len(src.Arguments[i])))
+ dst = append(dst, src.Arguments[i]...)
}
}
dst = pgio.AppendUint16(dst, src.ResultFormatCode)
diff --git a/pgproto3/function_call_response.go b/pgproto3/function_call_response.go
index 1f2734952..a3071207f 100644
--- a/pgproto3/function_call_response.go
+++ b/pgproto3/function_call_response.go
@@ -54,10 +54,12 @@ func (src *FunctionCallResponse) Encode(dst []byte) ([]byte, error) {
// MarshalJSON implements encoding/json.Marshaler.
func (src FunctionCallResponse) MarshalJSON() ([]byte, error) {
- var formattedValue map[string]string
- var hasNonPrintable bool
- for _, b := range src.Result {
- if b < 32 {
+ var (
+ formattedValue map[string]string
+ hasNonPrintable bool
+ )
+ for i := range src.Result {
+ if src.Result[i] < 32 {
hasNonPrintable = true
break
}
diff --git a/pgproto3/parameter_description.go b/pgproto3/parameter_description.go
index 58eb26ef0..38733513c 100644
--- a/pgproto3/parameter_description.go
+++ b/pgproto3/parameter_description.go
@@ -48,8 +48,8 @@ func (src *ParameterDescription) Encode(dst []byte) ([]byte, error) {
return nil, errors.New("too many parameter oids")
}
dst = pgio.AppendUint16(dst, uint16(len(src.ParameterOIDs)))
- for _, oid := range src.ParameterOIDs {
- dst = pgio.AppendUint32(dst, oid)
+ for i := range src.ParameterOIDs {
+ dst = pgio.AppendUint32(dst, src.ParameterOIDs[i])
}
return finishMessage(dst, sp)
diff --git a/pgproto3/parse.go b/pgproto3/parse.go
index 8fb8de5d4..0695e9d20 100644
--- a/pgproto3/parse.go
+++ b/pgproto3/parse.go
@@ -66,8 +66,8 @@ func (src *Parse) Encode(dst []byte) ([]byte, error) {
return nil, errors.New("too many parameter oids")
}
dst = pgio.AppendUint16(dst, uint16(len(src.ParameterOIDs)))
- for _, oid := range src.ParameterOIDs {
- dst = pgio.AppendUint32(dst, oid)
+ for i := range src.ParameterOIDs {
+ dst = pgio.AppendUint32(dst, src.ParameterOIDs[i])
}
return finishMessage(dst, sp)
diff --git a/pgproto3/row_description.go b/pgproto3/row_description.go
index b46f510dc..5bd3dcf65 100644
--- a/pgproto3/row_description.go
+++ b/pgproto3/row_description.go
@@ -107,16 +107,16 @@ func (src *RowDescription) Encode(dst []byte) ([]byte, error) {
return nil, errors.New("too many fields")
}
dst = pgio.AppendUint16(dst, uint16(len(src.Fields)))
- for _, fd := range src.Fields {
- dst = append(dst, fd.Name...)
+ for i := range src.Fields {
+ dst = append(dst, src.Fields[i].Name...)
dst = append(dst, 0)
- dst = pgio.AppendUint32(dst, fd.TableOID)
- dst = pgio.AppendUint16(dst, fd.TableAttributeNumber)
- dst = pgio.AppendUint32(dst, fd.DataTypeOID)
- dst = pgio.AppendInt16(dst, fd.DataTypeSize)
- dst = pgio.AppendInt32(dst, fd.TypeModifier)
- dst = pgio.AppendInt16(dst, fd.Format)
+ dst = pgio.AppendUint32(dst, src.Fields[i].TableOID)
+ dst = pgio.AppendUint16(dst, src.Fields[i].TableAttributeNumber)
+ dst = pgio.AppendUint32(dst, src.Fields[i].DataTypeOID)
+ dst = pgio.AppendInt16(dst, src.Fields[i].DataTypeSize)
+ dst = pgio.AppendInt32(dst, src.Fields[i].TypeModifier)
+ dst = pgio.AppendInt16(dst, src.Fields[i].Format)
}
return finishMessage(dst, sp)
@@ -150,15 +150,15 @@ func (dst *RowDescription) UnmarshalJSON(data []byte) error {
return err
}
dst.Fields = make([]FieldDescription, len(msg.Fields))
- for n, field := range msg.Fields {
- dst.Fields[n] = FieldDescription{
- Name: []byte(field.Name),
- TableOID: field.TableOID,
- TableAttributeNumber: field.TableAttributeNumber,
- DataTypeOID: field.DataTypeOID,
- DataTypeSize: field.DataTypeSize,
- TypeModifier: field.TypeModifier,
- Format: field.Format,
+ for i := range msg.Fields {
+ dst.Fields[i] = FieldDescription{
+ Name: []byte(msg.Fields[i].Name),
+ TableOID: msg.Fields[i].TableOID,
+ TableAttributeNumber: msg.Fields[i].TableAttributeNumber,
+ DataTypeOID: msg.Fields[i].DataTypeOID,
+ DataTypeSize: msg.Fields[i].DataTypeSize,
+ TypeModifier: msg.Fields[i].TypeModifier,
+ Format: msg.Fields[i].Format,
}
}
return nil
diff --git a/pgproto3/startup_message.go b/pgproto3/startup_message.go
index 3af4587d8..a50add175 100644
--- a/pgproto3/startup_message.go
+++ b/pgproto3/startup_message.go
@@ -69,10 +69,10 @@ func (src *StartupMessage) Encode(dst []byte) ([]byte, error) {
dst = pgio.AppendInt32(dst, -1)
dst = pgio.AppendUint32(dst, src.ProtocolVersion)
- for k, v := range src.Parameters {
- dst = append(dst, k...)
+ for k := range src.Parameters {
+ dst = append(dst, k...)
dst = append(dst, 0)
- dst = append(dst, v...)
+ dst = append(dst, src.Parameters[k]...)
dst = append(dst, 0)
}
dst = append(dst, 0)
diff --git a/pgproto3/trace.go b/pgproto3/trace.go
index 6cc7d3e36..f0b49692d 100644
--- a/pgproto3/trace.go
+++ b/pgproto3/trace.go
@@ -171,16 +171,16 @@ func (t *tracer) traceBackendKeyData(sender byte, encodedLen int32, msg *Backend
func (t *tracer) traceBind(sender byte, encodedLen int32, msg *Bind) {
t.writeTrace(sender, encodedLen, "Bind", func() {
fmt.Fprintf(t.buf, "\t %s %s %d", traceDoubleQuotedString([]byte(msg.DestinationPortal)), traceDoubleQuotedString([]byte(msg.PreparedStatement)), len(msg.ParameterFormatCodes))
- for _, fc := range msg.ParameterFormatCodes {
- fmt.Fprintf(t.buf, " %d", fc)
+ for i := range msg.ParameterFormatCodes {
+ fmt.Fprintf(t.buf, " %d", msg.ParameterFormatCodes[i])
}
fmt.Fprintf(t.buf, " %d", len(msg.Parameters))
- for _, p := range msg.Parameters {
- fmt.Fprintf(t.buf, " %s", traceSingleQuotedString(p))
+ for i := range msg.Parameters {
+ fmt.Fprintf(t.buf, " %s", traceSingleQuotedString(msg.Parameters[i]))
}
fmt.Fprintf(t.buf, " %d", len(msg.ResultFormatCodes))
- for _, fc := range msg.ResultFormatCodes {
- fmt.Fprintf(t.buf, " %d", fc)
+ for i := range msg.ResultFormatCodes {
+ fmt.Fprintf(t.buf, " %d", msg.ResultFormatCodes[i])
}
})
}
@@ -236,11 +236,11 @@ func (t *tracer) traceCopyOutResponse(sender byte, encodedLen int32, msg *CopyOu
func (t *tracer) traceDataRow(sender byte, encodedLen int32, msg *DataRow) {
t.writeTrace(sender, encodedLen, "DataRow", func() {
fmt.Fprintf(t.buf, "\t %d", len(msg.Values))
- for _, v := range msg.Values {
- if v == nil {
+ for i := range msg.Values {
+ if msg.Values[i] == nil {
t.buf.WriteString(" -1")
} else {
- fmt.Fprintf(t.buf, " %d %s", len(v), traceSingleQuotedString(v))
+ fmt.Fprintf(t.buf, " %d %s", len(msg.Values[i]), traceSingleQuotedString(msg.Values[i]))
}
}
})
@@ -309,8 +309,8 @@ func (t *tracer) traceParameterStatus(sender byte, encodedLen int32, msg *Parame
func (t *tracer) traceParse(sender byte, encodedLen int32, msg *Parse) {
t.writeTrace(sender, encodedLen, "Parse", func() {
fmt.Fprintf(t.buf, "\t %s %s %d", traceDoubleQuotedString([]byte(msg.Name)), traceDoubleQuotedString([]byte(msg.Query)), len(msg.ParameterOIDs))
- for _, oid := range msg.ParameterOIDs {
- fmt.Fprintf(t.buf, " %d", oid)
+ for i := range msg.ParameterOIDs {
+ fmt.Fprintf(t.buf, " %d", msg.ParameterOIDs[i])
}
})
}
@@ -338,8 +338,8 @@ func (t *tracer) traceReadyForQuery(sender byte, encodedLen int32, msg *ReadyFor
func (t *tracer) traceRowDescription(sender byte, encodedLen int32, msg *RowDescription) {
t.writeTrace(sender, encodedLen, "RowDescription", func() {
fmt.Fprintf(t.buf, "\t %d", len(msg.Fields))
- for _, fd := range msg.Fields {
- fmt.Fprintf(t.buf, ` %s %d %d %d %d %d %d`, traceDoubleQuotedString(fd.Name), fd.TableOID, fd.TableAttributeNumber, fd.DataTypeOID, fd.DataTypeSize, fd.TypeModifier, fd.Format)
+ for i := range msg.Fields {
+ fmt.Fprintf(t.buf, ` %s %d %d %d %d %d %d`, traceDoubleQuotedString(msg.Fields[i].Name), msg.Fields[i].TableOID, msg.Fields[i].TableAttributeNumber, msg.Fields[i].DataTypeOID, msg.Fields[i].DataTypeSize, msg.Fields[i].TypeModifier, msg.Fields[i].Format)
}
})
}
@@ -403,11 +403,11 @@ func traceSingleQuotedString(buf []byte) string {
sb := &strings.Builder{}
sb.WriteByte('\'')
- for _, b := range buf {
- if b < 32 || b > 126 {
- fmt.Fprintf(sb, `\x%x`, b)
+ for i := range buf {
+ if buf[i] < 32 || buf[i] > 126 {
+ fmt.Fprintf(sb, `\x%x`, buf[i])
} else {
- sb.WriteByte(b)
+ sb.WriteByte(buf[i])
}
}
sb.WriteByte('\'')
diff --git a/pgtype/array.go b/pgtype/array.go
index 872a08891..bb2415f8e 100644
--- a/pgtype/array.go
+++ b/pgtype/array.go
@@ -34,8 +34,8 @@ func cardinality(dimensions []ArrayDimension) int {
}
elementCount := int(dimensions[0].Length)
- for _, d := range dimensions[1:] {
- elementCount *= int(d.Length)
+ for i := range dimensions[1:] {
+ elementCount *= int(dimensions[i+1].Length)
}
return elementCount
@@ -334,8 +334,8 @@ func arrayParseInteger(buf *bytes.Buffer) (int32, error) {
func encodeTextArrayDimensions(buf []byte, dimensions []ArrayDimension) []byte {
var customDimensions bool
- for _, dim := range dimensions {
- if dim.LowerBound != 1 {
+ for i := range dimensions {
+ if dimensions[i].LowerBound != 1 {
customDimensions = true
}
}
@@ -344,11 +344,11 @@ func encodeTextArrayDimensions(buf []byte, dimensions []ArrayDimension) []byte {
return buf
}
- for _, dim := range dimensions {
+ for i := range dimensions {
buf = append(buf, '[')
- buf = append(buf, strconv.FormatInt(int64(dim.LowerBound), 10)...)
+ buf = append(buf, strconv.FormatInt(int64(dimensions[i].LowerBound), 10)...)
buf = append(buf, ':')
- buf = append(buf, strconv.FormatInt(int64(dim.LowerBound+dim.Length-1), 10)...)
+ buf = append(buf, strconv.FormatInt(int64(dimensions[i].LowerBound+dimensions[i].Length-1), 10)...)
buf = append(buf, ']')
}
diff --git a/pgtype/array_codec.go b/pgtype/array_codec.go
index 2a3c50cab..1931e24cf 100644
--- a/pgtype/array_codec.go
+++ b/pgtype/array_codec.go
@@ -123,8 +123,8 @@ func (p *encodePlanArrayCodecText) Encode(value any, buf []byte) (newBuf []byte,
buf = append(buf, ',')
}
- for _, dec := range dimElemCounts {
- if i%dec == 0 {
+ for j := range dimElemCounts {
+ if i%dimElemCounts[j] == 0 {
buf = append(buf, '{')
}
}
@@ -152,8 +152,8 @@ func (p *encodePlanArrayCodecText) Encode(value any, buf []byte) (newBuf []byte,
buf = append(buf, quoteArrayElementIfNeeded(string(elemBuf))...)
}
- for _, dec := range dimElemCounts {
- if (i+1)%dec == 0 {
+ for j := range dimElemCounts {
+ if (i+1)%dimElemCounts[j] == 0 {
buf = append(buf, '}')
}
}
@@ -308,11 +308,11 @@ func (c *ArrayCodec) decodeText(m *Map, arrayOID uint32, src []byte, array Array
elementScanPlan = m.PlanScan(c.ElementType.OID, TextFormatCode, array.ScanIndex(0))
}
- for i, s := range uta.Elements {
+ for i := range uta.Elements {
elem := array.ScanIndex(i)
var elemSrc []byte
- if s != "NULL" || uta.Quoted[i] {
- elemSrc = []byte(s)
+ if uta.Elements[i] != "NULL" || uta.Quoted[i] {
+ elemSrc = []byte(uta.Elements[i])
}
err = elementScanPlan.Scan(elemSrc, elem)
@@ -386,15 +386,15 @@ func isRagged(slice reflect.Value) bool {
return false
}
- sliceLen := slice.Len()
- innerLen := 0
+ var (
+ sliceLen = slice.Len()
+ innerLen = 0
+ )
for i := range sliceLen {
if i == 0 {
innerLen = slice.Index(i).Len()
- } else {
- if slice.Index(i).Len() != innerLen {
- return true
- }
+ } else if slice.Index(i).Len() != innerLen {
+ return true
}
if isRagged(slice.Index(i)) {
return true
diff --git a/pgtype/bits.go b/pgtype/bits.go
index 2a48e3549..25ec25c56 100644
--- a/pgtype/bits.go
+++ b/pgtype/bits.go
@@ -115,7 +115,7 @@ func (encodePlanBitsCodecText) Encode(value any, buf []byte) (newBuf []byte, err
return nil, nil
}
- for i := int32(0); i < bits.Len; i++ {
+ for i := range bits.Len {
byteIdx := i / 8
bitMask := byte(128 >> byte(i%8))
char := byte('0')
@@ -199,11 +199,11 @@ func (scanPlanTextAnyToBitsScanner) Scan(src []byte, dst any) error {
}
buf := make([]byte, byteLen)
- for i, b := range src {
- if b == '1' {
+ for i := range src {
+ if src[i] == '1' {
byteIdx := i / 8
bitIdx := uint(i % 8)
- buf[byteIdx] = buf[byteIdx] | (128 >> bitIdx)
+ buf[byteIdx] |= (128 >> bitIdx)
}
}
diff --git a/pgtype/bits_test.go b/pgtype/bits_test.go
index d517df2b8..58804c0ca 100644
--- a/pgtype/bits_test.go
+++ b/pgtype/bits_test.go
@@ -20,38 +20,38 @@ func isExpectedEqBits(a any) func(any) bool {
func TestBitsCodecBit(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "bit(40)", []pgxtest.ValueRoundTripTest{
{
- pgtype.Bits{Bytes: []byte{0, 0, 0, 0, 0}, Len: 40, Valid: true},
- new(pgtype.Bits),
- isExpectedEqBits(pgtype.Bits{Bytes: []byte{0, 0, 0, 0, 0}, Len: 40, Valid: true}),
+ Param: pgtype.Bits{Bytes: []byte{0, 0, 0, 0, 0}, Len: 40, Valid: true},
+ Result: new(pgtype.Bits),
+ Test: isExpectedEqBits(pgtype.Bits{Bytes: []byte{0, 0, 0, 0, 0}, Len: 40, Valid: true}),
},
{
- pgtype.Bits{Bytes: []byte{0, 1, 128, 254, 255}, Len: 40, Valid: true},
- new(pgtype.Bits),
- isExpectedEqBits(pgtype.Bits{Bytes: []byte{0, 1, 128, 254, 255}, Len: 40, Valid: true}),
+ Param: pgtype.Bits{Bytes: []byte{0, 1, 128, 254, 255}, Len: 40, Valid: true},
+ Result: new(pgtype.Bits),
+ Test: isExpectedEqBits(pgtype.Bits{Bytes: []byte{0, 1, 128, 254, 255}, Len: 40, Valid: true}),
},
- {pgtype.Bits{}, new(pgtype.Bits), isExpectedEqBits(pgtype.Bits{})},
- {nil, new(pgtype.Bits), isExpectedEqBits(pgtype.Bits{})},
+ {Param: pgtype.Bits{}, Result: new(pgtype.Bits), Test: isExpectedEqBits(pgtype.Bits{})},
+ {Param: nil, Result: new(pgtype.Bits), Test: isExpectedEqBits(pgtype.Bits{})},
})
}
func TestBitsCodecVarbit(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "varbit", []pgxtest.ValueRoundTripTest{
{
- pgtype.Bits{Bytes: []byte{}, Len: 0, Valid: true},
- new(pgtype.Bits),
- isExpectedEqBits(pgtype.Bits{Bytes: []byte{}, Len: 0, Valid: true}),
+ Param: pgtype.Bits{Bytes: []byte{}, Len: 0, Valid: true},
+ Result: new(pgtype.Bits),
+ Test: isExpectedEqBits(pgtype.Bits{Bytes: []byte{}, Len: 0, Valid: true}),
},
{
- pgtype.Bits{Bytes: []byte{0, 1, 128, 254, 255}, Len: 40, Valid: true},
- new(pgtype.Bits),
- isExpectedEqBits(pgtype.Bits{Bytes: []byte{0, 1, 128, 254, 255}, Len: 40, Valid: true}),
+ Param: pgtype.Bits{Bytes: []byte{0, 1, 128, 254, 255}, Len: 40, Valid: true},
+ Result: new(pgtype.Bits),
+ Test: isExpectedEqBits(pgtype.Bits{Bytes: []byte{0, 1, 128, 254, 255}, Len: 40, Valid: true}),
},
{
- pgtype.Bits{Bytes: []byte{0, 1, 128, 254, 128}, Len: 33, Valid: true},
- new(pgtype.Bits),
- isExpectedEqBits(pgtype.Bits{Bytes: []byte{0, 1, 128, 254, 128}, Len: 33, Valid: true}),
+ Param: pgtype.Bits{Bytes: []byte{0, 1, 128, 254, 128}, Len: 33, Valid: true},
+ Result: new(pgtype.Bits),
+ Test: isExpectedEqBits(pgtype.Bits{Bytes: []byte{0, 1, 128, 254, 128}, Len: 33, Valid: true}),
},
- {pgtype.Bits{}, new(pgtype.Bits), isExpectedEqBits(pgtype.Bits{})},
- {nil, new(pgtype.Bits), isExpectedEqBits(pgtype.Bits{})},
+ {Param: pgtype.Bits{}, Result: new(pgtype.Bits), Test: isExpectedEqBits(pgtype.Bits{})},
+ {Param: nil, Result: new(pgtype.Bits), Test: isExpectedEqBits(pgtype.Bits{})},
})
}
diff --git a/pgtype/bool_test.go b/pgtype/bool_test.go
index 7480471b9..310eac325 100644
--- a/pgtype/bool_test.go
+++ b/pgtype/bool_test.go
@@ -10,11 +10,11 @@ import (
func TestBoolCodec(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "bool", []pgxtest.ValueRoundTripTest{
- {true, new(bool), isExpectedEq(true)},
- {false, new(bool), isExpectedEq(false)},
- {true, new(pgtype.Bool), isExpectedEq(pgtype.Bool{Bool: true, Valid: true})},
- {pgtype.Bool{}, new(pgtype.Bool), isExpectedEq(pgtype.Bool{})},
- {nil, new(*bool), isExpectedEq((*bool)(nil))},
+ {Param: true, Result: new(bool), Test: isExpectedEq(true)},
+ {Param: false, Result: new(bool), Test: isExpectedEq(false)},
+ {Param: true, Result: new(pgtype.Bool), Test: isExpectedEq(pgtype.Bool{Bool: true, Valid: true})},
+ {Param: pgtype.Bool{}, Result: new(pgtype.Bool), Test: isExpectedEq(pgtype.Bool{})},
+ {Param: nil, Result: new(*bool), Test: isExpectedEq((*bool)(nil))},
})
}
diff --git a/pgtype/box_test.go b/pgtype/box_test.go
index 3b54c1f83..3a1790e5a 100644
--- a/pgtype/box_test.go
+++ b/pgtype/box_test.go
@@ -13,28 +13,28 @@ func TestBoxCodec(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "box", []pgxtest.ValueRoundTripTest{
{
- pgtype.Box{
+ Param: pgtype.Box{
P: [2]pgtype.Vec2{{7.1, 5.2345678}, {3.14, 1.678}},
Valid: true,
},
- new(pgtype.Box),
- isExpectedEq(pgtype.Box{
+ Result: new(pgtype.Box),
+ Test: isExpectedEq(pgtype.Box{
P: [2]pgtype.Vec2{{7.1, 5.2345678}, {3.14, 1.678}},
Valid: true,
}),
},
{
- pgtype.Box{
+ Param: pgtype.Box{
P: [2]pgtype.Vec2{{7.1, 5.2345678}, {-13.14, -5.234}},
Valid: true,
},
- new(pgtype.Box),
- isExpectedEq(pgtype.Box{
+ Result: new(pgtype.Box),
+ Test: isExpectedEq(pgtype.Box{
P: [2]pgtype.Vec2{{7.1, 5.2345678}, {-13.14, -5.234}},
Valid: true,
}),
},
- {pgtype.Box{}, new(pgtype.Box), isExpectedEq(pgtype.Box{})},
- {nil, new(pgtype.Box), isExpectedEq(pgtype.Box{})},
+ {Param: pgtype.Box{}, Result: new(pgtype.Box), Test: isExpectedEq(pgtype.Box{})},
+ {Param: nil, Result: new(pgtype.Box), Test: isExpectedEq(pgtype.Box{})},
})
}
diff --git a/pgtype/builtin_wrappers.go b/pgtype/builtin_wrappers.go
index 126e0be2a..e2e3731e4 100644
--- a/pgtype/builtin_wrappers.go
+++ b/pgtype/builtin_wrappers.go
@@ -599,7 +599,7 @@ func (w *netipAddrWrapper) ScanNetipPrefix(v netip.Prefix) error {
}
func (w netipAddrWrapper) NetipPrefixValue() (netip.Prefix, error) {
- addr := (netip.Addr)(w)
+ addr := netip.Addr(w)
if !addr.IsValid() {
return netip.Prefix{}, nil
}
@@ -622,11 +622,11 @@ type mapStringToStringWrapper map[string]string
func (w *mapStringToStringWrapper) ScanHstore(v Hstore) error {
*w = make(mapStringToStringWrapper, len(v))
- for k, v := range v {
- if v == nil {
+ for k := range v {
+ if v[k] == nil {
return fmt.Errorf("cannot scan NULL to string")
}
- (*w)[k] = *v
+ (*w)[k] = *v[k]
}
return nil
}
@@ -637,9 +637,9 @@ func (w mapStringToStringWrapper) HstoreValue() (Hstore, error) {
}
hstore := make(Hstore, len(w))
- for k, v := range w {
- s := v
- hstore[k] = &s
+ for k := range w {
+ s := w[k]
+ hstore[k] = &s
}
return hstore, nil
}
@@ -822,12 +822,12 @@ func (a *anyMultiDimSliceArray) Index(i int) any {
for j := len(a.dims) - 1; j >= 0; j-- {
dimLen := int(a.dims[j].Length)
indexes[j] = i % dimLen
- i = i / dimLen
+ i /= dimLen
}
v := a.slice
- for _, si := range indexes {
- v = v.Index(si)
+ for j := range indexes {
+ v = v.Index(indexes[j])
}
return v.Interface()
diff --git a/pgtype/bytea_test.go b/pgtype/bytea_test.go
index ccd147f6f..7a9822a29 100644
--- a/pgtype/bytea_test.go
+++ b/pgtype/bytea_test.go
@@ -31,10 +31,10 @@ func isExpectedEqBytes(a any) func(any) bool {
func TestByteaCodec(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "bytea", []pgxtest.ValueRoundTripTest{
- {[]byte{1, 2, 3}, new([]byte), isExpectedEqBytes([]byte{1, 2, 3})},
- {[]byte{}, new([]byte), isExpectedEqBytes([]byte{})},
- {[]byte(nil), new([]byte), isExpectedEqBytes([]byte(nil))},
- {nil, new([]byte), isExpectedEqBytes([]byte(nil))},
+ {Param: []byte{1, 2, 3}, Result: new([]byte), Test: isExpectedEqBytes([]byte{1, 2, 3})},
+ {Param: []byte{}, Result: new([]byte), Test: isExpectedEqBytes([]byte{})},
+ {Param: []byte(nil), Result: new([]byte), Test: isExpectedEqBytes([]byte(nil))},
+ {Param: nil, Result: new([]byte), Test: isExpectedEqBytes([]byte(nil))},
})
}
@@ -57,9 +57,11 @@ func TestDriverBytes(t *testing.T) {
require.NoError(t, err)
defer rows.Close()
- rowCount := 0
- resultBuf := argBuf
- detectedResultMutation := false
+ var (
+ rowCount = 0
+ resultBuf = argBuf
+ detectedResultMutation = false
+ )
for rows.Next() {
rowCount++
@@ -125,8 +127,10 @@ func TestByteaCodecDecodeDatabaseSQLValue(t *testing.T) {
buf = make([]byte, len(src))
copy(buf, src)
return nil
+
default:
return fmt.Errorf("expected []byte, got %T", src)
+
}
}))
require.NoError(t, err)
diff --git a/pgtype/circle_test.go b/pgtype/circle_test.go
index 7b6db7774..6e74eb934 100644
--- a/pgtype/circle_test.go
+++ b/pgtype/circle_test.go
@@ -13,16 +13,16 @@ func TestCircleTranscode(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "circle", []pgxtest.ValueRoundTripTest{
{
- pgtype.Circle{P: pgtype.Vec2{1.234, 5.67890123}, R: 3.5, Valid: true},
- new(pgtype.Circle),
- isExpectedEq(pgtype.Circle{P: pgtype.Vec2{1.234, 5.67890123}, R: 3.5, Valid: true}),
+ Param: pgtype.Circle{P: pgtype.Vec2{1.234, 5.67890123}, R: 3.5, Valid: true},
+ Result: new(pgtype.Circle),
+ Test: isExpectedEq(pgtype.Circle{P: pgtype.Vec2{1.234, 5.67890123}, R: 3.5, Valid: true}),
},
{
- pgtype.Circle{P: pgtype.Vec2{1.234, 5.67890123}, R: 3.5, Valid: true},
- new(pgtype.Circle),
- isExpectedEq(pgtype.Circle{P: pgtype.Vec2{1.234, 5.67890123}, R: 3.5, Valid: true}),
+ Param: pgtype.Circle{P: pgtype.Vec2{1.234, 5.67890123}, R: 3.5, Valid: true},
+ Result: new(pgtype.Circle),
+ Test: isExpectedEq(pgtype.Circle{P: pgtype.Vec2{1.234, 5.67890123}, R: 3.5, Valid: true}),
},
- {pgtype.Circle{}, new(pgtype.Circle), isExpectedEq(pgtype.Circle{})},
- {nil, new(pgtype.Circle), isExpectedEq(pgtype.Circle{})},
+ {Param: pgtype.Circle{}, Result: new(pgtype.Circle), Test: isExpectedEq(pgtype.Circle{})},
+ {Param: nil, Result: new(pgtype.Circle), Test: isExpectedEq(pgtype.Circle{})},
})
}
diff --git a/pgtype/composite.go b/pgtype/composite.go
index 598cf7af9..bb338754d 100644
--- a/pgtype/composite.go
+++ b/pgtype/composite.go
@@ -38,8 +38,8 @@ type CompositeCodec struct {
}
func (c *CompositeCodec) FormatSupported(format int16) bool {
- for _, f := range c.Fields {
- if !f.Type.Codec.FormatSupported(format) {
+ for i := range c.Fields {
+ if !c.Fields[i].Type.Codec.FormatSupported(format) {
return false
}
}
@@ -82,8 +82,8 @@ func (plan *encodePlanCompositeCodecCompositeIndexGetterToBinary) Encode(value a
}
builder := NewCompositeBinaryBuilder(plan.m, buf)
- for i, field := range plan.cc.Fields {
- builder.AppendValue(field.Type.OID, getter.Index(i))
+ for i := range plan.cc.Fields {
+ builder.AppendValue(plan.cc.Fields[i].Type.OID, getter.Index(i))
}
return builder.Finish()
@@ -102,8 +102,8 @@ func (plan *encodePlanCompositeCodecCompositeIndexGetterToText) Encode(value any
}
b := NewCompositeTextBuilder(plan.m, buf)
- for i, field := range plan.cc.Fields {
- b.AppendValue(field.Type.OID, getter.Index(i))
+ for i := range plan.cc.Fields {
+ b.AppendValue(plan.cc.Fields[i].Type.OID, getter.Index(i))
}
return b.Finish()
@@ -139,13 +139,13 @@ func (plan *scanPlanBinaryCompositeToCompositeIndexScanner) Scan(src []byte, tar
}
scanner := NewCompositeBinaryScanner(plan.m, src)
- for i, field := range plan.cc.Fields {
+ for i := range plan.cc.Fields {
if scanner.Next() {
fieldTarget := targetScanner.ScanIndex(i)
if fieldTarget != nil {
- fieldPlan := plan.m.PlanScan(field.Type.OID, BinaryFormatCode, fieldTarget)
+ fieldPlan := plan.m.PlanScan(plan.cc.Fields[i].Type.OID, BinaryFormatCode, fieldTarget)
if fieldPlan == nil {
- return fmt.Errorf("unable to encode %v into OID %d in binary format", field, field.Type.OID)
+ return fmt.Errorf("unable to encode %v into OID %d in binary format", plan.cc.Fields[i], plan.cc.Fields[i].Type.OID)
}
err := fieldPlan.Scan(scanner.Bytes(), fieldTarget)
@@ -178,13 +178,13 @@ func (plan *scanPlanTextCompositeToCompositeIndexScanner) Scan(src []byte, targe
}
scanner := NewCompositeTextScanner(plan.m, src)
- for i, field := range plan.cc.Fields {
+ for i := range plan.cc.Fields {
if scanner.Next() {
fieldTarget := targetScanner.ScanIndex(i)
if fieldTarget != nil {
- fieldPlan := plan.m.PlanScan(field.Type.OID, TextFormatCode, fieldTarget)
+ fieldPlan := plan.m.PlanScan(plan.cc.Fields[i].Type.OID, TextFormatCode, fieldTarget)
if fieldPlan == nil {
- return fmt.Errorf("unable to encode %v into OID %d in text format", field, field.Type.OID)
+ return fmt.Errorf("unable to encode %v into OID %d in text format", plan.cc.Fields[i], plan.cc.Fields[i].Type.OID)
}
err := fieldPlan.Scan(scanner.Bytes(), fieldTarget)
@@ -293,7 +293,7 @@ type CompositeBinaryScanner struct {
func NewCompositeBinaryScanner(m *Map, src []byte) *CompositeBinaryScanner {
rp := 0
if len(src[rp:]) < 4 {
- return &CompositeBinaryScanner{err: fmt.Errorf("Record incomplete %v", src)}
+ return &CompositeBinaryScanner{err: fmt.Errorf("record incomplete %v", src)}
}
fieldCount := int32(binary.BigEndian.Uint32(src[rp:]))
@@ -319,7 +319,7 @@ func (cfs *CompositeBinaryScanner) Next() bool {
}
if len(cfs.src[cfs.rp:]) < 8 {
- cfs.err = fmt.Errorf("Record incomplete %v", cfs.src)
+ cfs.err = fmt.Errorf("record incomplete %v", cfs.src)
return false
}
cfs.fieldOID = binary.BigEndian.Uint32(cfs.src[cfs.rp:])
@@ -330,7 +330,7 @@ func (cfs *CompositeBinaryScanner) Next() bool {
if fieldLen >= 0 {
if len(cfs.src[cfs.rp:]) < fieldLen {
- cfs.err = fmt.Errorf("Record incomplete rp=%d src=%v", cfs.rp, cfs.src)
+ cfs.err = fmt.Errorf("record incomplete rp=%d src=%v", cfs.rp, cfs.src)
return false
}
cfs.fieldBytes = cfs.src[cfs.rp : cfs.rp+fieldLen]
diff --git a/pgtype/convert.go b/pgtype/convert.go
index 8a9cee9c3..f5228f6a3 100644
--- a/pgtype/convert.go
+++ b/pgtype/convert.go
@@ -15,7 +15,7 @@ func NullAssignTo(dst any) error {
dstVal := dstPtr.Elem()
switch dstVal.Kind() {
- case reflect.Ptr, reflect.Slice, reflect.Map:
+ case reflect.Pointer, reflect.Slice, reflect.Map:
dstVal.Set(reflect.Zero(dstVal.Type()))
return nil
}
@@ -41,32 +41,32 @@ func GetAssignToDstType(dst any) (any, bool) {
dstPtr := reflect.ValueOf(dst)
// AssignTo dst must always be a pointer
- if dstPtr.Kind() != reflect.Ptr {
+ if dstPtr.Kind() != reflect.Pointer {
return nil, false
}
dstVal := dstPtr.Elem()
// if dst is a pointer to pointer, allocate space try again with the dereferenced pointer
- if dstVal.Kind() == reflect.Ptr {
+ if dstVal.Kind() == reflect.Pointer {
dstVal.Set(reflect.New(dstVal.Type().Elem()))
return dstVal.Interface(), true
}
// if dst is pointer to a base type that has been renamed
if baseValType, ok := kindTypes[dstVal.Kind()]; ok {
- return toInterface(dstPtr, reflect.PtrTo(baseValType))
+ return toInterface(dstPtr, reflect.PointerTo(baseValType))
}
if dstVal.Kind() == reflect.Slice {
if baseElemType, ok := kindTypes[dstVal.Type().Elem().Kind()]; ok {
- return toInterface(dstPtr, reflect.PtrTo(reflect.SliceOf(baseElemType)))
+ return toInterface(dstPtr, reflect.PointerTo(reflect.SliceOf(baseElemType)))
}
}
if dstVal.Kind() == reflect.Array {
if baseElemType, ok := kindTypes[dstVal.Type().Elem().Kind()]; ok {
- return toInterface(dstPtr, reflect.PtrTo(reflect.ArrayOf(dstVal.Len(), baseElemType)))
+ return toInterface(dstPtr, reflect.PointerTo(reflect.ArrayOf(dstVal.Len(), baseElemType)))
}
}
@@ -76,7 +76,7 @@ func GetAssignToDstType(dst any) (any, bool) {
nested := dstVal.Type().Field(0).Type
if nested.Kind() == reflect.Array {
if baseElemType, ok := kindTypes[nested.Elem().Kind()]; ok {
- return toInterface(dstPtr, reflect.PtrTo(reflect.ArrayOf(nested.Len(), baseElemType)))
+ return toInterface(dstPtr, reflect.PointerTo(reflect.ArrayOf(nested.Len(), baseElemType)))
}
}
if _, ok := kindTypes[nested.Kind()]; ok && dstPtr.CanInterface() {
diff --git a/pgtype/date.go b/pgtype/date.go
index 447056860..7c3c62201 100644
--- a/pgtype/date.go
+++ b/pgtype/date.go
@@ -299,7 +299,7 @@ func (scanPlanTextAnyToDateScanner) Scan(src []byte, dst any) error {
}
// BC matched
- if len(match[4]) > 0 {
+ if match[4] != "" {
year = -year + 1
}
diff --git a/pgtype/date_test.go b/pgtype/date_test.go
index c7620fcf6..cde3957e4 100644
--- a/pgtype/date_test.go
+++ b/pgtype/date_test.go
@@ -21,21 +21,21 @@ func isExpectedEqTime(a any) func(any) bool {
func TestDateCodec(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "date", []pgxtest.ValueRoundTripTest{
- {time.Date(-100, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(-100, 1, 1, 0, 0, 0, 0, time.UTC))},
- {time.Date(-1, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(-1, 1, 1, 0, 0, 0, 0, time.UTC))},
- {time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC))},
- {time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC))},
- {time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC))},
- {time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC))},
- {time.Date(1999, 12, 31, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(1999, 12, 31, 0, 0, 0, 0, time.UTC))},
- {time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC))},
- {time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC))},
- {time.Date(2200, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(2200, 1, 1, 0, 0, 0, 0, time.UTC))},
- {time.Date(12200, 1, 2, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(12200, 1, 2, 0, 0, 0, 0, time.UTC))},
- {pgtype.Date{InfinityModifier: pgtype.Infinity, Valid: true}, new(pgtype.Date), isExpectedEq(pgtype.Date{InfinityModifier: pgtype.Infinity, Valid: true})},
- {pgtype.Date{InfinityModifier: pgtype.NegativeInfinity, Valid: true}, new(pgtype.Date), isExpectedEq(pgtype.Date{InfinityModifier: pgtype.NegativeInfinity, Valid: true})},
- {pgtype.Date{}, new(pgtype.Date), isExpectedEq(pgtype.Date{})},
- {nil, new(*time.Time), isExpectedEq((*time.Time)(nil))},
+ {Param: time.Date(-100, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(-100, 1, 1, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(-1, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(-1, 1, 1, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(1999, 12, 31, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(1999, 12, 31, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(2200, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(2200, 1, 1, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(12200, 1, 2, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(12200, 1, 2, 0, 0, 0, 0, time.UTC))},
+ {Param: pgtype.Date{InfinityModifier: pgtype.Infinity, Valid: true}, Result: new(pgtype.Date), Test: isExpectedEq(pgtype.Date{InfinityModifier: pgtype.Infinity, Valid: true})},
+ {Param: pgtype.Date{InfinityModifier: pgtype.NegativeInfinity, Valid: true}, Result: new(pgtype.Date), Test: isExpectedEq(pgtype.Date{InfinityModifier: pgtype.NegativeInfinity, Valid: true})},
+ {Param: pgtype.Date{}, Result: new(pgtype.Date), Test: isExpectedEq(pgtype.Date{})},
+ {Param: nil, Result: new(*time.Time), Test: isExpectedEq((*time.Time)(nil))},
})
}
diff --git a/pgtype/doc.go b/pgtype/doc.go
index 83dfc5de5..ec183aac6 100644
--- a/pgtype/doc.go
+++ b/pgtype/doc.go
@@ -92,8 +92,8 @@ For example, the following function could be called after a connection is establ
"_bar",
}
- for _, typeName := range dataTypeNames {
- dataType, err := conn.LoadType(ctx, typeName)
+ for i := range dataTypeNames {
+ dataType, err := conn.LoadType(ctx, dataTypeNames[i])
if err != nil {
return err
}
diff --git a/pgtype/float4_test.go b/pgtype/float4_test.go
index bc74921cf..b7e91c8a4 100644
--- a/pgtype/float4_test.go
+++ b/pgtype/float4_test.go
@@ -10,15 +10,15 @@ import (
func TestFloat4Codec(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "float4", []pgxtest.ValueRoundTripTest{
- {pgtype.Float4{Float32: -1, Valid: true}, new(pgtype.Float4), isExpectedEq(pgtype.Float4{Float32: -1, Valid: true})},
- {pgtype.Float4{Float32: 0, Valid: true}, new(pgtype.Float4), isExpectedEq(pgtype.Float4{Float32: 0, Valid: true})},
- {pgtype.Float4{Float32: 1, Valid: true}, new(pgtype.Float4), isExpectedEq(pgtype.Float4{Float32: 1, Valid: true})},
- {float32(0.00001), new(float32), isExpectedEq(float32(0.00001))},
- {float32(9999.99), new(float32), isExpectedEq(float32(9999.99))},
- {pgtype.Float4{}, new(pgtype.Float4), isExpectedEq(pgtype.Float4{})},
- {int64(1), new(int64), isExpectedEq(int64(1))},
- {"1.23", new(string), isExpectedEq("1.23")},
- {nil, new(*float32), isExpectedEq((*float32)(nil))},
+ {Param: pgtype.Float4{Float32: -1, Valid: true}, Result: new(pgtype.Float4), Test: isExpectedEq(pgtype.Float4{Float32: -1, Valid: true})},
+ {Param: pgtype.Float4{Float32: 0, Valid: true}, Result: new(pgtype.Float4), Test: isExpectedEq(pgtype.Float4{Float32: 0, Valid: true})},
+ {Param: pgtype.Float4{Float32: 1, Valid: true}, Result: new(pgtype.Float4), Test: isExpectedEq(pgtype.Float4{Float32: 1, Valid: true})},
+ {Param: float32(0.00001), Result: new(float32), Test: isExpectedEq(float32(0.00001))},
+ {Param: float32(9999.99), Result: new(float32), Test: isExpectedEq(float32(9999.99))},
+ {Param: pgtype.Float4{}, Result: new(pgtype.Float4), Test: isExpectedEq(pgtype.Float4{})},
+ {Param: int64(1), Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: "1.23", Result: new(string), Test: isExpectedEq("1.23")},
+ {Param: nil, Result: new(*float32), Test: isExpectedEq((*float32)(nil))},
})
}
diff --git a/pgtype/float8_test.go b/pgtype/float8_test.go
index 64593d97c..aedfd19b5 100644
--- a/pgtype/float8_test.go
+++ b/pgtype/float8_test.go
@@ -10,15 +10,15 @@ import (
func TestFloat8Codec(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "float8", []pgxtest.ValueRoundTripTest{
- {pgtype.Float8{Float64: -1, Valid: true}, new(pgtype.Float8), isExpectedEq(pgtype.Float8{Float64: -1, Valid: true})},
- {pgtype.Float8{Float64: 0, Valid: true}, new(pgtype.Float8), isExpectedEq(pgtype.Float8{Float64: 0, Valid: true})},
- {pgtype.Float8{Float64: 1, Valid: true}, new(pgtype.Float8), isExpectedEq(pgtype.Float8{Float64: 1, Valid: true})},
- {float64(0.00001), new(float64), isExpectedEq(float64(0.00001))},
- {float64(9999.99), new(float64), isExpectedEq(float64(9999.99))},
- {pgtype.Float8{}, new(pgtype.Float8), isExpectedEq(pgtype.Float8{})},
- {int64(1), new(int64), isExpectedEq(int64(1))},
- {"1.23", new(string), isExpectedEq("1.23")},
- {nil, new(*float64), isExpectedEq((*float64)(nil))},
+ {Param: pgtype.Float8{Float64: -1, Valid: true}, Result: new(pgtype.Float8), Test: isExpectedEq(pgtype.Float8{Float64: -1, Valid: true})},
+ {Param: pgtype.Float8{Float64: 0, Valid: true}, Result: new(pgtype.Float8), Test: isExpectedEq(pgtype.Float8{Float64: 0, Valid: true})},
+ {Param: pgtype.Float8{Float64: 1, Valid: true}, Result: new(pgtype.Float8), Test: isExpectedEq(pgtype.Float8{Float64: 1, Valid: true})},
+ {Param: float64(0.00001), Result: new(float64), Test: isExpectedEq(float64(0.00001))},
+ {Param: float64(9999.99), Result: new(float64), Test: isExpectedEq(float64(9999.99))},
+ {Param: pgtype.Float8{}, Result: new(pgtype.Float8), Test: isExpectedEq(pgtype.Float8{})},
+ {Param: int64(1), Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: "1.23", Result: new(string), Test: isExpectedEq("1.23")},
+ {Param: nil, Result: new(*float64), Test: isExpectedEq((*float64)(nil))},
})
}
diff --git a/pgtype/hstore.go b/pgtype/hstore.go
index 61a42de88..74c8e52d8 100644
--- a/pgtype/hstore.go
+++ b/pgtype/hstore.go
@@ -100,15 +100,15 @@ func (encodePlanHstoreCodecBinary) Encode(value any, buf []byte) (newBuf []byte,
buf = pgio.AppendInt32(buf, int32(len(hstore)))
- for k, v := range hstore {
- buf = pgio.AppendInt32(buf, int32(len(k)))
- buf = append(buf, k...)
+ for k := range hstore {
+ buf = pgio.AppendInt32(buf, int32(len(k)))
+ buf = append(buf, k...)
- if v == nil {
+ if hstore[k] == nil {
buf = pgio.AppendInt32(buf, -1)
} else {
- buf = pgio.AppendInt32(buf, int32(len(*v)))
- buf = append(buf, (*v)...)
+ buf = pgio.AppendInt32(buf, int32(len(*hstore[k])))
+ buf = append(buf, (*hstore[k])...)
}
}
@@ -136,7 +136,7 @@ func (encodePlanHstoreCodecText) Encode(value any, buf []byte) (newBuf []byte, e
firstPair := true
- for k, v := range hstore {
+ for k := range hstore {
if firstPair {
firstPair = false
} else {
@@ -147,15 +147,15 @@ func (encodePlanHstoreCodecText) Encode(value any, buf []byte) (newBuf []byte, e
// this avoids a Mac OS X Postgres hstore parsing bug:
// https://www.postgresql.org/message-id/CA%2BHWA9awUW0%2BRV_gO9r1ABZwGoZxPztcJxPy8vMFSTbTfi4jig%40mail.gmail.com
buf = append(buf, '"')
- buf = append(buf, quoteArrayReplacer.Replace(k)...)
+ buf = append(buf, quoteArrayReplacer.Replace(k)...)
buf = append(buf, '"')
buf = append(buf, "=>"...)
- if v == nil {
+ if hstore[k] == nil {
buf = append(buf, "NULL"...)
} else {
buf = append(buf, '"')
- buf = append(buf, quoteArrayReplacer.Replace(*v)...)
+ buf = append(buf, quoteArrayReplacer.Replace(*hstore[k])...)
buf = append(buf, '"')
}
}
diff --git a/pgtype/hstore_test.go b/pgtype/hstore_test.go
index 658557292..e56a6a655 100644
--- a/pgtype/hstore_test.go
+++ b/pgtype/hstore_test.go
@@ -73,48 +73,48 @@ func TestHstoreCodec(t *testing.T) {
tests := []pgxtest.ValueRoundTripTest{
{
- map[string]string{},
- new(map[string]string),
- isExpectedEqMapStringString(map[string]string{}),
+ Param: map[string]string{},
+ Result: new(map[string]string),
+ Test: isExpectedEqMapStringString(map[string]string{}),
},
{
- map[string]string{"foo": "", "bar": "", "baz": "123"},
- new(map[string]string),
- isExpectedEqMapStringString(map[string]string{"foo": "", "bar": "", "baz": "123"}),
+ Param: map[string]string{"foo": "", "bar": "", "baz": "123"},
+ Result: new(map[string]string),
+ Test: isExpectedEqMapStringString(map[string]string{"foo": "", "bar": "", "baz": "123"}),
},
{
- map[string]string{"NULL": "bar"},
- new(map[string]string),
- isExpectedEqMapStringString(map[string]string{"NULL": "bar"}),
+ Param: map[string]string{"NULL": "bar"},
+ Result: new(map[string]string),
+ Test: isExpectedEqMapStringString(map[string]string{"NULL": "bar"}),
},
{
- map[string]string{"bar": "NULL"},
- new(map[string]string),
- isExpectedEqMapStringString(map[string]string{"bar": "NULL"}),
+ Param: map[string]string{"bar": "NULL"},
+ Result: new(map[string]string),
+ Test: isExpectedEqMapStringString(map[string]string{"bar": "NULL"}),
},
{
- map[string]string{"": "foo"},
- new(map[string]string),
- isExpectedEqMapStringString(map[string]string{"": "foo"}),
+ Param: map[string]string{"": "foo"},
+ Result: new(map[string]string),
+ Test: isExpectedEqMapStringString(map[string]string{"": "foo"}),
},
{
- map[string]*string{},
- new(map[string]*string),
- isExpectedEqMapStringPointerString(map[string]*string{}),
+ Param: map[string]*string{},
+ Result: new(map[string]*string),
+ Test: isExpectedEqMapStringPointerString(map[string]*string{}),
},
{
- map[string]*string{"foo": stringPtr("bar"), "baq": stringPtr("quz")},
- new(map[string]*string),
- isExpectedEqMapStringPointerString(map[string]*string{"foo": stringPtr("bar"), "baq": stringPtr("quz")}),
+ Param: map[string]*string{"foo": stringPtr("bar"), "baq": stringPtr("quz")},
+ Result: new(map[string]*string),
+ Test: isExpectedEqMapStringPointerString(map[string]*string{"foo": stringPtr("bar"), "baq": stringPtr("quz")}),
},
{
- map[string]*string{"foo": nil, "baq": stringPtr("quz")},
- new(map[string]*string),
- isExpectedEqMapStringPointerString(map[string]*string{"foo": nil, "baq": stringPtr("quz")}),
+ Param: map[string]*string{"foo": nil, "baq": stringPtr("quz")},
+ Result: new(map[string]*string),
+ Test: isExpectedEqMapStringPointerString(map[string]*string{"foo": nil, "baq": stringPtr("quz")}),
},
- {nil, new(*map[string]string), isExpectedEq((*map[string]string)(nil))},
- {nil, new(*map[string]*string), isExpectedEq((*map[string]*string)(nil))},
- {nil, new(*pgtype.Hstore), isExpectedEq((*pgtype.Hstore)(nil))},
+ {Param: nil, Result: new(*map[string]string), Test: isExpectedEq((*map[string]string)(nil))},
+ {Param: nil, Result: new(*map[string]*string), Test: isExpectedEq((*map[string]*string)(nil))},
+ {Param: nil, Result: new(*pgtype.Hstore), Test: isExpectedEq((*pgtype.Hstore)(nil))},
}
specialStrings := []string{
@@ -140,54 +140,54 @@ func TestHstoreCodec(t *testing.T) {
// at beginning
tests = append(tests, pgxtest.ValueRoundTripTest{
- map[string]string{s + "foo": "bar"},
- new(map[string]string),
- isExpectedEqMapStringString(map[string]string{s + "foo": "bar"}),
+ Param: map[string]string{s + "foo": "bar"},
+ Result: new(map[string]string),
+ Test: isExpectedEqMapStringString(map[string]string{s + "foo": "bar"}),
})
// in middle
tests = append(tests, pgxtest.ValueRoundTripTest{
- map[string]string{"foo" + s + "bar": "bar"},
- new(map[string]string),
- isExpectedEqMapStringString(map[string]string{"foo" + s + "bar": "bar"}),
+ Param: map[string]string{"foo" + s + "bar": "bar"},
+ Result: new(map[string]string),
+ Test: isExpectedEqMapStringString(map[string]string{"foo" + s + "bar": "bar"}),
})
// at end
tests = append(tests, pgxtest.ValueRoundTripTest{
- map[string]string{"foo" + s: "bar"},
- new(map[string]string),
- isExpectedEqMapStringString(map[string]string{"foo" + s: "bar"}),
+ Param: map[string]string{"foo" + s: "bar"},
+ Result: new(map[string]string),
+ Test: isExpectedEqMapStringString(map[string]string{"foo" + s: "bar"}),
})
// is key
tests = append(tests, pgxtest.ValueRoundTripTest{
- map[string]string{s: "bar"},
- new(map[string]string),
- isExpectedEqMapStringString(map[string]string{s: "bar"}),
+ Param: map[string]string{s: "bar"},
+ Result: new(map[string]string),
+ Test: isExpectedEqMapStringString(map[string]string{s: "bar"}),
})
// Special value values
// at beginning
tests = append(tests, pgxtest.ValueRoundTripTest{
- map[string]string{"foo": s + "bar"},
- new(map[string]string),
- isExpectedEqMapStringString(map[string]string{"foo": s + "bar"}),
+ Param: map[string]string{"foo": s + "bar"},
+ Result: new(map[string]string),
+ Test: isExpectedEqMapStringString(map[string]string{"foo": s + "bar"}),
})
// in middle
tests = append(tests, pgxtest.ValueRoundTripTest{
- map[string]string{"foo": "foo" + s + "bar"},
- new(map[string]string),
- isExpectedEqMapStringString(map[string]string{"foo": "foo" + s + "bar"}),
+ Param: map[string]string{"foo": "foo" + s + "bar"},
+ Result: new(map[string]string),
+ Test: isExpectedEqMapStringString(map[string]string{"foo": "foo" + s + "bar"}),
})
// at end
tests = append(tests, pgxtest.ValueRoundTripTest{
- map[string]string{"foo": "foo" + s},
- new(map[string]string),
- isExpectedEqMapStringString(map[string]string{"foo": "foo" + s}),
+ Param: map[string]string{"foo": "foo" + s},
+ Result: new(map[string]string),
+ Test: isExpectedEqMapStringString(map[string]string{"foo": "foo" + s}),
})
// is key
tests = append(tests, pgxtest.ValueRoundTripTest{
- map[string]string{"foo": s},
- new(map[string]string),
- isExpectedEqMapStringString(map[string]string{"foo": s}),
+ Param: map[string]string{"foo": s},
+ Result: new(map[string]string),
+ Test: isExpectedEqMapStringString(map[string]string{"foo": s}),
})
}
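The test tables above (and the int tests further down) now spell out the pgxtest.ValueRoundTripTest fields by name. The struct definition itself is not part of this diff; judging from the call sites, it presumably has roughly this shape.

    // Assumed shape of pgxtest.ValueRoundTripTest, inferred from the named
    // fields used above (the definition itself is not part of this diff).
    type ValueRoundTripTest struct {
        Param  any            // value sent to the server as the query parameter
        Result any            // pointer the round-tripped value is scanned into
        Test   func(any) bool // predicate applied to the scanned value
    }

Spelling out the field names keeps these long literals valid even if fields are later added to or reordered in the struct, which is presumably the point of the conversion.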
diff --git a/pgtype/inet_test.go b/pgtype/inet_test.go
index f4b43dafe..7747c0eff 100644
--- a/pgtype/inet_test.go
+++ b/pgtype/inet_test.go
@@ -20,53 +20,53 @@ func isExpectedEqIPNet(a any) func(any) bool {
func TestInetTranscode(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "inet", []pgxtest.ValueRoundTripTest{
- {mustParseInet(t, "0.0.0.0/32"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "0.0.0.0/32"))},
- {mustParseInet(t, "127.0.0.1/8"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "127.0.0.1/8"))},
- {mustParseInet(t, "12.34.56.65/32"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "12.34.56.65/32"))},
- {mustParseInet(t, "192.168.1.16/24"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "192.168.1.16/24"))},
- {mustParseInet(t, "255.0.0.0/8"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "255.0.0.0/8"))},
- {mustParseInet(t, "255.255.255.255/32"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "255.255.255.255/32"))},
- {mustParseInet(t, "2607:f8b0:4009:80b::200e"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "2607:f8b0:4009:80b::200e"))},
- {mustParseInet(t, "::1/64"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "::1/64"))},
- {mustParseInet(t, "::/0"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "::/0"))},
- {mustParseInet(t, "::1/128"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "::1/128"))},
- {mustParseInet(t, "2607:f8b0:4009:80b::200e/64"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "2607:f8b0:4009:80b::200e/64"))},
+ {Param: mustParseInet(t, "0.0.0.0/32"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "0.0.0.0/32"))},
+ {Param: mustParseInet(t, "127.0.0.1/8"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "127.0.0.1/8"))},
+ {Param: mustParseInet(t, "12.34.56.65/32"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "12.34.56.65/32"))},
+ {Param: mustParseInet(t, "192.168.1.16/24"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "192.168.1.16/24"))},
+ {Param: mustParseInet(t, "255.0.0.0/8"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "255.0.0.0/8"))},
+ {Param: mustParseInet(t, "255.255.255.255/32"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "255.255.255.255/32"))},
+ {Param: mustParseInet(t, "2607:f8b0:4009:80b::200e"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "2607:f8b0:4009:80b::200e"))},
+ {Param: mustParseInet(t, "::1/64"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "::1/64"))},
+ {Param: mustParseInet(t, "::/0"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "::/0"))},
+ {Param: mustParseInet(t, "::1/128"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "::1/128"))},
+ {Param: mustParseInet(t, "2607:f8b0:4009:80b::200e/64"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "2607:f8b0:4009:80b::200e/64"))},
- {mustParseInet(t, "0.0.0.0/32"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("0.0.0.0/32"))},
- {mustParseInet(t, "127.0.0.1/8"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("127.0.0.1/8"))},
- {mustParseInet(t, "12.34.56.65/32"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("12.34.56.65/32"))},
- {mustParseInet(t, "192.168.1.16/24"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("192.168.1.16/24"))},
- {mustParseInet(t, "255.0.0.0/8"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("255.0.0.0/8"))},
- {mustParseInet(t, "255.255.255.255/32"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("255.255.255.255/32"))},
- {mustParseInet(t, "2607:f8b0:4009:80b::200e"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("2607:f8b0:4009:80b::200e/128"))},
- {mustParseInet(t, "::1/64"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("::1/64"))},
- {mustParseInet(t, "::/0"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("::/0"))},
- {mustParseInet(t, "::1/128"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("::1/128"))},
- {mustParseInet(t, "2607:f8b0:4009:80b::200e/64"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("2607:f8b0:4009:80b::200e/64"))},
+ {Param: mustParseInet(t, "0.0.0.0/32"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("0.0.0.0/32"))},
+ {Param: mustParseInet(t, "127.0.0.1/8"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("127.0.0.1/8"))},
+ {Param: mustParseInet(t, "12.34.56.65/32"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("12.34.56.65/32"))},
+ {Param: mustParseInet(t, "192.168.1.16/24"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("192.168.1.16/24"))},
+ {Param: mustParseInet(t, "255.0.0.0/8"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("255.0.0.0/8"))},
+ {Param: mustParseInet(t, "255.255.255.255/32"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("255.255.255.255/32"))},
+ {Param: mustParseInet(t, "2607:f8b0:4009:80b::200e"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("2607:f8b0:4009:80b::200e/128"))},
+ {Param: mustParseInet(t, "::1/64"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("::1/64"))},
+ {Param: mustParseInet(t, "::/0"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("::/0"))},
+ {Param: mustParseInet(t, "::1/128"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("::1/128"))},
+ {Param: mustParseInet(t, "2607:f8b0:4009:80b::200e/64"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("2607:f8b0:4009:80b::200e/64"))},
- {netip.MustParsePrefix("0.0.0.0/32"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("0.0.0.0/32"))},
- {netip.MustParsePrefix("127.0.0.1/8"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("127.0.0.1/8"))},
- {netip.MustParsePrefix("12.34.56.65/32"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("12.34.56.65/32"))},
- {netip.MustParsePrefix("192.168.1.16/24"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("192.168.1.16/24"))},
- {netip.MustParsePrefix("255.0.0.0/8"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("255.0.0.0/8"))},
- {netip.MustParsePrefix("255.255.255.255/32"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("255.255.255.255/32"))},
- {netip.MustParsePrefix("::1/64"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("::1/64"))},
- {netip.MustParsePrefix("::/0"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("::/0"))},
- {netip.MustParsePrefix("::1/128"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("::1/128"))},
- {netip.MustParsePrefix("2607:f8b0:4009:80b::200e/64"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("2607:f8b0:4009:80b::200e/64"))},
+ {Param: netip.MustParsePrefix("0.0.0.0/32"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("0.0.0.0/32"))},
+ {Param: netip.MustParsePrefix("127.0.0.1/8"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("127.0.0.1/8"))},
+ {Param: netip.MustParsePrefix("12.34.56.65/32"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("12.34.56.65/32"))},
+ {Param: netip.MustParsePrefix("192.168.1.16/24"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("192.168.1.16/24"))},
+ {Param: netip.MustParsePrefix("255.0.0.0/8"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("255.0.0.0/8"))},
+ {Param: netip.MustParsePrefix("255.255.255.255/32"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("255.255.255.255/32"))},
+ {Param: netip.MustParsePrefix("::1/64"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("::1/64"))},
+ {Param: netip.MustParsePrefix("::/0"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("::/0"))},
+ {Param: netip.MustParsePrefix("::1/128"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("::1/128"))},
+ {Param: netip.MustParsePrefix("2607:f8b0:4009:80b::200e/64"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("2607:f8b0:4009:80b::200e/64"))},
- {netip.MustParseAddr("0.0.0.0"), new(netip.Addr), isExpectedEq(netip.MustParseAddr("0.0.0.0"))},
- {netip.MustParseAddr("127.0.0.1"), new(netip.Addr), isExpectedEq(netip.MustParseAddr("127.0.0.1"))},
- {netip.MustParseAddr("12.34.56.65"), new(netip.Addr), isExpectedEq(netip.MustParseAddr("12.34.56.65"))},
- {netip.MustParseAddr("192.168.1.16"), new(netip.Addr), isExpectedEq(netip.MustParseAddr("192.168.1.16"))},
- {netip.MustParseAddr("255.0.0.0"), new(netip.Addr), isExpectedEq(netip.MustParseAddr("255.0.0.0"))},
- {netip.MustParseAddr("255.255.255.255"), new(netip.Addr), isExpectedEq(netip.MustParseAddr("255.255.255.255"))},
- {netip.MustParseAddr("2607:f8b0:4009:80b::200e"), new(netip.Addr), isExpectedEq(netip.MustParseAddr("2607:f8b0:4009:80b::200e"))},
- {netip.MustParseAddr("::1"), new(netip.Addr), isExpectedEq(netip.MustParseAddr("::1"))},
- {netip.MustParseAddr("::"), new(netip.Addr), isExpectedEq(netip.MustParseAddr("::"))},
- {netip.MustParseAddr("2607:f8b0:4009:80b::200e"), new(netip.Addr), isExpectedEq(netip.MustParseAddr("2607:f8b0:4009:80b::200e"))},
+ {Param: netip.MustParseAddr("0.0.0.0"), Result: new(netip.Addr), Test: isExpectedEq(netip.MustParseAddr("0.0.0.0"))},
+ {Param: netip.MustParseAddr("127.0.0.1"), Result: new(netip.Addr), Test: isExpectedEq(netip.MustParseAddr("127.0.0.1"))},
+ {Param: netip.MustParseAddr("12.34.56.65"), Result: new(netip.Addr), Test: isExpectedEq(netip.MustParseAddr("12.34.56.65"))},
+ {Param: netip.MustParseAddr("192.168.1.16"), Result: new(netip.Addr), Test: isExpectedEq(netip.MustParseAddr("192.168.1.16"))},
+ {Param: netip.MustParseAddr("255.0.0.0"), Result: new(netip.Addr), Test: isExpectedEq(netip.MustParseAddr("255.0.0.0"))},
+ {Param: netip.MustParseAddr("255.255.255.255"), Result: new(netip.Addr), Test: isExpectedEq(netip.MustParseAddr("255.255.255.255"))},
+ {Param: netip.MustParseAddr("2607:f8b0:4009:80b::200e"), Result: new(netip.Addr), Test: isExpectedEq(netip.MustParseAddr("2607:f8b0:4009:80b::200e"))},
+ {Param: netip.MustParseAddr("::1"), Result: new(netip.Addr), Test: isExpectedEq(netip.MustParseAddr("::1"))},
+ {Param: netip.MustParseAddr("::"), Result: new(netip.Addr), Test: isExpectedEq(netip.MustParseAddr("::"))},
+ {Param: netip.MustParseAddr("2607:f8b0:4009:80b::200e"), Result: new(netip.Addr), Test: isExpectedEq(netip.MustParseAddr("2607:f8b0:4009:80b::200e"))},
- {nil, new(netip.Prefix), isExpectedEq(netip.Prefix{})},
+ {Param: nil, Result: new(netip.Prefix), Test: isExpectedEq(netip.Prefix{})},
})
}
@@ -74,26 +74,26 @@ func TestCidrTranscode(t *testing.T) {
skipCockroachDB(t, "Server does not support cidr type (see https://github.com/cockroachdb/cockroach/issues/18846)")
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "cidr", []pgxtest.ValueRoundTripTest{
- {mustParseInet(t, "0.0.0.0/32"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "0.0.0.0/32"))},
- {mustParseInet(t, "127.0.0.1/32"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "127.0.0.1/32"))},
- {mustParseInet(t, "12.34.56.0/32"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "12.34.56.0/32"))},
- {mustParseInet(t, "192.168.1.0/24"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "192.168.1.0/24"))},
- {mustParseInet(t, "255.0.0.0/8"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "255.0.0.0/8"))},
- {mustParseInet(t, "::/128"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "::/128"))},
- {mustParseInet(t, "::/0"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "::/0"))},
- {mustParseInet(t, "::1/128"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "::1/128"))},
- {mustParseInet(t, "2607:f8b0:4009:80b::200e/128"), new(net.IPNet), isExpectedEqIPNet(mustParseInet(t, "2607:f8b0:4009:80b::200e/128"))},
+ {Param: mustParseInet(t, "0.0.0.0/32"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "0.0.0.0/32"))},
+ {Param: mustParseInet(t, "127.0.0.1/32"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "127.0.0.1/32"))},
+ {Param: mustParseInet(t, "12.34.56.0/32"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "12.34.56.0/32"))},
+ {Param: mustParseInet(t, "192.168.1.0/24"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "192.168.1.0/24"))},
+ {Param: mustParseInet(t, "255.0.0.0/8"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "255.0.0.0/8"))},
+ {Param: mustParseInet(t, "::/128"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "::/128"))},
+ {Param: mustParseInet(t, "::/0"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "::/0"))},
+ {Param: mustParseInet(t, "::1/128"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "::1/128"))},
+ {Param: mustParseInet(t, "2607:f8b0:4009:80b::200e/128"), Result: new(net.IPNet), Test: isExpectedEqIPNet(mustParseInet(t, "2607:f8b0:4009:80b::200e/128"))},
- {netip.MustParsePrefix("0.0.0.0/32"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("0.0.0.0/32"))},
- {netip.MustParsePrefix("127.0.0.1/32"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("127.0.0.1/32"))},
- {netip.MustParsePrefix("12.34.56.0/32"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("12.34.56.0/32"))},
- {netip.MustParsePrefix("192.168.1.0/24"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("192.168.1.0/24"))},
- {netip.MustParsePrefix("255.0.0.0/8"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("255.0.0.0/8"))},
- {netip.MustParsePrefix("::/128"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("::/128"))},
- {netip.MustParsePrefix("::/0"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("::/0"))},
- {netip.MustParsePrefix("::1/128"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("::1/128"))},
- {netip.MustParsePrefix("2607:f8b0:4009:80b::200e/128"), new(netip.Prefix), isExpectedEq(netip.MustParsePrefix("2607:f8b0:4009:80b::200e/128"))},
+ {Param: netip.MustParsePrefix("0.0.0.0/32"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("0.0.0.0/32"))},
+ {Param: netip.MustParsePrefix("127.0.0.1/32"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("127.0.0.1/32"))},
+ {Param: netip.MustParsePrefix("12.34.56.0/32"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("12.34.56.0/32"))},
+ {Param: netip.MustParsePrefix("192.168.1.0/24"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("192.168.1.0/24"))},
+ {Param: netip.MustParsePrefix("255.0.0.0/8"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("255.0.0.0/8"))},
+ {Param: netip.MustParsePrefix("::/128"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("::/128"))},
+ {Param: netip.MustParsePrefix("::/0"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("::/0"))},
+ {Param: netip.MustParsePrefix("::1/128"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("::1/128"))},
+ {Param: netip.MustParsePrefix("2607:f8b0:4009:80b::200e/128"), Result: new(netip.Prefix), Test: isExpectedEq(netip.MustParsePrefix("2607:f8b0:4009:80b::200e/128"))},
- {nil, new(netip.Prefix), isExpectedEq(netip.Prefix{})},
+ {Param: nil, Result: new(netip.Prefix), Test: isExpectedEq(netip.Prefix{})},
})
}
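isExpectedEq appears throughout these tables but is not defined in this diff; from how it is used, it is presumably a tiny helper along these lines.

    // Presumed implementation of the isExpectedEq helper used throughout these
    // tables: it captures the expected value and returns a predicate that
    // compares the scanned result against it with ==.
    func isExpectedEq(expected any) func(any) bool {
        return func(actual any) bool {
            return actual == expected
        }
    }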
diff --git a/pgtype/int.go b/pgtype/int.go
index d1b8eb612..ef9d2f664 100644
--- a/pgtype/int.go
+++ b/pgtype/int.go
@@ -1170,10 +1170,10 @@ func (dst *Int8) ScanInt64(n Int8) error {
return nil
}
- if n.Int64 < math.MinInt64 {
+ if n.Int64 < math.MinInt8 {
return fmt.Errorf("%d is less than minimum value for Int8", n.Int64)
}
- if n.Int64 > math.MaxInt64 {
+ if n.Int64 > math.MaxInt8 {
return fmt.Errorf("%d is greater than maximum value for Int8", n.Int64)
}
*dst = Int8{Int64: int64(n.Int64), Valid: true}
@@ -1214,10 +1214,10 @@ func (dst *Int8) Scan(src any) error {
return fmt.Errorf("cannot scan %T", src)
}
- if n < math.MinInt64 {
+ if n < math.MinInt8 {
return fmt.Errorf("%d is greater than maximum value for Int8", n)
}
- if n > math.MaxInt64 {
+ if n > math.MaxInt8 {
return fmt.Errorf("%d is greater than maximum value for Int8", n)
}
*dst = Int8{Int64: int64(n), Valid: true}
@@ -1315,10 +1315,10 @@ func (encodePlanInt8CodecBinaryInt64Valuer) Encode(value any, buf []byte) (newBu
return nil, nil
}
- if n.Int64 > math.MaxInt64 {
+ if n.Int64 > math.MaxInt8 {
return nil, fmt.Errorf("%d is greater than maximum value for int8", n.Int64)
}
- if n.Int64 < math.MinInt64 {
+ if n.Int64 < math.MinInt8 {
return nil, fmt.Errorf("%d is less than minimum value for int8", n.Int64)
}
@@ -1337,10 +1337,10 @@ func (encodePlanInt8CodecTextInt64Valuer) Encode(value any, buf []byte) (newBuf
return nil, nil
}
- if n.Int64 > math.MaxInt64 {
+ if n.Int64 > math.MaxInt8 {
return nil, fmt.Errorf("%d is greater than maximum value for int8", n.Int64)
}
- if n.Int64 < math.MinInt64 {
+ if n.Int64 < math.MinInt8 {
return nil, fmt.Errorf("%d is less than minimum value for int8", n.Int64)
}
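The corrected guards above compare against the signed 8-bit limits instead of the tautological int64 limits (an int64 value can never exceed math.MaxInt64, so the old checks were no-ops). As a free-standing illustration of the guard shape, with a hypothetical helper name that is not part of pgtype:

    // Hypothetical free-standing helper showing the same guard shape; it is
    // not part of pgtype.
    package main

    import (
        "fmt"
        "math"
    )

    func checkInt8Range(v int64) error {
        if v > math.MaxInt8 {
            return fmt.Errorf("%d is greater than maximum value for int8", v)
        }
        if v < math.MinInt8 {
            return fmt.Errorf("%d is less than minimum value for int8", v)
        }
        return nil
    }

    func main() {
        fmt.Println(checkInt8Range(127)) // <nil>
        fmt.Println(checkInt8Range(128)) // 128 is greater than maximum value for int8
    }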
diff --git a/pgtype/int_test.go b/pgtype/int_test.go
index 8c4987691..33a5060ea 100644
--- a/pgtype/int_test.go
+++ b/pgtype/int_test.go
@@ -13,42 +13,42 @@ import (
func TestInt2Codec(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "int2", []pgxtest.ValueRoundTripTest{
- {int8(1), new(int16), isExpectedEq(int16(1))},
- {int16(1), new(int16), isExpectedEq(int16(1))},
- {int32(1), new(int16), isExpectedEq(int16(1))},
- {int64(1), new(int16), isExpectedEq(int16(1))},
- {uint8(1), new(int16), isExpectedEq(int16(1))},
- {uint16(1), new(int16), isExpectedEq(int16(1))},
- {uint32(1), new(int16), isExpectedEq(int16(1))},
- {uint64(1), new(int16), isExpectedEq(int16(1))},
- {int(1), new(int16), isExpectedEq(int16(1))},
- {uint(1), new(int16), isExpectedEq(int16(1))},
- {pgtype.Int2{Int16: 1, Valid: true}, new(int16), isExpectedEq(int16(1))},
- {int32(-1), new(pgtype.Int2), isExpectedEq(pgtype.Int2{Int16: -1, Valid: true})},
- {1, new(int8), isExpectedEq(int8(1))},
- {1, new(int16), isExpectedEq(int16(1))},
- {1, new(int32), isExpectedEq(int32(1))},
- {1, new(int64), isExpectedEq(int64(1))},
- {1, new(uint8), isExpectedEq(uint8(1))},
- {1, new(uint16), isExpectedEq(uint16(1))},
- {1, new(uint32), isExpectedEq(uint32(1))},
- {1, new(uint64), isExpectedEq(uint64(1))},
- {1, new(int), isExpectedEq(int(1))},
- {1, new(uint), isExpectedEq(uint(1))},
- {-1, new(int8), isExpectedEq(int8(-1))},
- {-1, new(int16), isExpectedEq(int16(-1))},
- {-1, new(int32), isExpectedEq(int32(-1))},
- {-1, new(int64), isExpectedEq(int64(-1))},
- {-1, new(int), isExpectedEq(int(-1))},
- {math.MinInt16, new(int16), isExpectedEq(int16(math.MinInt16))},
- {-1, new(int16), isExpectedEq(int16(-1))},
- {0, new(int16), isExpectedEq(int16(0))},
- {1, new(int16), isExpectedEq(int16(1))},
- {math.MaxInt16, new(int16), isExpectedEq(int16(math.MaxInt16))},
- {1, new(pgtype.Int2), isExpectedEq(pgtype.Int2{Int16: 1, Valid: true})},
- {"1", new(string), isExpectedEq("1")},
- {pgtype.Int2{}, new(pgtype.Int2), isExpectedEq(pgtype.Int2{})},
- {nil, new(*int16), isExpectedEq((*int16)(nil))},
+ {Param: int8(1), Result: new(int16), Test: isExpectedEq(int16(1))},
+ {Param: int16(1), Result: new(int16), Test: isExpectedEq(int16(1))},
+ {Param: int32(1), Result: new(int16), Test: isExpectedEq(int16(1))},
+ {Param: int64(1), Result: new(int16), Test: isExpectedEq(int16(1))},
+ {Param: uint8(1), Result: new(int16), Test: isExpectedEq(int16(1))},
+ {Param: uint16(1), Result: new(int16), Test: isExpectedEq(int16(1))},
+ {Param: uint32(1), Result: new(int16), Test: isExpectedEq(int16(1))},
+ {Param: uint64(1), Result: new(int16), Test: isExpectedEq(int16(1))},
+ {Param: int(1), Result: new(int16), Test: isExpectedEq(int16(1))},
+ {Param: uint(1), Result: new(int16), Test: isExpectedEq(int16(1))},
+ {Param: pgtype.Int2{Int16: 1, Valid: true}, Result: new(int16), Test: isExpectedEq(int16(1))},
+ {Param: int32(-1), Result: new(pgtype.Int2), Test: isExpectedEq(pgtype.Int2{Int16: -1, Valid: true})},
+ {Param: 1, Result: new(int8), Test: isExpectedEq(int8(1))},
+ {Param: 1, Result: new(int16), Test: isExpectedEq(int16(1))},
+ {Param: 1, Result: new(int32), Test: isExpectedEq(int32(1))},
+ {Param: 1, Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: 1, Result: new(uint8), Test: isExpectedEq(uint8(1))},
+ {Param: 1, Result: new(uint16), Test: isExpectedEq(uint16(1))},
+ {Param: 1, Result: new(uint32), Test: isExpectedEq(uint32(1))},
+ {Param: 1, Result: new(uint64), Test: isExpectedEq(uint64(1))},
+ {Param: 1, Result: new(int), Test: isExpectedEq(int(1))},
+ {Param: 1, Result: new(uint), Test: isExpectedEq(uint(1))},
+ {Param: -1, Result: new(int8), Test: isExpectedEq(int8(-1))},
+ {Param: -1, Result: new(int16), Test: isExpectedEq(int16(-1))},
+ {Param: -1, Result: new(int32), Test: isExpectedEq(int32(-1))},
+ {Param: -1, Result: new(int64), Test: isExpectedEq(int64(-1))},
+ {Param: -1, Result: new(int), Test: isExpectedEq(int(-1))},
+ {Param: math.MinInt16, Result: new(int16), Test: isExpectedEq(int16(math.MinInt16))},
+ {Param: -1, Result: new(int16), Test: isExpectedEq(int16(-1))},
+ {Param: 0, Result: new(int16), Test: isExpectedEq(int16(0))},
+ {Param: 1, Result: new(int16), Test: isExpectedEq(int16(1))},
+ {Param: math.MaxInt16, Result: new(int16), Test: isExpectedEq(int16(math.MaxInt16))},
+ {Param: 1, Result: new(pgtype.Int2), Test: isExpectedEq(pgtype.Int2{Int16: 1, Valid: true})},
+ {Param: "1", Result: new(string), Test: isExpectedEq("1")},
+ {Param: pgtype.Int2{}, Result: new(pgtype.Int2), Test: isExpectedEq(pgtype.Int2{})},
+ {Param: nil, Result: new(*int16), Test: isExpectedEq((*int16)(nil))},
})
}
@@ -95,42 +95,42 @@ func TestInt2UnmarshalJSON(t *testing.T) {
func TestInt4Codec(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "int4", []pgxtest.ValueRoundTripTest{
- {int8(1), new(int32), isExpectedEq(int32(1))},
- {int16(1), new(int32), isExpectedEq(int32(1))},
- {int32(1), new(int32), isExpectedEq(int32(1))},
- {int64(1), new(int32), isExpectedEq(int32(1))},
- {uint8(1), new(int32), isExpectedEq(int32(1))},
- {uint16(1), new(int32), isExpectedEq(int32(1))},
- {uint32(1), new(int32), isExpectedEq(int32(1))},
- {uint64(1), new(int32), isExpectedEq(int32(1))},
- {int(1), new(int32), isExpectedEq(int32(1))},
- {uint(1), new(int32), isExpectedEq(int32(1))},
- {pgtype.Int4{Int32: 1, Valid: true}, new(int32), isExpectedEq(int32(1))},
- {int32(-1), new(pgtype.Int4), isExpectedEq(pgtype.Int4{Int32: -1, Valid: true})},
- {1, new(int8), isExpectedEq(int8(1))},
- {1, new(int16), isExpectedEq(int16(1))},
- {1, new(int32), isExpectedEq(int32(1))},
- {1, new(int64), isExpectedEq(int64(1))},
- {1, new(uint8), isExpectedEq(uint8(1))},
- {1, new(uint16), isExpectedEq(uint16(1))},
- {1, new(uint32), isExpectedEq(uint32(1))},
- {1, new(uint64), isExpectedEq(uint64(1))},
- {1, new(int), isExpectedEq(int(1))},
- {1, new(uint), isExpectedEq(uint(1))},
- {-1, new(int8), isExpectedEq(int8(-1))},
- {-1, new(int16), isExpectedEq(int16(-1))},
- {-1, new(int32), isExpectedEq(int32(-1))},
- {-1, new(int64), isExpectedEq(int64(-1))},
- {-1, new(int), isExpectedEq(int(-1))},
- {math.MinInt32, new(int32), isExpectedEq(int32(math.MinInt32))},
- {-1, new(int32), isExpectedEq(int32(-1))},
- {0, new(int32), isExpectedEq(int32(0))},
- {1, new(int32), isExpectedEq(int32(1))},
- {math.MaxInt32, new(int32), isExpectedEq(int32(math.MaxInt32))},
- {1, new(pgtype.Int4), isExpectedEq(pgtype.Int4{Int32: 1, Valid: true})},
- {"1", new(string), isExpectedEq("1")},
- {pgtype.Int4{}, new(pgtype.Int4), isExpectedEq(pgtype.Int4{})},
- {nil, new(*int32), isExpectedEq((*int32)(nil))},
+ {Param: int8(1), Result: new(int32), Test: isExpectedEq(int32(1))},
+ {Param: int16(1), Result: new(int32), Test: isExpectedEq(int32(1))},
+ {Param: int32(1), Result: new(int32), Test: isExpectedEq(int32(1))},
+ {Param: int64(1), Result: new(int32), Test: isExpectedEq(int32(1))},
+ {Param: uint8(1), Result: new(int32), Test: isExpectedEq(int32(1))},
+ {Param: uint16(1), Result: new(int32), Test: isExpectedEq(int32(1))},
+ {Param: uint32(1), Result: new(int32), Test: isExpectedEq(int32(1))},
+ {Param: uint64(1), Result: new(int32), Test: isExpectedEq(int32(1))},
+ {Param: int(1), Result: new(int32), Test: isExpectedEq(int32(1))},
+ {Param: uint(1), Result: new(int32), Test: isExpectedEq(int32(1))},
+ {Param: pgtype.Int4{Int32: 1, Valid: true}, Result: new(int32), Test: isExpectedEq(int32(1))},
+ {Param: int32(-1), Result: new(pgtype.Int4), Test: isExpectedEq(pgtype.Int4{Int32: -1, Valid: true})},
+ {Param: 1, Result: new(int8), Test: isExpectedEq(int8(1))},
+ {Param: 1, Result: new(int16), Test: isExpectedEq(int16(1))},
+ {Param: 1, Result: new(int32), Test: isExpectedEq(int32(1))},
+ {Param: 1, Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: 1, Result: new(uint8), Test: isExpectedEq(uint8(1))},
+ {Param: 1, Result: new(uint16), Test: isExpectedEq(uint16(1))},
+ {Param: 1, Result: new(uint32), Test: isExpectedEq(uint32(1))},
+ {Param: 1, Result: new(uint64), Test: isExpectedEq(uint64(1))},
+ {Param: 1, Result: new(int), Test: isExpectedEq(int(1))},
+ {Param: 1, Result: new(uint), Test: isExpectedEq(uint(1))},
+ {Param: -1, Result: new(int8), Test: isExpectedEq(int8(-1))},
+ {Param: -1, Result: new(int16), Test: isExpectedEq(int16(-1))},
+ {Param: -1, Result: new(int32), Test: isExpectedEq(int32(-1))},
+ {Param: -1, Result: new(int64), Test: isExpectedEq(int64(-1))},
+ {Param: -1, Result: new(int), Test: isExpectedEq(int(-1))},
+ {Param: math.MinInt32, Result: new(int32), Test: isExpectedEq(int32(math.MinInt32))},
+ {Param: -1, Result: new(int32), Test: isExpectedEq(int32(-1))},
+ {Param: 0, Result: new(int32), Test: isExpectedEq(int32(0))},
+ {Param: 1, Result: new(int32), Test: isExpectedEq(int32(1))},
+ {Param: math.MaxInt32, Result: new(int32), Test: isExpectedEq(int32(math.MaxInt32))},
+ {Param: 1, Result: new(pgtype.Int4), Test: isExpectedEq(pgtype.Int4{Int32: 1, Valid: true})},
+ {Param: "1", Result: new(string), Test: isExpectedEq("1")},
+ {Param: pgtype.Int4{}, Result: new(pgtype.Int4), Test: isExpectedEq(pgtype.Int4{})},
+ {Param: nil, Result: new(*int32), Test: isExpectedEq((*int32)(nil))},
})
}
@@ -177,42 +177,42 @@ func TestInt4UnmarshalJSON(t *testing.T) {
func TestInt8Codec(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "int8", []pgxtest.ValueRoundTripTest{
- {int8(1), new(int64), isExpectedEq(int64(1))},
- {int16(1), new(int64), isExpectedEq(int64(1))},
- {int32(1), new(int64), isExpectedEq(int64(1))},
- {int64(1), new(int64), isExpectedEq(int64(1))},
- {uint8(1), new(int64), isExpectedEq(int64(1))},
- {uint16(1), new(int64), isExpectedEq(int64(1))},
- {uint32(1), new(int64), isExpectedEq(int64(1))},
- {uint64(1), new(int64), isExpectedEq(int64(1))},
- {int(1), new(int64), isExpectedEq(int64(1))},
- {uint(1), new(int64), isExpectedEq(int64(1))},
- {pgtype.Int8{Int64: 1, Valid: true}, new(int64), isExpectedEq(int64(1))},
- {int32(-1), new(pgtype.Int8), isExpectedEq(pgtype.Int8{Int64: -1, Valid: true})},
- {1, new(int8), isExpectedEq(int8(1))},
- {1, new(int16), isExpectedEq(int16(1))},
- {1, new(int32), isExpectedEq(int32(1))},
- {1, new(int64), isExpectedEq(int64(1))},
- {1, new(uint8), isExpectedEq(uint8(1))},
- {1, new(uint16), isExpectedEq(uint16(1))},
- {1, new(uint32), isExpectedEq(uint32(1))},
- {1, new(uint64), isExpectedEq(uint64(1))},
- {1, new(int), isExpectedEq(int(1))},
- {1, new(uint), isExpectedEq(uint(1))},
- {-1, new(int8), isExpectedEq(int8(-1))},
- {-1, new(int16), isExpectedEq(int16(-1))},
- {-1, new(int32), isExpectedEq(int32(-1))},
- {-1, new(int64), isExpectedEq(int64(-1))},
- {-1, new(int), isExpectedEq(int(-1))},
- {math.MinInt64, new(int64), isExpectedEq(int64(math.MinInt64))},
- {-1, new(int64), isExpectedEq(int64(-1))},
- {0, new(int64), isExpectedEq(int64(0))},
- {1, new(int64), isExpectedEq(int64(1))},
- {math.MaxInt64, new(int64), isExpectedEq(int64(math.MaxInt64))},
- {1, new(pgtype.Int8), isExpectedEq(pgtype.Int8{Int64: 1, Valid: true})},
- {"1", new(string), isExpectedEq("1")},
- {pgtype.Int8{}, new(pgtype.Int8), isExpectedEq(pgtype.Int8{})},
- {nil, new(*int64), isExpectedEq((*int64)(nil))},
+ {Param: int8(1), Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: int16(1), Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: int32(1), Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: int64(1), Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: uint8(1), Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: uint16(1), Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: uint32(1), Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: uint64(1), Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: int(1), Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: uint(1), Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: pgtype.Int8{Int64: 1, Valid: true}, Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: int32(-1), Result: new(pgtype.Int8), Test: isExpectedEq(pgtype.Int8{Int64: -1, Valid: true})},
+ {Param: 1, Result: new(int8), Test: isExpectedEq(int8(1))},
+ {Param: 1, Result: new(int16), Test: isExpectedEq(int16(1))},
+ {Param: 1, Result: new(int32), Test: isExpectedEq(int32(1))},
+ {Param: 1, Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: 1, Result: new(uint8), Test: isExpectedEq(uint8(1))},
+ {Param: 1, Result: new(uint16), Test: isExpectedEq(uint16(1))},
+ {Param: 1, Result: new(uint32), Test: isExpectedEq(uint32(1))},
+ {Param: 1, Result: new(uint64), Test: isExpectedEq(uint64(1))},
+ {Param: 1, Result: new(int), Test: isExpectedEq(int(1))},
+ {Param: 1, Result: new(uint), Test: isExpectedEq(uint(1))},
+ {Param: -1, Result: new(int8), Test: isExpectedEq(int8(-1))},
+ {Param: -1, Result: new(int16), Test: isExpectedEq(int16(-1))},
+ {Param: -1, Result: new(int32), Test: isExpectedEq(int32(-1))},
+ {Param: -1, Result: new(int64), Test: isExpectedEq(int64(-1))},
+ {Param: -1, Result: new(int), Test: isExpectedEq(int(-1))},
+ {Param: math.MinInt64, Result: new(int64), Test: isExpectedEq(int64(math.MinInt64))},
+ {Param: -1, Result: new(int64), Test: isExpectedEq(int64(-1))},
+ {Param: 0, Result: new(int64), Test: isExpectedEq(int64(0))},
+ {Param: 1, Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: math.MaxInt64, Result: new(int64), Test: isExpectedEq(int64(math.MaxInt64))},
+ {Param: 1, Result: new(pgtype.Int8), Test: isExpectedEq(pgtype.Int8{Int64: 1, Valid: true})},
+ {Param: "1", Result: new(string), Test: isExpectedEq("1")},
+ {Param: pgtype.Int8{}, Result: new(pgtype.Int8), Test: isExpectedEq(pgtype.Int8{})},
+ {Param: nil, Result: new(*int64), Test: isExpectedEq((*int64)(nil))},
})
}
diff --git a/pgtype/integration_benchmark_test.go b/pgtype/integration_benchmark_test.go
index 88516a9fb..a273042ae 100644
--- a/pgtype/integration_benchmark_test.go
+++ b/pgtype/integration_benchmark_test.go
@@ -14,7 +14,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_int16_1_rows_1_columns(b *test
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]int16
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 1) n`,
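The benchmark hunks in this file drop an unused loop index by switching from counted for i := 0; i < b.N; i++ loops to testing.B.Loop (Go 1.24+); the same change repeats for every benchmark below. A minimal sketch of the resulting pattern, in a hypothetical *_test.go file:

    // Minimal sketch of the loop shape used above (hypothetical example; it
    // would live in a *_test.go file). testing.B.Loop, added in Go 1.24,
    // drives the iteration count itself, so the unused index variable from
    // the old counted form can be dropped.
    package example

    import "testing"

    func BenchmarkLoopPattern(b *testing.B) {
        buf := make([]byte, 0, 64)
        b.ResetTimer()
        for b.Loop() {
            buf = append(buf[:0], "work per iteration"...)
        }
        _ = buf
    }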
@@ -32,7 +32,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_int16_1_rows_1_columns(b *te
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]int16
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 1) n`,
@@ -50,7 +50,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_int16_1_rows_10_columns(b *tes
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]int16
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 1) n`,
@@ -68,7 +68,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_int16_1_rows_10_columns(b *t
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]int16
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 1) n`,
@@ -86,7 +86,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_int16_10_rows_1_columns(b *tes
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]int16
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 10) n`,
@@ -104,7 +104,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_int16_10_rows_1_columns(b *t
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]int16
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 10) n`,
@@ -122,7 +122,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_int16_100_rows_10_columns(b *t
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]int16
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 100) n`,
@@ -140,7 +140,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_int16_100_rows_10_columns(b
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]int16
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 100) n`,
@@ -158,7 +158,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_int32_1_rows_1_columns(b *test
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]int32
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 1) n`,
@@ -176,7 +176,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_int32_1_rows_1_columns(b *te
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]int32
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 1) n`,
@@ -194,7 +194,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_int32_1_rows_10_columns(b *tes
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]int32
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 1) n`,
@@ -212,7 +212,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_int32_1_rows_10_columns(b *t
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]int32
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 1) n`,
@@ -230,7 +230,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_int32_10_rows_1_columns(b *tes
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]int32
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 10) n`,
@@ -248,7 +248,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_int32_10_rows_1_columns(b *t
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]int32
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 10) n`,
@@ -266,7 +266,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_int32_100_rows_10_columns(b *t
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]int32
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 100) n`,
@@ -284,7 +284,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_int32_100_rows_10_columns(b
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]int32
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 100) n`,
@@ -302,7 +302,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_int64_1_rows_1_columns(b *test
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]int64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 1) n`,
@@ -320,7 +320,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_int64_1_rows_1_columns(b *te
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]int64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 1) n`,
@@ -338,7 +338,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_int64_1_rows_10_columns(b *tes
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]int64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 1) n`,
@@ -356,7 +356,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_int64_1_rows_10_columns(b *t
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]int64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 1) n`,
@@ -374,7 +374,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_int64_10_rows_1_columns(b *tes
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]int64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 10) n`,
@@ -392,7 +392,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_int64_10_rows_1_columns(b *t
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]int64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 10) n`,
@@ -410,7 +410,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_int64_100_rows_10_columns(b *t
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]int64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 100) n`,
@@ -428,7 +428,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_int64_100_rows_10_columns(b
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]int64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 100) n`,
@@ -446,7 +446,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_uint64_1_rows_1_columns(b *tes
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]uint64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 1) n`,
@@ -464,7 +464,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_uint64_1_rows_1_columns(b *t
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]uint64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 1) n`,
@@ -482,7 +482,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_uint64_1_rows_10_columns(b *te
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]uint64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 1) n`,
@@ -500,7 +500,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_uint64_1_rows_10_columns(b *
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]uint64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 1) n`,
@@ -518,7 +518,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_uint64_10_rows_1_columns(b *te
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]uint64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 10) n`,
@@ -536,7 +536,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_uint64_10_rows_1_columns(b *
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]uint64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 10) n`,
@@ -554,7 +554,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_uint64_100_rows_10_columns(b *
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]uint64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 100) n`,
@@ -572,7 +572,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_uint64_100_rows_10_columns(b
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]uint64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 100) n`,
@@ -590,7 +590,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_pgtype_Int4_1_rows_1_columns(b
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]pgtype.Int4
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 1) n`,
@@ -608,7 +608,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_pgtype_Int4_1_rows_1_columns
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]pgtype.Int4
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 1) n`,
@@ -626,7 +626,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_pgtype_Int4_1_rows_10_columns(
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]pgtype.Int4
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 1) n`,
@@ -644,7 +644,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_pgtype_Int4_1_rows_10_column
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]pgtype.Int4
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 1) n`,
@@ -662,7 +662,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_pgtype_Int4_10_rows_1_columns(
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]pgtype.Int4
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 10) n`,
@@ -680,7 +680,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_pgtype_Int4_10_rows_1_column
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]pgtype.Int4
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0 from generate_series(1, 10) n`,
@@ -698,7 +698,7 @@ func BenchmarkQueryTextFormatDecode_PG_int4_to_Go_pgtype_Int4_100_rows_10_column
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]pgtype.Int4
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 100) n`,
@@ -716,7 +716,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_int4_to_Go_pgtype_Int4_100_rows_10_colu
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]pgtype.Int4
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::int4 + 0, n::int4 + 1, n::int4 + 2, n::int4 + 3, n::int4 + 4, n::int4 + 5, n::int4 + 6, n::int4 + 7, n::int4 + 8, n::int4 + 9 from generate_series(1, 100) n`,
@@ -734,7 +734,7 @@ func BenchmarkQueryTextFormatDecode_PG_numeric_to_Go_int64_1_rows_1_columns(b *t
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]int64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0 from generate_series(1, 1) n`,
@@ -752,7 +752,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_numeric_to_Go_int64_1_rows_1_columns(b
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]int64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0 from generate_series(1, 1) n`,
@@ -770,7 +770,7 @@ func BenchmarkQueryTextFormatDecode_PG_numeric_to_Go_int64_1_rows_10_columns(b *
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]int64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0, n::numeric + 1, n::numeric + 2, n::numeric + 3, n::numeric + 4, n::numeric + 5, n::numeric + 6, n::numeric + 7, n::numeric + 8, n::numeric + 9 from generate_series(1, 1) n`,
@@ -788,7 +788,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_numeric_to_Go_int64_1_rows_10_columns(b
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]int64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0, n::numeric + 1, n::numeric + 2, n::numeric + 3, n::numeric + 4, n::numeric + 5, n::numeric + 6, n::numeric + 7, n::numeric + 8, n::numeric + 9 from generate_series(1, 1) n`,
@@ -806,7 +806,7 @@ func BenchmarkQueryTextFormatDecode_PG_numeric_to_Go_int64_10_rows_1_columns(b *
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]int64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0 from generate_series(1, 10) n`,
@@ -824,7 +824,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_numeric_to_Go_int64_10_rows_1_columns(b
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]int64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0 from generate_series(1, 10) n`,
@@ -842,7 +842,7 @@ func BenchmarkQueryTextFormatDecode_PG_numeric_to_Go_int64_100_rows_10_columns(b
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]int64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0, n::numeric + 1, n::numeric + 2, n::numeric + 3, n::numeric + 4, n::numeric + 5, n::numeric + 6, n::numeric + 7, n::numeric + 8, n::numeric + 9 from generate_series(1, 100) n`,
@@ -860,7 +860,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_numeric_to_Go_int64_100_rows_10_columns
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]int64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0, n::numeric + 1, n::numeric + 2, n::numeric + 3, n::numeric + 4, n::numeric + 5, n::numeric + 6, n::numeric + 7, n::numeric + 8, n::numeric + 9 from generate_series(1, 100) n`,
@@ -878,7 +878,7 @@ func BenchmarkQueryTextFormatDecode_PG_numeric_to_Go_float64_1_rows_1_columns(b
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]float64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0 from generate_series(1, 1) n`,
@@ -896,7 +896,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_numeric_to_Go_float64_1_rows_1_columns(
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]float64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0 from generate_series(1, 1) n`,
@@ -914,7 +914,7 @@ func BenchmarkQueryTextFormatDecode_PG_numeric_to_Go_float64_1_rows_10_columns(b
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]float64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0, n::numeric + 1, n::numeric + 2, n::numeric + 3, n::numeric + 4, n::numeric + 5, n::numeric + 6, n::numeric + 7, n::numeric + 8, n::numeric + 9 from generate_series(1, 1) n`,
@@ -932,7 +932,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_numeric_to_Go_float64_1_rows_10_columns
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]float64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0, n::numeric + 1, n::numeric + 2, n::numeric + 3, n::numeric + 4, n::numeric + 5, n::numeric + 6, n::numeric + 7, n::numeric + 8, n::numeric + 9 from generate_series(1, 1) n`,
@@ -950,7 +950,7 @@ func BenchmarkQueryTextFormatDecode_PG_numeric_to_Go_float64_10_rows_1_columns(b
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]float64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0 from generate_series(1, 10) n`,
@@ -968,7 +968,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_numeric_to_Go_float64_10_rows_1_columns
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]float64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0 from generate_series(1, 10) n`,
@@ -986,7 +986,7 @@ func BenchmarkQueryTextFormatDecode_PG_numeric_to_Go_float64_100_rows_10_columns
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]float64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0, n::numeric + 1, n::numeric + 2, n::numeric + 3, n::numeric + 4, n::numeric + 5, n::numeric + 6, n::numeric + 7, n::numeric + 8, n::numeric + 9 from generate_series(1, 100) n`,
@@ -1004,7 +1004,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_numeric_to_Go_float64_100_rows_10_colum
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]float64
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0, n::numeric + 1, n::numeric + 2, n::numeric + 3, n::numeric + 4, n::numeric + 5, n::numeric + 6, n::numeric + 7, n::numeric + 8, n::numeric + 9 from generate_series(1, 100) n`,
@@ -1022,7 +1022,7 @@ func BenchmarkQueryTextFormatDecode_PG_numeric_to_Go_pgtype_Numeric_1_rows_1_col
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]pgtype.Numeric
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0 from generate_series(1, 1) n`,
@@ -1040,7 +1040,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_numeric_to_Go_pgtype_Numeric_1_rows_1_c
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]pgtype.Numeric
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0 from generate_series(1, 1) n`,
@@ -1058,7 +1058,7 @@ func BenchmarkQueryTextFormatDecode_PG_numeric_to_Go_pgtype_Numeric_1_rows_10_co
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]pgtype.Numeric
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0, n::numeric + 1, n::numeric + 2, n::numeric + 3, n::numeric + 4, n::numeric + 5, n::numeric + 6, n::numeric + 7, n::numeric + 8, n::numeric + 9 from generate_series(1, 1) n`,
@@ -1076,7 +1076,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_numeric_to_Go_pgtype_Numeric_1_rows_10_
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]pgtype.Numeric
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0, n::numeric + 1, n::numeric + 2, n::numeric + 3, n::numeric + 4, n::numeric + 5, n::numeric + 6, n::numeric + 7, n::numeric + 8, n::numeric + 9 from generate_series(1, 1) n`,
@@ -1094,7 +1094,7 @@ func BenchmarkQueryTextFormatDecode_PG_numeric_to_Go_pgtype_Numeric_10_rows_1_co
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]pgtype.Numeric
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0 from generate_series(1, 10) n`,
@@ -1112,7 +1112,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_numeric_to_Go_pgtype_Numeric_10_rows_1_
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [1]pgtype.Numeric
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0 from generate_series(1, 10) n`,
@@ -1130,7 +1130,7 @@ func BenchmarkQueryTextFormatDecode_PG_numeric_to_Go_pgtype_Numeric_100_rows_10_
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]pgtype.Numeric
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0, n::numeric + 1, n::numeric + 2, n::numeric + 3, n::numeric + 4, n::numeric + 5, n::numeric + 6, n::numeric + 7, n::numeric + 8, n::numeric + 9 from generate_series(1, 100) n`,
@@ -1148,7 +1148,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_numeric_to_Go_pgtype_Numeric_100_rows_1
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [10]pgtype.Numeric
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select n::numeric + 0, n::numeric + 1, n::numeric + 2, n::numeric + 3, n::numeric + 4, n::numeric + 5, n::numeric + 6, n::numeric + 7, n::numeric + 8, n::numeric + 9 from generate_series(1, 100) n`,
@@ -1166,7 +1166,7 @@ func BenchmarkQueryTextFormatDecode_PG_Int4Array_With_Go_Int4Array_10(b *testing
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v []int32
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select array_agg(n) from generate_series(1, 10) n`,
@@ -1184,7 +1184,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_Int4Array_With_Go_Int4Array_10(b *testi
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v []int32
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select array_agg(n) from generate_series(1, 10) n`,
@@ -1202,7 +1202,7 @@ func BenchmarkQueryTextFormatDecode_PG_Int4Array_With_Go_Int4Array_100(b *testin
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v []int32
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select array_agg(n) from generate_series(1, 100) n`,
@@ -1220,7 +1220,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_Int4Array_With_Go_Int4Array_100(b *test
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v []int32
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select array_agg(n) from generate_series(1, 100) n`,
@@ -1238,7 +1238,7 @@ func BenchmarkQueryTextFormatDecode_PG_Int4Array_With_Go_Int4Array_1000(b *testi
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v []int32
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select array_agg(n) from generate_series(1, 1000) n`,
@@ -1256,7 +1256,7 @@ func BenchmarkQueryBinaryFormatDecode_PG_Int4Array_With_Go_Int4Array_1000(b *tes
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v []int32
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select array_agg(n) from generate_series(1, 1000) n`,
diff --git a/pgtype/integration_benchmark_test.go.erb b/pgtype/integration_benchmark_test.go.erb
index 6f4011534..51d7669e5 100644
--- a/pgtype/integration_benchmark_test.go.erb
+++ b/pgtype/integration_benchmark_test.go.erb
@@ -21,7 +21,7 @@ func BenchmarkQuery<%= format_name %>FormatDecode_PG_<%= pg_type %>_to_Go_<%= go
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v [<%= columns %>]<%= go_type %>
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select <% columns.times do |col_idx| %><% if col_idx != 0 %>, <% end %>n::<%= pg_type %> + <%= col_idx%><% end %> from generate_series(1, <%= rows %>) n`,
@@ -45,7 +45,7 @@ func BenchmarkQuery<%= format_name %>FormatDecode_PG_Int4Array_With_Go_Int4Array
defaultConnTestRunner.RunTest(context.Background(), b, func(ctx context.Context, _ testing.TB, conn *pgx.Conn) {
b.ResetTimer()
var v []int32
- for i := 0; i < b.N; i++ {
+ for b.Loop() {
rows, _ := conn.Query(
ctx,
`select array_agg(n) from generate_series(1, <%= array_size %>) n`,
diff --git a/pgtype/interval.go b/pgtype/interval.go
index b1bc78527..1f3e19dfd 100644
--- a/pgtype/interval.go
+++ b/pgtype/interval.go
@@ -185,7 +185,7 @@ func (scanPlanBinaryIntervalToIntervalScanner) Scan(src []byte, dst any) error {
}
if len(src) != 16 {
- return fmt.Errorf("Received an invalid size for an interval: %d", len(src))
+ return fmt.Errorf("received an invalid size for an interval: %d", len(src))
}
microseconds := int64(binary.BigEndian.Uint64(src))
@@ -264,7 +264,7 @@ func (scanPlanTextAnyToIntervalScanner) Scan(src []byte, dst any) error {
return fmt.Errorf("bad interval decimal format: %s", secFrac)
}
- for i := 0; i < 6-len(secFrac); i++ {
+ for range 6 - len(secFrac) {
uSeconds *= 10
}
}
diff --git a/pgtype/interval_test.go b/pgtype/interval_test.go
index 6eefaca72..d79085f5a 100644
--- a/pgtype/interval_test.go
+++ b/pgtype/interval_test.go
@@ -13,128 +13,128 @@ import (
func TestIntervalCodec(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "interval", []pgxtest.ValueRoundTripTest{
{
- pgtype.Interval{Microseconds: 1, Valid: true},
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Microseconds: 1, Valid: true}),
+ Param: pgtype.Interval{Microseconds: 1, Valid: true},
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Microseconds: 1, Valid: true}),
},
{
- pgtype.Interval{Microseconds: 1_000_000, Valid: true},
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Microseconds: 1_000_000, Valid: true}),
+ Param: pgtype.Interval{Microseconds: 1_000_000, Valid: true},
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Microseconds: 1_000_000, Valid: true}),
},
{
- pgtype.Interval{Microseconds: 1_000_001, Valid: true},
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Microseconds: 1_000_001, Valid: true}),
+ Param: pgtype.Interval{Microseconds: 1_000_001, Valid: true},
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Microseconds: 1_000_001, Valid: true}),
},
{
- pgtype.Interval{Microseconds: 123_202_800_000_000, Valid: true},
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Microseconds: 123_202_800_000_000, Valid: true}),
+ Param: pgtype.Interval{Microseconds: 123_202_800_000_000, Valid: true},
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Microseconds: 123_202_800_000_000, Valid: true}),
},
{
- pgtype.Interval{Days: 1, Valid: true},
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Days: 1, Valid: true}),
+ Param: pgtype.Interval{Days: 1, Valid: true},
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Days: 1, Valid: true}),
},
{
- pgtype.Interval{Months: 1, Valid: true},
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Months: 1, Valid: true}),
+ Param: pgtype.Interval{Months: 1, Valid: true},
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Months: 1, Valid: true}),
},
{
- pgtype.Interval{Months: 12, Valid: true},
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Months: 12, Valid: true}),
+ Param: pgtype.Interval{Months: 12, Valid: true},
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Months: 12, Valid: true}),
},
{
- pgtype.Interval{Months: 13, Days: 15, Microseconds: 1_000_001, Valid: true},
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Months: 13, Days: 15, Microseconds: 1_000_001, Valid: true}),
+ Param: pgtype.Interval{Months: 13, Days: 15, Microseconds: 1_000_001, Valid: true},
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Months: 13, Days: 15, Microseconds: 1_000_001, Valid: true}),
},
{
- pgtype.Interval{Microseconds: -1, Valid: true},
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Microseconds: -1, Valid: true}),
+ Param: pgtype.Interval{Microseconds: -1, Valid: true},
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Microseconds: -1, Valid: true}),
},
{
- pgtype.Interval{Microseconds: -1_000_000, Valid: true},
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Microseconds: -1_000_000, Valid: true}),
+ Param: pgtype.Interval{Microseconds: -1_000_000, Valid: true},
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Microseconds: -1_000_000, Valid: true}),
},
{
- pgtype.Interval{Microseconds: -1_000_001, Valid: true},
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Microseconds: -1_000_001, Valid: true}),
+ Param: pgtype.Interval{Microseconds: -1_000_001, Valid: true},
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Microseconds: -1_000_001, Valid: true}),
},
{
- pgtype.Interval{Microseconds: -123_202_800_000_000, Valid: true},
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Microseconds: -123_202_800_000_000, Valid: true}),
+ Param: pgtype.Interval{Microseconds: -123_202_800_000_000, Valid: true},
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Microseconds: -123_202_800_000_000, Valid: true}),
},
{
- pgtype.Interval{Days: -1, Valid: true},
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Days: -1, Valid: true}),
+ Param: pgtype.Interval{Days: -1, Valid: true},
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Days: -1, Valid: true}),
},
{
- pgtype.Interval{Months: -1, Valid: true},
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Months: -1, Valid: true}),
+ Param: pgtype.Interval{Months: -1, Valid: true},
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Months: -1, Valid: true}),
},
{
- pgtype.Interval{Months: -12, Valid: true},
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Months: -12, Valid: true}),
+ Param: pgtype.Interval{Months: -12, Valid: true},
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Months: -12, Valid: true}),
},
{
- pgtype.Interval{Months: -13, Days: -15, Microseconds: -1_000_001, Valid: true},
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Months: -13, Days: -15, Microseconds: -1_000_001, Valid: true}),
+ Param: pgtype.Interval{Months: -13, Days: -15, Microseconds: -1_000_001, Valid: true},
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Months: -13, Days: -15, Microseconds: -1_000_001, Valid: true}),
},
{
- "1 second",
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Microseconds: 1_000_000, Valid: true}),
+ Param: "1 second",
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Microseconds: 1_000_000, Valid: true}),
},
{
- "1.000001 second",
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Microseconds: 1_000_001, Valid: true}),
+ Param: "1.000001 second",
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Microseconds: 1_000_001, Valid: true}),
},
{
- "34223 hours",
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Microseconds: 123_202_800_000_000, Valid: true}),
+ Param: "34223 hours",
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Microseconds: 123_202_800_000_000, Valid: true}),
},
{
- "1 day",
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Days: 1, Valid: true}),
+ Param: "1 day",
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Days: 1, Valid: true}),
},
{
- "1 month",
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Months: 1, Valid: true}),
+ Param: "1 month",
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Months: 1, Valid: true}),
},
{
- "1 year",
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Months: 12, Valid: true}),
+ Param: "1 year",
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Months: 12, Valid: true}),
},
{
- "-13 mon",
- new(pgtype.Interval),
- isExpectedEq(pgtype.Interval{Months: -13, Valid: true}),
+ Param: "-13 mon",
+ Result: new(pgtype.Interval),
+ Test: isExpectedEq(pgtype.Interval{Months: -13, Valid: true}),
},
- {time.Hour, new(time.Duration), isExpectedEq(time.Hour)},
+ {Param: time.Hour, Result: new(time.Duration), Test: isExpectedEq(time.Hour)},
{
- pgtype.Interval{Months: 1, Days: 1, Valid: true},
- new(time.Duration),
- isExpectedEq(time.Duration(2_678_400_000_000_000)),
+ Param: pgtype.Interval{Months: 1, Days: 1, Valid: true},
+ Result: new(time.Duration),
+ Test: isExpectedEq(time.Duration(2_678_400_000_000_000)),
},
- {pgtype.Interval{}, new(pgtype.Interval), isExpectedEq(pgtype.Interval{})},
- {nil, new(pgtype.Interval), isExpectedEq(pgtype.Interval{})},
+ {Param: pgtype.Interval{}, Result: new(pgtype.Interval), Test: isExpectedEq(pgtype.Interval{})},
+ {Param: nil, Result: new(pgtype.Interval), Test: isExpectedEq(pgtype.Interval{})},
})
}
diff --git a/pgtype/json_test.go b/pgtype/json_test.go
index 3cc78bff2..953144afa 100644
--- a/pgtype/json_test.go
+++ b/pgtype/json_test.go
@@ -51,35 +51,35 @@ func TestJSONCodec(t *testing.T) {
var str string
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "json", []pgxtest.ValueRoundTripTest{
- {nil, new(*jsonStruct), isExpectedEq((*jsonStruct)(nil))},
- {map[string]any(nil), new(*string), isExpectedEq((*string)(nil))},
- {map[string]any(nil), new([]byte), isExpectedEqBytes([]byte(nil))},
- {[]byte(nil), new([]byte), isExpectedEqBytes([]byte(nil))},
- {nil, new([]byte), isExpectedEqBytes([]byte(nil))},
+ {Param: nil, Result: new(*jsonStruct), Test: isExpectedEq((*jsonStruct)(nil))},
+ {Param: map[string]any(nil), Result: new(*string), Test: isExpectedEq((*string)(nil))},
+ {Param: map[string]any(nil), Result: new([]byte), Test: isExpectedEqBytes([]byte(nil))},
+ {Param: []byte(nil), Result: new([]byte), Test: isExpectedEqBytes([]byte(nil))},
+ {Param: nil, Result: new([]byte), Test: isExpectedEqBytes([]byte(nil))},
// Test sql.Scanner. (https://github.com/jackc/pgx/issues/1418)
- {"42", new(sql.NullInt64), isExpectedEq(sql.NullInt64{Int64: 42, Valid: true})},
+ {Param: "42", Result: new(sql.NullInt64), Test: isExpectedEq(sql.NullInt64{Int64: 42, Valid: true})},
// Test driver.Valuer. (https://github.com/jackc/pgx/issues/1430)
- {sql.NullInt64{Int64: 42, Valid: true}, new(sql.NullInt64), isExpectedEq(sql.NullInt64{Int64: 42, Valid: true})},
+ {Param: sql.NullInt64{Int64: 42, Valid: true}, Result: new(sql.NullInt64), Test: isExpectedEq(sql.NullInt64{Int64: 42, Valid: true})},
// Test driver.Valuer is used before json.Marshaler (https://github.com/jackc/pgx/issues/1805)
- {Issue1805(7), new(Issue1805), isExpectedEq(Issue1805(7))},
+ {Param: Issue1805(7), Result: new(Issue1805), Test: isExpectedEq(Issue1805(7))},
// Test driver.Scanner is used before json.Unmarshaler (https://github.com/jackc/pgx/issues/2146)
- {Issue2146(7), new(*Issue2146), isPtrExpectedEq(Issue2146(7))},
+ {Param: Issue2146(7), Result: new(*Issue2146), Test: isPtrExpectedEq(Issue2146(7))},
// Test driver.Scanner without pointer receiver (https://github.com/jackc/pgx/issues/2204)
- {NonPointerJSONScanner{V: stringPtr("{}")}, NonPointerJSONScanner{V: &str}, func(a any) bool { return str == "{}" }},
+ {Param: NonPointerJSONScanner{V: stringPtr("{}")}, Result: NonPointerJSONScanner{V: &str}, Test: func(a any) bool { return str == "{}" }},
})
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, pgxtest.KnownOIDQueryExecModes, "json", []pgxtest.ValueRoundTripTest{
- {[]byte("{}"), new([]byte), isExpectedEqBytes([]byte("{}"))},
- {[]byte("null"), new([]byte), isExpectedEqBytes([]byte("null"))},
- {[]byte("42"), new([]byte), isExpectedEqBytes([]byte("42"))},
- {[]byte(`"hello"`), new([]byte), isExpectedEqBytes([]byte(`"hello"`))},
- {[]byte(`"hello"`), new(string), isExpectedEq(`"hello"`)},
- {map[string]any{"foo": "bar"}, new(map[string]any), isExpectedEqMap(map[string]any{"foo": "bar"})},
- {jsonStruct{Name: "Adam", Age: 10}, new(jsonStruct), isExpectedEq(jsonStruct{Name: "Adam", Age: 10})},
+ {Param: []byte("{}"), Result: new([]byte), Test: isExpectedEqBytes([]byte("{}"))},
+ {Param: []byte("null"), Result: new([]byte), Test: isExpectedEqBytes([]byte("null"))},
+ {Param: []byte("42"), Result: new([]byte), Test: isExpectedEqBytes([]byte("42"))},
+ {Param: []byte(`"hello"`), Result: new([]byte), Test: isExpectedEqBytes([]byte(`"hello"`))},
+ {Param: []byte(`"hello"`), Result: new(string), Test: isExpectedEq(`"hello"`)},
+ {Param: map[string]any{"foo": "bar"}, Result: new(map[string]any), Test: isExpectedEqMap(map[string]any{"foo": "bar"})},
+ {Param: jsonStruct{Name: "Adam", Age: 10}, Result: new(jsonStruct), Test: isExpectedEq(jsonStruct{Name: "Adam", Age: 10})},
})
}
@@ -303,8 +303,8 @@ func TestJSONCodecCustomMarshal(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, connTestRunner, pgxtest.KnownOIDQueryExecModes, "json", []pgxtest.ValueRoundTripTest{
// There is no space between "custom" and "value" in json type.
- {map[string]any{"something": "else"}, new(string), isExpectedEq(`{"custom":"value"}`)},
- {[]byte(`{"something":"else"}`), new(map[string]any), func(v any) bool {
+ {Param: map[string]any{"something": "else"}, Result: new(string), Test: isExpectedEq(`{"custom":"value"}`)},
+ {Param: []byte(`{"something":"else"}`), Result: new(map[string]any), Test: func(v any) bool {
return reflect.DeepEqual(v, map[string]any{"custom": "value"})
}},
})
diff --git a/pgtype/jsonb_test.go b/pgtype/jsonb_test.go
index 70f91253a..2c813f01a 100644
--- a/pgtype/jsonb_test.go
+++ b/pgtype/jsonb_test.go
@@ -19,21 +19,21 @@ func TestJSONBTranscode(t *testing.T) {
}
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "jsonb", []pgxtest.ValueRoundTripTest{
- {nil, new(*jsonStruct), isExpectedEq((*jsonStruct)(nil))},
- {map[string]any(nil), new(*string), isExpectedEq((*string)(nil))},
- {map[string]any(nil), new([]byte), isExpectedEqBytes([]byte(nil))},
- {[]byte(nil), new([]byte), isExpectedEqBytes([]byte(nil))},
- {nil, new([]byte), isExpectedEqBytes([]byte(nil))},
+ {Param: nil, Result: new(*jsonStruct), Test: isExpectedEq((*jsonStruct)(nil))},
+ {Param: map[string]any(nil), Result: new(*string), Test: isExpectedEq((*string)(nil))},
+ {Param: map[string]any(nil), Result: new([]byte), Test: isExpectedEqBytes([]byte(nil))},
+ {Param: []byte(nil), Result: new([]byte), Test: isExpectedEqBytes([]byte(nil))},
+ {Param: nil, Result: new([]byte), Test: isExpectedEqBytes([]byte(nil))},
})
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, pgxtest.KnownOIDQueryExecModes, "jsonb", []pgxtest.ValueRoundTripTest{
- {[]byte("{}"), new([]byte), isExpectedEqBytes([]byte("{}"))},
- {[]byte("null"), new([]byte), isExpectedEqBytes([]byte("null"))},
- {[]byte("42"), new([]byte), isExpectedEqBytes([]byte("42"))},
- {[]byte(`"hello"`), new([]byte), isExpectedEqBytes([]byte(`"hello"`))},
- {[]byte(`"hello"`), new(string), isExpectedEq(`"hello"`)},
- {map[string]any{"foo": "bar"}, new(map[string]any), isExpectedEqMap(map[string]any{"foo": "bar"})},
- {jsonStruct{Name: "Adam", Age: 10}, new(jsonStruct), isExpectedEq(jsonStruct{Name: "Adam", Age: 10})},
+ {Param: []byte("{}"), Result: new([]byte), Test: isExpectedEqBytes([]byte("{}"))},
+ {Param: []byte("null"), Result: new([]byte), Test: isExpectedEqBytes([]byte("null"))},
+ {Param: []byte("42"), Result: new([]byte), Test: isExpectedEqBytes([]byte("42"))},
+ {Param: []byte(`"hello"`), Result: new([]byte), Test: isExpectedEqBytes([]byte(`"hello"`))},
+ {Param: []byte(`"hello"`), Result: new(string), Test: isExpectedEq(`"hello"`)},
+ {Param: map[string]any{"foo": "bar"}, Result: new(map[string]any), Test: isExpectedEqMap(map[string]any{"foo": "bar"})},
+ {Param: jsonStruct{Name: "Adam", Age: 10}, Result: new(jsonStruct), Test: isExpectedEq(jsonStruct{Name: "Adam", Age: 10})},
})
}
@@ -101,8 +101,8 @@ func TestJSONBCodecCustomMarshal(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, connTestRunner, pgxtest.KnownOIDQueryExecModes, "jsonb", []pgxtest.ValueRoundTripTest{
// There is space between "custom" and "value" in jsonb type.
- {map[string]any{"something": "else"}, new(string), isExpectedEq(`{"custom": "value"}`)},
- {[]byte(`{"something":"else"}`), new(map[string]any), func(v any) bool {
+ {Param: map[string]any{"something": "else"}, Result: new(string), Test: isExpectedEq(`{"custom": "value"}`)},
+ {Param: []byte(`{"something":"else"}`), Result: new(map[string]any), Test: func(v any) bool {
return reflect.DeepEqual(v, map[string]any{"custom": "value"})
}},
})
diff --git a/pgtype/line_test.go b/pgtype/line_test.go
index dc980ce10..f215a8033 100644
--- a/pgtype/line_test.go
+++ b/pgtype/line_test.go
@@ -31,28 +31,28 @@ func TestLineTranscode(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, ctr, nil, "line", []pgxtest.ValueRoundTripTest{
{
- pgtype.Line{
+ Param: pgtype.Line{
A: 1.23, B: 4.56, C: 7.89012345,
Valid: true,
},
- new(pgtype.Line),
- isExpectedEq(pgtype.Line{
+ Result: new(pgtype.Line),
+ Test: isExpectedEq(pgtype.Line{
A: 1.23, B: 4.56, C: 7.89012345,
Valid: true,
}),
},
{
- pgtype.Line{
+ Param: pgtype.Line{
A: -1.23, B: -4.56, C: -7.89,
Valid: true,
},
- new(pgtype.Line),
- isExpectedEq(pgtype.Line{
+ Result: new(pgtype.Line),
+ Test: isExpectedEq(pgtype.Line{
A: -1.23, B: -4.56, C: -7.89,
Valid: true,
}),
},
- {pgtype.Line{}, new(pgtype.Line), isExpectedEq(pgtype.Line{})},
- {nil, new(pgtype.Line), isExpectedEq(pgtype.Line{})},
+ {Param: pgtype.Line{}, Result: new(pgtype.Line), Test: isExpectedEq(pgtype.Line{})},
+ {Param: nil, Result: new(pgtype.Line), Test: isExpectedEq(pgtype.Line{})},
})
}
diff --git a/pgtype/lseg_test.go b/pgtype/lseg_test.go
index 04fde0ebd..ccc5ae69d 100644
--- a/pgtype/lseg_test.go
+++ b/pgtype/lseg_test.go
@@ -13,28 +13,28 @@ func TestLsegTranscode(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "lseg", []pgxtest.ValueRoundTripTest{
{
- pgtype.Lseg{
+ Param: pgtype.Lseg{
P: [2]pgtype.Vec2{{3.14, 1.678}, {7.1, 5.2345678901}},
Valid: true,
},
- new(pgtype.Lseg),
- isExpectedEq(pgtype.Lseg{
+ Result: new(pgtype.Lseg),
+ Test: isExpectedEq(pgtype.Lseg{
P: [2]pgtype.Vec2{{3.14, 1.678}, {7.1, 5.2345678901}},
Valid: true,
}),
},
{
- pgtype.Lseg{
+ Param: pgtype.Lseg{
P: [2]pgtype.Vec2{{7.1, 1.678}, {-13.14, -5.234}},
Valid: true,
},
- new(pgtype.Lseg),
- isExpectedEq(pgtype.Lseg{
+ Result: new(pgtype.Lseg),
+ Test: isExpectedEq(pgtype.Lseg{
P: [2]pgtype.Vec2{{7.1, 1.678}, {-13.14, -5.234}},
Valid: true,
}),
},
- {pgtype.Lseg{}, new(pgtype.Lseg), isExpectedEq(pgtype.Lseg{})},
- {nil, new(pgtype.Lseg), isExpectedEq(pgtype.Lseg{})},
+ {Param: pgtype.Lseg{}, Result: new(pgtype.Lseg), Test: isExpectedEq(pgtype.Lseg{})},
+ {Param: nil, Result: new(pgtype.Lseg), Test: isExpectedEq(pgtype.Lseg{})},
})
}
diff --git a/pgtype/ltree.go b/pgtype/ltree.go
index 6af317794..979b15b38 100644
--- a/pgtype/ltree.go
+++ b/pgtype/ltree.go
@@ -21,7 +21,7 @@ func (l LtreeCodec) PreferredFormat() int16 {
func (l LtreeCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
switch format {
case TextFormatCode:
- return (TextCodec)(l).PlanEncode(m, oid, format, value)
+ return TextCodec(l).PlanEncode(m, oid, format, value)
case BinaryFormatCode:
switch value.(type) {
case string:
@@ -72,7 +72,7 @@ func (encodeLtreeCodecBinaryTextValuer) Encode(value any, buf []byte) (newBuf []
func (l LtreeCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
switch format {
case TextFormatCode:
- return (TextCodec)(l).PlanScan(m, oid, format, target)
+ return TextCodec(l).PlanScan(m, oid, format, target)
case BinaryFormatCode:
switch target.(type) {
case *string:
@@ -113,10 +113,10 @@ func (scanPlanBinaryLtreeToTextScanner) Scan(src []byte, target any) error {
// DecodeDatabaseSQLValue returns src decoded into a value compatible with the sql.Scanner interface.
func (l LtreeCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
- return (TextCodec)(l).DecodeDatabaseSQLValue(m, oid, format, src)
+ return TextCodec(l).DecodeDatabaseSQLValue(m, oid, format, src)
}
// DecodeValue returns src decoded into its default format.
func (l LtreeCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
- return (TextCodec)(l).DecodeValue(m, oid, format, src)
+ return TextCodec(l).DecodeValue(m, oid, format, src)
}
diff --git a/pgtype/macaddr_test.go b/pgtype/macaddr_test.go
index 58149c87e..03519aced 100644
--- a/pgtype/macaddr_test.go
+++ b/pgtype/macaddr_test.go
@@ -32,39 +32,39 @@ func TestMacaddrCodec(t *testing.T) {
// Only testing known OID query exec modes as net.HardwareAddr could map to macaddr or macaddr8.
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, pgxtest.KnownOIDQueryExecModes, "macaddr", []pgxtest.ValueRoundTripTest{
{
- mustParseMacaddr(t, "01:23:45:67:89:ab"),
- new(net.HardwareAddr),
- isExpectedEqHardwareAddr(mustParseMacaddr(t, "01:23:45:67:89:ab")),
+ Param: mustParseMacaddr(t, "01:23:45:67:89:ab"),
+ Result: new(net.HardwareAddr),
+ Test: isExpectedEqHardwareAddr(mustParseMacaddr(t, "01:23:45:67:89:ab")),
},
{
- "01:23:45:67:89:ab",
- new(net.HardwareAddr),
- isExpectedEqHardwareAddr(mustParseMacaddr(t, "01:23:45:67:89:ab")),
+ Param: "01:23:45:67:89:ab",
+ Result: new(net.HardwareAddr),
+ Test: isExpectedEqHardwareAddr(mustParseMacaddr(t, "01:23:45:67:89:ab")),
},
{
- mustParseMacaddr(t, "01:23:45:67:89:ab"),
- new(string),
- isExpectedEq("01:23:45:67:89:ab"),
+ Param: mustParseMacaddr(t, "01:23:45:67:89:ab"),
+ Result: new(string),
+ Test: isExpectedEq("01:23:45:67:89:ab"),
},
- {nil, new(*net.HardwareAddr), isExpectedEq((*net.HardwareAddr)(nil))},
+ {Param: nil, Result: new(*net.HardwareAddr), Test: isExpectedEq((*net.HardwareAddr)(nil))},
})
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, pgxtest.KnownOIDQueryExecModes, "macaddr8", []pgxtest.ValueRoundTripTest{
{
- mustParseMacaddr(t, "01:23:45:67:89:ab:01:08"),
- new(net.HardwareAddr),
- isExpectedEqHardwareAddr(mustParseMacaddr(t, "01:23:45:67:89:ab:01:08")),
+ Param: mustParseMacaddr(t, "01:23:45:67:89:ab:01:08"),
+ Result: new(net.HardwareAddr),
+ Test: isExpectedEqHardwareAddr(mustParseMacaddr(t, "01:23:45:67:89:ab:01:08")),
},
{
- "01:23:45:67:89:ab:01:08",
- new(net.HardwareAddr),
- isExpectedEqHardwareAddr(mustParseMacaddr(t, "01:23:45:67:89:ab:01:08")),
+ Param: "01:23:45:67:89:ab:01:08",
+ Result: new(net.HardwareAddr),
+ Test: isExpectedEqHardwareAddr(mustParseMacaddr(t, "01:23:45:67:89:ab:01:08")),
},
{
- mustParseMacaddr(t, "01:23:45:67:89:ab:01:08"),
- new(string),
- isExpectedEq("01:23:45:67:89:ab:01:08"),
+ Param: mustParseMacaddr(t, "01:23:45:67:89:ab:01:08"),
+ Result: new(string),
+ Test: isExpectedEq("01:23:45:67:89:ab:01:08"),
},
- {nil, new(*net.HardwareAddr), isExpectedEq((*net.HardwareAddr)(nil))},
+ {Param: nil, Result: new(*net.HardwareAddr), Test: isExpectedEq((*net.HardwareAddr)(nil))},
})
}
diff --git a/pgtype/multirange.go b/pgtype/multirange.go
index 549917e4c..223c6d48a 100644
--- a/pgtype/multirange.go
+++ b/pgtype/multirange.go
@@ -262,9 +262,9 @@ func (c *MultirangeCodec) decodeText(m *Map, multirangeOID uint32, src []byte, m
elementScanPlan = m.PlanScan(c.ElementType.OID, TextFormatCode, multirange.ScanIndex(0))
}
- for i, s := range elements {
+ for i := range elements {
elem := multirange.ScanIndex(i)
- err = elementScanPlan.Scan([]byte(s), elem)
+ err = elementScanPlan.Scan([]byte(elements[i]), elem)
if err != nil {
return err
}
diff --git a/pgtype/multirange_test.go b/pgtype/multirange_test.go
index fe53083b8..38ac008af 100644
--- a/pgtype/multirange_test.go
+++ b/pgtype/multirange_test.go
@@ -17,17 +17,17 @@ func TestMultirangeCodecTranscode(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "int4multirange", []pgxtest.ValueRoundTripTest{
{
- pgtype.Multirange[pgtype.Range[pgtype.Int4]](nil),
- new(pgtype.Multirange[pgtype.Range[pgtype.Int4]]),
- func(a any) bool { return reflect.DeepEqual(pgtype.Multirange[pgtype.Range[pgtype.Int4]](nil), a) },
+ Param: pgtype.Multirange[pgtype.Range[pgtype.Int4]](nil),
+ Result: new(pgtype.Multirange[pgtype.Range[pgtype.Int4]]),
+ Test: func(a any) bool { return reflect.DeepEqual(pgtype.Multirange[pgtype.Range[pgtype.Int4]](nil), a) },
},
{
- pgtype.Multirange[pgtype.Range[pgtype.Int4]]{},
- new(pgtype.Multirange[pgtype.Range[pgtype.Int4]]),
- func(a any) bool { return reflect.DeepEqual(pgtype.Multirange[pgtype.Range[pgtype.Int4]]{}, a) },
+ Param: pgtype.Multirange[pgtype.Range[pgtype.Int4]]{},
+ Result: new(pgtype.Multirange[pgtype.Range[pgtype.Int4]]),
+ Test: func(a any) bool { return reflect.DeepEqual(pgtype.Multirange[pgtype.Range[pgtype.Int4]]{}, a) },
},
{
- pgtype.Multirange[pgtype.Range[pgtype.Int4]]{
+ Param: pgtype.Multirange[pgtype.Range[pgtype.Int4]]{
{
Lower: pgtype.Int4{Int32: 1, Valid: true},
Upper: pgtype.Int4{Int32: 5, Valid: true},
@@ -43,8 +43,8 @@ func TestMultirangeCodecTranscode(t *testing.T) {
Valid: true,
},
},
- new(pgtype.Multirange[pgtype.Range[pgtype.Int4]]),
- func(a any) bool {
+ Result: new(pgtype.Multirange[pgtype.Range[pgtype.Int4]]),
+ Test: func(a any) bool {
return reflect.DeepEqual(pgtype.Multirange[pgtype.Range[pgtype.Int4]]{
{
Lower: pgtype.Int4{Int32: 1, Valid: true},
diff --git a/pgtype/numeric.go b/pgtype/numeric.go
index b295c2ada..c02806aaf 100644
--- a/pgtype/numeric.go
+++ b/pgtype/numeric.go
@@ -264,11 +264,13 @@ func (n *Numeric) UnmarshalJSON(src []byte) error {
// numberString returns a string of the number. undefined if NaN, infinite, or NULL
func (n Numeric) numberTextBytes() []byte {
- intStr := n.Int.String()
- buf := &bytes.Buffer{}
+ var (
+ intStr = n.Int.String()
+ buf = &bytes.Buffer{}
+ )
- if len(intStr) > 0 && intStr[:1] == "-" {
+ if intStr != "" && intStr[:1] == "-" {
intStr = intStr[1:]
buf.WriteByte('-')
}
@@ -627,7 +629,7 @@ func (scanPlanBinaryNumericToNumericScanner) Scan(src []byte, dst any) error {
accum := &big.Int{}
- for i := 0; i < int(ndigits+3)/4; i++ {
+ for i := range int(ndigits+3) / 4 {
int64accum, bytesRead, digitsRead := nbaseDigitsToInt64(src[rp:])
rp += bytesRead
diff --git a/pgtype/numeric_test.go b/pgtype/numeric_test.go
index fa77fcf8e..e6c35ef8e 100644
--- a/pgtype/numeric_test.go
+++ b/pgtype/numeric_test.go
@@ -76,58 +76,58 @@ func mustParseNumeric(t *testing.T, src string) pgtype.Numeric {
func TestNumericCodec(t *testing.T) {
skipCockroachDB(t, "server formats numeric text format differently")
- max := new(big.Int).Exp(big.NewInt(10), big.NewInt(147454), nil)
- max.Add(max, big.NewInt(1))
- longestNumeric := pgtype.Numeric{Int: max, Exp: -16383, Valid: true}
+ maxVar := new(big.Int).Exp(big.NewInt(10), big.NewInt(147454), nil)
+ maxVar.Add(maxVar, big.NewInt(1))
+ longestNumeric := pgtype.Numeric{Int: maxVar, Exp: -16383, Valid: true}
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "numeric", []pgxtest.ValueRoundTripTest{
- {mustParseNumeric(t, "1"), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, "1"))},
- {mustParseNumeric(t, "3.14159"), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, "3.14159"))},
- {mustParseNumeric(t, "100010001"), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, "100010001"))},
- {mustParseNumeric(t, "100010001.0001"), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, "100010001.0001"))},
- {mustParseNumeric(t, "4237234789234789289347892374324872138321894178943189043890124832108934.43219085471578891547854892438945012347981"), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, "4237234789234789289347892374324872138321894178943189043890124832108934.43219085471578891547854892438945012347981"))},
- {mustParseNumeric(t, "0.8925092023480223478923478978978937897879595901237890234789243679037419057877231734823098432903527585734549035904590854890345905434578345789347890402348952348905890489054234237489234987723894789234"), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, "0.8925092023480223478923478978978937897879595901237890234789243679037419057877231734823098432903527585734549035904590854890345905434578345789347890402348952348905890489054234237489234987723894789234"))},
- {mustParseNumeric(t, "0.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000123"), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, "0.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000123"))},
- {mustParseNumeric(t, "67"+strings.Repeat("0", 44535)+".0"), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, "67"+strings.Repeat("0", 44535)+".0"))},
- {pgtype.Numeric{Int: mustParseBigInt(t, "243723409723490243842378942378901237502734019231380123"), Exp: 23790, Valid: true}, new(pgtype.Numeric), isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "243723409723490243842378942378901237502734019231380123"), Exp: 23790, Valid: true})},
- {pgtype.Numeric{Int: mustParseBigInt(t, "2437"), Exp: 23790, Valid: true}, new(pgtype.Numeric), isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "2437"), Exp: 23790, Valid: true})},
- {pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 80, Valid: true}, new(pgtype.Numeric), isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 80, Valid: true})},
- {pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 81, Valid: true}, new(pgtype.Numeric), isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 81, Valid: true})},
- {pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 82, Valid: true}, new(pgtype.Numeric), isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 82, Valid: true})},
- {pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 83, Valid: true}, new(pgtype.Numeric), isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 83, Valid: true})},
- {pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 84, Valid: true}, new(pgtype.Numeric), isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 84, Valid: true})},
- {pgtype.Numeric{Int: mustParseBigInt(t, "913423409823409243892349028349023482934092340892390101"), Exp: -14021, Valid: true}, new(pgtype.Numeric), isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "913423409823409243892349028349023482934092340892390101"), Exp: -14021, Valid: true})},
- {pgtype.Numeric{Int: mustParseBigInt(t, "13423409823409243892349028349023482934092340892390101"), Exp: -90, Valid: true}, new(pgtype.Numeric), isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "13423409823409243892349028349023482934092340892390101"), Exp: -90, Valid: true})},
- {pgtype.Numeric{Int: mustParseBigInt(t, "13423409823409243892349028349023482934092340892390101"), Exp: -91, Valid: true}, new(pgtype.Numeric), isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "13423409823409243892349028349023482934092340892390101"), Exp: -91, Valid: true})},
- {pgtype.Numeric{Int: mustParseBigInt(t, "13423409823409243892349028349023482934092340892390101"), Exp: -92, Valid: true}, new(pgtype.Numeric), isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "13423409823409243892349028349023482934092340892390101"), Exp: -92, Valid: true})},
- {pgtype.Numeric{Int: mustParseBigInt(t, "13423409823409243892349028349023482934092340892390101"), Exp: -93, Valid: true}, new(pgtype.Numeric), isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "13423409823409243892349028349023482934092340892390101"), Exp: -93, Valid: true})},
- {pgtype.Numeric{NaN: true, Valid: true}, new(pgtype.Numeric), isExpectedEqNumeric(pgtype.Numeric{NaN: true, Valid: true})},
- {longestNumeric, new(pgtype.Numeric), isExpectedEqNumeric(longestNumeric)},
- {mustParseNumeric(t, "1"), new(int64), isExpectedEq(int64(1))},
- {math.NaN(), new(float64), func(a any) bool { return math.IsNaN(a.(float64)) }},
- {float32(math.NaN()), new(float32), func(a any) bool { return math.IsNaN(float64(a.(float32))) }},
- {int64(-1), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, "-1"))},
- {int64(0), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, "0"))},
- {int64(1), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, "1"))},
- {int64(math.MinInt64), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, strconv.FormatInt(math.MinInt64, 10)))},
- {int64(math.MinInt64 + 1), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, strconv.FormatInt(math.MinInt64+1, 10)))},
- {int64(math.MaxInt64), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, strconv.FormatInt(math.MaxInt64, 10)))},
- {int64(math.MaxInt64 - 1), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, strconv.FormatInt(math.MaxInt64-1, 10)))},
- {uint64(100), new(uint64), isExpectedEq(uint64(100))},
- {uint64(math.MaxUint64), new(uint64), isExpectedEq(uint64(math.MaxUint64))},
- {uint(math.MaxUint), new(uint), isExpectedEq(uint(math.MaxUint))},
- {uint(100), new(uint), isExpectedEq(uint(100))},
- {"1.23", new(string), isExpectedEq("1.23")},
- {pgtype.Numeric{}, new(pgtype.Numeric), isExpectedEq(pgtype.Numeric{})},
- {nil, new(pgtype.Numeric), isExpectedEq(pgtype.Numeric{})},
- {mustParseNumeric(t, "1"), new(string), isExpectedEq("1")},
- {pgtype.Numeric{NaN: true, Valid: true}, new(string), isExpectedEq("NaN")},
+ {Param: mustParseNumeric(t, "1"), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, "1"))},
+ {Param: mustParseNumeric(t, "3.14159"), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, "3.14159"))},
+ {Param: mustParseNumeric(t, "100010001"), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, "100010001"))},
+ {Param: mustParseNumeric(t, "100010001.0001"), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, "100010001.0001"))},
+ {Param: mustParseNumeric(t, "4237234789234789289347892374324872138321894178943189043890124832108934.43219085471578891547854892438945012347981"), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, "4237234789234789289347892374324872138321894178943189043890124832108934.43219085471578891547854892438945012347981"))},
+ {Param: mustParseNumeric(t, "0.8925092023480223478923478978978937897879595901237890234789243679037419057877231734823098432903527585734549035904590854890345905434578345789347890402348952348905890489054234237489234987723894789234"), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, "0.8925092023480223478923478978978937897879595901237890234789243679037419057877231734823098432903527585734549035904590854890345905434578345789347890402348952348905890489054234237489234987723894789234"))},
+ {Param: mustParseNumeric(t, "0.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000123"), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, "0.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000123"))},
+ {Param: mustParseNumeric(t, "67"+strings.Repeat("0", 44535)+".0"), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, "67"+strings.Repeat("0", 44535)+".0"))},
+ {Param: pgtype.Numeric{Int: mustParseBigInt(t, "243723409723490243842378942378901237502734019231380123"), Exp: 23790, Valid: true}, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "243723409723490243842378942378901237502734019231380123"), Exp: 23790, Valid: true})},
+ {Param: pgtype.Numeric{Int: mustParseBigInt(t, "2437"), Exp: 23790, Valid: true}, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "2437"), Exp: 23790, Valid: true})},
+ {Param: pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 80, Valid: true}, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 80, Valid: true})},
+ {Param: pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 81, Valid: true}, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 81, Valid: true})},
+ {Param: pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 82, Valid: true}, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 82, Valid: true})},
+ {Param: pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 83, Valid: true}, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 83, Valid: true})},
+ {Param: pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 84, Valid: true}, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "43723409723490243842378942378901237502734019231380123"), Exp: 84, Valid: true})},
+ {Param: pgtype.Numeric{Int: mustParseBigInt(t, "913423409823409243892349028349023482934092340892390101"), Exp: -14021, Valid: true}, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "913423409823409243892349028349023482934092340892390101"), Exp: -14021, Valid: true})},
+ {Param: pgtype.Numeric{Int: mustParseBigInt(t, "13423409823409243892349028349023482934092340892390101"), Exp: -90, Valid: true}, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "13423409823409243892349028349023482934092340892390101"), Exp: -90, Valid: true})},
+ {Param: pgtype.Numeric{Int: mustParseBigInt(t, "13423409823409243892349028349023482934092340892390101"), Exp: -91, Valid: true}, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "13423409823409243892349028349023482934092340892390101"), Exp: -91, Valid: true})},
+ {Param: pgtype.Numeric{Int: mustParseBigInt(t, "13423409823409243892349028349023482934092340892390101"), Exp: -92, Valid: true}, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "13423409823409243892349028349023482934092340892390101"), Exp: -92, Valid: true})},
+ {Param: pgtype.Numeric{Int: mustParseBigInt(t, "13423409823409243892349028349023482934092340892390101"), Exp: -93, Valid: true}, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(pgtype.Numeric{Int: mustParseBigInt(t, "13423409823409243892349028349023482934092340892390101"), Exp: -93, Valid: true})},
+ {Param: pgtype.Numeric{NaN: true, Valid: true}, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(pgtype.Numeric{NaN: true, Valid: true})},
+ {Param: longestNumeric, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(longestNumeric)},
+ {Param: mustParseNumeric(t, "1"), Result: new(int64), Test: isExpectedEq(int64(1))},
+ {Param: math.NaN(), Result: new(float64), Test: func(a any) bool { return math.IsNaN(a.(float64)) }},
+ {Param: float32(math.NaN()), Result: new(float32), Test: func(a any) bool { return math.IsNaN(float64(a.(float32))) }},
+ {Param: int64(-1), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, "-1"))},
+ {Param: int64(0), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, "0"))},
+ {Param: int64(1), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, "1"))},
+ {Param: int64(math.MinInt64), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, strconv.FormatInt(math.MinInt64, 10)))},
+ {Param: int64(math.MinInt64 + 1), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, strconv.FormatInt(math.MinInt64+1, 10)))},
+ {Param: int64(math.MaxInt64), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, strconv.FormatInt(math.MaxInt64, 10)))},
+ {Param: int64(math.MaxInt64 - 1), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, strconv.FormatInt(math.MaxInt64-1, 10)))},
+ {Param: uint64(100), Result: new(uint64), Test: isExpectedEq(uint64(100))},
+ {Param: uint64(math.MaxUint64), Result: new(uint64), Test: isExpectedEq(uint64(math.MaxUint64))},
+ {Param: uint(math.MaxUint), Result: new(uint), Test: isExpectedEq(uint(math.MaxUint))},
+ {Param: uint(100), Result: new(uint), Test: isExpectedEq(uint(100))},
+ {Param: "1.23", Result: new(string), Test: isExpectedEq("1.23")},
+ {Param: pgtype.Numeric{}, Result: new(pgtype.Numeric), Test: isExpectedEq(pgtype.Numeric{})},
+ {Param: nil, Result: new(pgtype.Numeric), Test: isExpectedEq(pgtype.Numeric{})},
+ {Param: mustParseNumeric(t, "1"), Result: new(string), Test: isExpectedEq("1")},
+ {Param: pgtype.Numeric{NaN: true, Valid: true}, Result: new(string), Test: isExpectedEq("NaN")},
})
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "int8", []pgxtest.ValueRoundTripTest{
- {mustParseNumeric(t, "-1"), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, "-1"))},
- {mustParseNumeric(t, "0"), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, "0"))},
- {mustParseNumeric(t, "1"), new(pgtype.Numeric), isExpectedEqNumeric(mustParseNumeric(t, "1"))},
+ {Param: mustParseNumeric(t, "-1"), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, "-1"))},
+ {Param: mustParseNumeric(t, "0"), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, "0"))},
+ {Param: mustParseNumeric(t, "1"), Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(mustParseNumeric(t, "1"))},
})
}
@@ -136,14 +136,14 @@ func TestNumericCodecInfinity(t *testing.T) {
skipPostgreSQLVersionLessThan(t, 14)
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "numeric", []pgxtest.ValueRoundTripTest{
- {math.Inf(1), new(float64), isExpectedEq(math.Inf(1))},
- {float32(math.Inf(1)), new(float32), isExpectedEq(float32(math.Inf(1)))},
- {math.Inf(-1), new(float64), isExpectedEq(math.Inf(-1))},
- {float32(math.Inf(-1)), new(float32), isExpectedEq(float32(math.Inf(-1)))},
- {pgtype.Numeric{InfinityModifier: pgtype.Infinity, Valid: true}, new(pgtype.Numeric), isExpectedEqNumeric(pgtype.Numeric{InfinityModifier: pgtype.Infinity, Valid: true})},
- {pgtype.Numeric{InfinityModifier: pgtype.NegativeInfinity, Valid: true}, new(pgtype.Numeric), isExpectedEqNumeric(pgtype.Numeric{InfinityModifier: pgtype.NegativeInfinity, Valid: true})},
- {pgtype.Numeric{InfinityModifier: pgtype.Infinity, Valid: true}, new(string), isExpectedEq("Infinity")},
- {pgtype.Numeric{InfinityModifier: pgtype.NegativeInfinity, Valid: true}, new(string), isExpectedEq("-Infinity")},
+ {Param: math.Inf(1), Result: new(float64), Test: isExpectedEq(math.Inf(1))},
+ {Param: float32(math.Inf(1)), Result: new(float32), Test: isExpectedEq(float32(math.Inf(1)))},
+ {Param: math.Inf(-1), Result: new(float64), Test: isExpectedEq(math.Inf(-1))},
+ {Param: float32(math.Inf(-1)), Result: new(float32), Test: isExpectedEq(float32(math.Inf(-1)))},
+ {Param: pgtype.Numeric{InfinityModifier: pgtype.Infinity, Valid: true}, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(pgtype.Numeric{InfinityModifier: pgtype.Infinity, Valid: true})},
+ {Param: pgtype.Numeric{InfinityModifier: pgtype.NegativeInfinity, Valid: true}, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(pgtype.Numeric{InfinityModifier: pgtype.NegativeInfinity, Valid: true})},
+ {Param: pgtype.Numeric{InfinityModifier: pgtype.Infinity, Valid: true}, Result: new(string), Test: isExpectedEq("Infinity")},
+ {Param: pgtype.Numeric{InfinityModifier: pgtype.NegativeInfinity, Valid: true}, Result: new(string), Test: isExpectedEq("-Infinity")},
})
}
@@ -175,13 +175,13 @@ func TestNumericCodecFuzz(t *testing.T) {
skipCockroachDB(t, "server formats numeric text format differently")
r := rand.New(rand.NewPCG(0, 0))
- max := &big.Int{}
- max.SetString("9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999", 10)
+ maxVar := &big.Int{}
+ maxVar.SetString("9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999", 10)
tests := make([]pgxtest.ValueRoundTripTest, 0, 2000)
for range 10 {
for j := -50; j < 50; j++ {
- byteLen := (max.BitLen() + 7) / 8
+ byteLen := (maxVar.BitLen() + 7) / 8
bytes := make([]byte, byteLen)
for k := 0; k < byteLen; {
val := r.Uint64()
@@ -191,15 +191,15 @@ func TestNumericCodecFuzz(t *testing.T) {
}
}
num := new(big.Int).SetBytes(bytes)
- num.Mod(num, max)
+ num.Mod(num, maxVar)
n := pgtype.Numeric{Int: num, Exp: int32(j), Valid: true}
- tests = append(tests, pgxtest.ValueRoundTripTest{n, new(pgtype.Numeric), isExpectedEqNumeric(n)})
+ tests = append(tests, pgxtest.ValueRoundTripTest{Param: n, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(n)})
negNum := &big.Int{}
negNum.Neg(num)
n = pgtype.Numeric{Int: negNum, Exp: int32(j), Valid: true}
- tests = append(tests, pgxtest.ValueRoundTripTest{n, new(pgtype.Numeric), isExpectedEqNumeric(n)})
+ tests = append(tests, pgxtest.ValueRoundTripTest{Param: n, Result: new(pgtype.Numeric), Test: isExpectedEqNumeric(n)})
}
}
diff --git a/pgtype/path.go b/pgtype/path.go
index 685996a89..ad8e4e4d8 100644
--- a/pgtype/path.go
+++ b/pgtype/path.go
@@ -110,9 +110,9 @@ func (encodePlanPathCodecBinary) Encode(value any, buf []byte) (newBuf []byte, e
buf = pgio.AppendInt32(buf, int32(len(path.P)))
- for _, p := range path.P {
- buf = pgio.AppendUint64(buf, math.Float64bits(p.X))
- buf = pgio.AppendUint64(buf, math.Float64bits(p.Y))
+ for i := range path.P {
+ buf = pgio.AppendUint64(buf, math.Float64bits(path.P[i].X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(path.P[i].Y))
}
return buf, nil
@@ -140,13 +140,13 @@ func (encodePlanPathCodecText) Encode(value any, buf []byte) (newBuf []byte, err
}
buf = append(buf, startByte)
- for i, p := range path.P {
+ for i := range path.P {
if i > 0 {
buf = append(buf, ',')
}
buf = append(buf, fmt.Sprintf(`(%s,%s)`,
- strconv.FormatFloat(p.X, 'f', -1, 64),
- strconv.FormatFloat(p.Y, 'f', -1, 64),
+ strconv.FormatFloat(path.P[i].X, 'f', -1, 64),
+ strconv.FormatFloat(path.P[i].Y, 'f', -1, 64),
)...)
}
diff --git a/pgtype/path_test.go b/pgtype/path_test.go
index cfffd22a6..43524ed5e 100644
--- a/pgtype/path_test.go
+++ b/pgtype/path_test.go
@@ -32,45 +32,45 @@ func TestPathTranscode(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "path", []pgxtest.ValueRoundTripTest{
{
- pgtype.Path{
+ Param: pgtype.Path{
P: []pgtype.Vec2{{3.14, 1.678901234}, {7.1, 5.234}},
Closed: false,
Valid: true,
},
- new(pgtype.Path),
- isExpectedEqPath(pgtype.Path{
+ Result: new(pgtype.Path),
+ Test: isExpectedEqPath(pgtype.Path{
P: []pgtype.Vec2{{3.14, 1.678901234}, {7.1, 5.234}},
Closed: false,
Valid: true,
}),
},
{
- pgtype.Path{
+ Param: pgtype.Path{
P: []pgtype.Vec2{{3.14, 1.678}, {7.1, 5.234}, {23.1, 9.34}},
Closed: true,
Valid: true,
},
- new(pgtype.Path),
- isExpectedEqPath(pgtype.Path{
+ Result: new(pgtype.Path),
+ Test: isExpectedEqPath(pgtype.Path{
P: []pgtype.Vec2{{3.14, 1.678}, {7.1, 5.234}, {23.1, 9.34}},
Closed: true,
Valid: true,
}),
},
{
- pgtype.Path{
+ Param: pgtype.Path{
P: []pgtype.Vec2{{7.1, 1.678}, {-13.14, -5.234}},
Closed: true,
Valid: true,
},
- new(pgtype.Path),
- isExpectedEqPath(pgtype.Path{
+ Result: new(pgtype.Path),
+ Test: isExpectedEqPath(pgtype.Path{
P: []pgtype.Vec2{{7.1, 1.678}, {-13.14, -5.234}},
Closed: true,
Valid: true,
}),
},
- {pgtype.Path{}, new(pgtype.Path), isExpectedEqPath(pgtype.Path{})},
- {nil, new(pgtype.Path), isExpectedEqPath(pgtype.Path{})},
+ {Param: pgtype.Path{}, Result: new(pgtype.Path), Test: isExpectedEqPath(pgtype.Path{})},
+ {Param: nil, Result: new(pgtype.Path), Test: isExpectedEqPath(pgtype.Path{})},
})
}
diff --git a/pgtype/pgtype.go b/pgtype/pgtype.go
index 68b63e97a..b0f6b77cf 100644
--- a/pgtype/pgtype.go
+++ b/pgtype/pgtype.go
@@ -220,8 +220,8 @@ type Map struct {
// Copy returns a new Map containing the same registered types.
func (m *Map) Copy() *Map {
newMap := NewMap()
- for _, type_ := range m.oidToType {
- newMap.RegisterType(type_)
+ for i := range m.oidToType {
+ newMap.RegisterType(m.oidToType[i])
}
return newMap
}
@@ -261,8 +261,8 @@ func NewMap() *Map {
// RegisterTypes registers multiple data types in the sequence they are provided.
func (m *Map) RegisterTypes(types []*Type) {
- for _, t := range types {
- m.RegisterType(t)
+ for i := range types {
+ m.RegisterType(types[i])
}
}
@@ -314,8 +314,8 @@ func (m *Map) TypeForName(name string) (*Type, bool) {
func (m *Map) buildReflectTypeToType() {
m.reflectTypeToType = make(map[reflect.Type]*Type)
- for reflectType, name := range m.reflectTypeToName {
- if dt, ok := m.TypeForName(name); ok {
+ for reflectType := range m.reflectTypeToName {
+ if dt, ok := m.TypeForName(m.reflectTypeToName[reflectType]); ok {
m.reflectTypeToType[reflectType] = dt
}
}
@@ -1124,8 +1124,8 @@ func (m *Map) planScan(oid uint32, formatCode int16, target any, depth int) Scan
}
}
- for _, f := range m.TryWrapScanPlanFuncs {
- if wrapperPlan, nextDst, ok := f(target); ok {
+ for i := range m.TryWrapScanPlanFuncs {
+ if wrapperPlan, nextDst, ok := m.TryWrapScanPlanFuncs[i](target); ok {
if nextPlan := m.planScan(oid, formatCode, nextDst, depth+1); nextPlan != nil {
if _, failed := nextPlan.(*scanPlanFail); !failed {
wrapperPlan.SetNext(nextPlan)
@@ -1249,8 +1249,8 @@ func (m *Map) planEncode(oid uint32, format int16, value any, depth int) EncodeP
}
}
- for _, f := range m.TryWrapEncodePlanFuncs {
- if wrapperPlan, nextValue, ok := f(value); ok {
+ for i := range m.TryWrapEncodePlanFuncs {
+ if wrapperPlan, nextValue, ok := m.TryWrapEncodePlanFuncs[i](value); ok {
if nextPlan := m.planEncodeDepth(oid, format, nextValue, depth+1); nextPlan != nil {
wrapperPlan.SetNext(nextPlan)
return wrapperPlan
@@ -1791,7 +1791,7 @@ func (plan *wrapAnyStructEncodePlan) Encode(value any, buf []byte) (newBuf []byt
func getExportedFieldValues(structValue reflect.Value) []reflect.Value {
structType := structValue.Type()
exportedFields := make([]reflect.Value, 0, structValue.NumField())
- for i := 0; i < structType.NumField(); i++ {
+ for i := range structType.NumField() {
sf := structType.Field(i)
if sf.IsExported() {
exportedFields = append(exportedFields, structValue.Field(i))
diff --git a/pgtype/pgtype_test.go b/pgtype/pgtype_test.go
index 5faed9e6b..f8f2e0fd3 100644
--- a/pgtype/pgtype_test.go
+++ b/pgtype/pgtype_test.go
@@ -106,7 +106,7 @@ func skipPostgreSQLVersionLessThan(t testing.TB, minVersion int64) {
defer conn.Close(context.Background())
serverVersionStr := conn.PgConn().ParameterStatus("server_version")
- serverVersionStr = regexp.MustCompile(`^[0-9]+`).FindString(serverVersionStr)
+ serverVersionStr = regexp.MustCompile(`^\d+`).FindString(serverVersionStr)
// if not PostgreSQL do nothing
if serverVersionStr == "" {
return
diff --git a/pgtype/point_test.go b/pgtype/point_test.go
index 336f1a470..778a42d73 100644
--- a/pgtype/point_test.go
+++ b/pgtype/point_test.go
@@ -15,17 +15,17 @@ func TestPointCodec(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "point", []pgxtest.ValueRoundTripTest{
{
- pgtype.Point{P: pgtype.Vec2{1.234, 5.6789012345}, Valid: true},
- new(pgtype.Point),
- isExpectedEq(pgtype.Point{P: pgtype.Vec2{1.234, 5.6789012345}, Valid: true}),
+ Param: pgtype.Point{P: pgtype.Vec2{1.234, 5.6789012345}, Valid: true},
+ Result: new(pgtype.Point),
+ Test: isExpectedEq(pgtype.Point{P: pgtype.Vec2{1.234, 5.6789012345}, Valid: true}),
},
{
- pgtype.Point{P: pgtype.Vec2{-1.234, -5.6789}, Valid: true},
- new(pgtype.Point),
- isExpectedEq(pgtype.Point{P: pgtype.Vec2{-1.234, -5.6789}, Valid: true}),
+ Param: pgtype.Point{P: pgtype.Vec2{-1.234, -5.6789}, Valid: true},
+ Result: new(pgtype.Point),
+ Test: isExpectedEq(pgtype.Point{P: pgtype.Vec2{-1.234, -5.6789}, Valid: true}),
},
- {pgtype.Point{}, new(pgtype.Point), isExpectedEq(pgtype.Point{})},
- {nil, new(pgtype.Point), isExpectedEq(pgtype.Point{})},
+ {Param: pgtype.Point{}, Result: new(pgtype.Point), Test: isExpectedEq(pgtype.Point{})},
+ {Param: nil, Result: new(pgtype.Point), Test: isExpectedEq(pgtype.Point{})},
})
}
diff --git a/pgtype/polygon.go b/pgtype/polygon.go
index e18c9da63..ea45d110e 100644
--- a/pgtype/polygon.go
+++ b/pgtype/polygon.go
@@ -103,9 +103,9 @@ func (encodePlanPolygonCodecBinary) Encode(value any, buf []byte) (newBuf []byte
buf = pgio.AppendInt32(buf, int32(len(polygon.P)))
- for _, p := range polygon.P {
- buf = pgio.AppendUint64(buf, math.Float64bits(p.X))
- buf = pgio.AppendUint64(buf, math.Float64bits(p.Y))
+ for i := range polygon.P {
+ buf = pgio.AppendUint64(buf, math.Float64bits(polygon.P[i].X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(polygon.P[i].Y))
}
return buf, nil
@@ -125,13 +125,13 @@ func (encodePlanPolygonCodecText) Encode(value any, buf []byte) (newBuf []byte,
buf = append(buf, '(')
- for i, p := range polygon.P {
+ for i := range polygon.P {
if i > 0 {
buf = append(buf, ',')
}
buf = append(buf, fmt.Sprintf(`(%s,%s)`,
- strconv.FormatFloat(p.X, 'f', -1, 64),
- strconv.FormatFloat(p.Y, 'f', -1, 64),
+ strconv.FormatFloat(polygon.P[i].X, 'f', -1, 64),
+ strconv.FormatFloat(polygon.P[i].Y, 'f', -1, 64),
)...)
}
diff --git a/pgtype/polygon_test.go b/pgtype/polygon_test.go
index 5ddbc1669..88c978818 100644
--- a/pgtype/polygon_test.go
+++ b/pgtype/polygon_test.go
@@ -32,28 +32,28 @@ func TestPolygonTranscode(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "polygon", []pgxtest.ValueRoundTripTest{
{
- pgtype.Polygon{
+ Param: pgtype.Polygon{
P: []pgtype.Vec2{{3.14, 1.678901234}, {7.1, 5.234}, {5.0, 3.234}},
Valid: true,
},
- new(pgtype.Polygon),
- isExpectedEqPolygon(pgtype.Polygon{
+ Result: new(pgtype.Polygon),
+ Test: isExpectedEqPolygon(pgtype.Polygon{
P: []pgtype.Vec2{{3.14, 1.678901234}, {7.1, 5.234}, {5.0, 3.234}},
Valid: true,
}),
},
{
- pgtype.Polygon{
+ Param: pgtype.Polygon{
P: []pgtype.Vec2{{3.14, -1.678}, {7.1, -5.234}, {23.1, 9.34}},
Valid: true,
},
- new(pgtype.Polygon),
- isExpectedEqPolygon(pgtype.Polygon{
+ Result: new(pgtype.Polygon),
+ Test: isExpectedEqPolygon(pgtype.Polygon{
P: []pgtype.Vec2{{3.14, -1.678}, {7.1, -5.234}, {23.1, 9.34}},
Valid: true,
}),
},
- {pgtype.Polygon{}, new(pgtype.Polygon), isExpectedEqPolygon(pgtype.Polygon{})},
- {nil, new(pgtype.Polygon), isExpectedEqPolygon(pgtype.Polygon{})},
+ {Param: pgtype.Polygon{}, Result: new(pgtype.Polygon), Test: isExpectedEqPolygon(pgtype.Polygon{})},
+ {Param: nil, Result: new(pgtype.Polygon), Test: isExpectedEqPolygon(pgtype.Polygon{})},
})
}
diff --git a/pgtype/qchar_test.go b/pgtype/qchar_test.go
index da00b89e4..58d3b98c4 100644
--- a/pgtype/qchar_test.go
+++ b/pgtype/qchar_test.go
@@ -12,12 +12,12 @@ func TestQcharTranscode(t *testing.T) {
skipCockroachDB(t, "Server does not support qchar")
var tests []pgxtest.ValueRoundTripTest
- for i := 0; i <= math.MaxUint8; i++ {
- tests = append(tests, pgxtest.ValueRoundTripTest{rune(i), new(rune), isExpectedEq(rune(i))})
- tests = append(tests, pgxtest.ValueRoundTripTest{byte(i), new(byte), isExpectedEq(byte(i))})
+ for i := range math.MaxUint8 + 1 {
+ tests = append(tests, pgxtest.ValueRoundTripTest{Param: rune(i), Result: new(rune), Test: isExpectedEq(rune(i))})
+ tests = append(tests, pgxtest.ValueRoundTripTest{Param: byte(i), Result: new(byte), Test: isExpectedEq(byte(i))})
}
- tests = append(tests, pgxtest.ValueRoundTripTest{nil, new(*rune), isExpectedEq((*rune)(nil))})
- tests = append(tests, pgxtest.ValueRoundTripTest{nil, new(*byte), isExpectedEq((*byte)(nil))})
+ tests = append(tests, pgxtest.ValueRoundTripTest{Param: nil, Result: new(*rune), Test: isExpectedEq((*rune)(nil))})
+ tests = append(tests, pgxtest.ValueRoundTripTest{Param: nil, Result: new(*byte), Test: isExpectedEq((*byte)(nil))})
// Can only test with known OIDs as rune and byte would be considered numbers.
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, pgxtest.KnownOIDQueryExecModes, `"char"`, tests)
diff --git a/pgtype/range_codec.go b/pgtype/range_codec.go
index 684f1bf73..4f5dbb53a 100644
--- a/pgtype/range_codec.go
+++ b/pgtype/range_codec.go
@@ -107,7 +107,7 @@ func (plan *encodePlanRangeCodecRangeValuerToBinary) Encode(value any, buf []byt
if lowerType != Unbounded {
if lower == nil {
- return nil, fmt.Errorf("Lower cannot be NULL unless LowerType is Unbounded")
+ return nil, fmt.Errorf("lower cannot be NULL unless LowerType is Unbounded")
}
sp := len(buf)
@@ -123,7 +123,7 @@ func (plan *encodePlanRangeCodecRangeValuerToBinary) Encode(value any, buf []byt
return nil, fmt.Errorf("failed to encode %v as element of range: %w", lower, err)
}
if buf == nil {
- return nil, fmt.Errorf("Lower cannot be NULL unless LowerType is Unbounded")
+ return nil, fmt.Errorf("lower cannot be NULL unless LowerType is Unbounded")
}
pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
@@ -131,7 +131,7 @@ func (plan *encodePlanRangeCodecRangeValuerToBinary) Encode(value any, buf []byt
if upperType != Unbounded {
if upper == nil {
- return nil, fmt.Errorf("Upper cannot be NULL unless UpperType is Unbounded")
+ return nil, fmt.Errorf("upper cannot be NULL unless UpperType is Unbounded")
}
sp := len(buf)
@@ -147,7 +147,7 @@ func (plan *encodePlanRangeCodecRangeValuerToBinary) Encode(value any, buf []byt
return nil, fmt.Errorf("failed to encode %v as element of range: %w", upper, err)
}
if buf == nil {
- return nil, fmt.Errorf("Upper cannot be NULL unless UpperType is Unbounded")
+ return nil, fmt.Errorf("upper cannot be NULL unless UpperType is Unbounded")
}
pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
@@ -184,7 +184,7 @@ func (plan *encodePlanRangeCodecRangeValuerToText) Encode(value any, buf []byte)
if lowerType != Unbounded {
if lower == nil {
- return nil, fmt.Errorf("Lower cannot be NULL unless LowerType is Unbounded")
+ return nil, fmt.Errorf("lower cannot be NULL unless LowerType is Unbounded")
}
lowerPlan := plan.m.PlanEncode(plan.rc.ElementType.OID, TextFormatCode, lower)
@@ -197,7 +197,7 @@ func (plan *encodePlanRangeCodecRangeValuerToText) Encode(value any, buf []byte)
return nil, fmt.Errorf("failed to encode %v as element of range: %w", lower, err)
}
if buf == nil {
- return nil, fmt.Errorf("Lower cannot be NULL unless LowerType is Unbounded")
+ return nil, fmt.Errorf("lower cannot be NULL unless LowerType is Unbounded")
}
}
@@ -205,7 +205,7 @@ func (plan *encodePlanRangeCodecRangeValuerToText) Encode(value any, buf []byte)
if upperType != Unbounded {
if upper == nil {
- return nil, fmt.Errorf("Upper cannot be NULL unless UpperType is Unbounded")
+ return nil, fmt.Errorf("upper cannot be NULL unless UpperType is Unbounded")
}
upperPlan := plan.m.PlanEncode(plan.rc.ElementType.OID, TextFormatCode, upper)
@@ -218,7 +218,7 @@ func (plan *encodePlanRangeCodecRangeValuerToText) Encode(value any, buf []byte)
return nil, fmt.Errorf("failed to encode %v as element of range: %w", upper, err)
}
if buf == nil {
- return nil, fmt.Errorf("Upper cannot be NULL unless UpperType is Unbounded")
+ return nil, fmt.Errorf("upper cannot be NULL unless UpperType is Unbounded")
}
}
diff --git a/pgtype/range_codec_test.go b/pgtype/range_codec_test.go
index f70b7a590..6769a74b7 100644
--- a/pgtype/range_codec_test.go
+++ b/pgtype/range_codec_test.go
@@ -15,27 +15,27 @@ func TestRangeCodecTranscode(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "int4range", []pgxtest.ValueRoundTripTest{
{
- pgtype.Range[pgtype.Int4]{LowerType: pgtype.Empty, UpperType: pgtype.Empty, Valid: true},
- new(pgtype.Range[pgtype.Int4]),
- isExpectedEq(pgtype.Range[pgtype.Int4]{LowerType: pgtype.Empty, UpperType: pgtype.Empty, Valid: true}),
+ Param: pgtype.Range[pgtype.Int4]{LowerType: pgtype.Empty, UpperType: pgtype.Empty, Valid: true},
+ Result: new(pgtype.Range[pgtype.Int4]),
+ Test: isExpectedEq(pgtype.Range[pgtype.Int4]{LowerType: pgtype.Empty, UpperType: pgtype.Empty, Valid: true}),
},
{
- pgtype.Range[pgtype.Int4]{
+ Param: pgtype.Range[pgtype.Int4]{
LowerType: pgtype.Inclusive,
Lower: pgtype.Int4{Int32: 1, Valid: true},
Upper: pgtype.Int4{Int32: 5, Valid: true},
UpperType: pgtype.Exclusive, Valid: true,
},
- new(pgtype.Range[pgtype.Int4]),
- isExpectedEq(pgtype.Range[pgtype.Int4]{
+ Result: new(pgtype.Range[pgtype.Int4]),
+ Test: isExpectedEq(pgtype.Range[pgtype.Int4]{
LowerType: pgtype.Inclusive,
Lower: pgtype.Int4{Int32: 1, Valid: true},
Upper: pgtype.Int4{Int32: 5, Valid: true},
UpperType: pgtype.Exclusive, Valid: true,
}),
},
- {pgtype.Range[pgtype.Int4]{}, new(pgtype.Range[pgtype.Int4]), isExpectedEq(pgtype.Range[pgtype.Int4]{})},
- {nil, new(pgtype.Range[pgtype.Int4]), isExpectedEq(pgtype.Range[pgtype.Int4]{})},
+ {Param: pgtype.Range[pgtype.Int4]{}, Result: new(pgtype.Range[pgtype.Int4]), Test: isExpectedEq(pgtype.Range[pgtype.Int4]{})},
+ {Param: nil, Result: new(pgtype.Range[pgtype.Int4]), Test: isExpectedEq(pgtype.Range[pgtype.Int4]{})},
})
}
@@ -47,27 +47,27 @@ func TestRangeCodecTranscodeCompatibleRangeElementTypes(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, ctr, nil, "numrange", []pgxtest.ValueRoundTripTest{
{
- pgtype.Range[pgtype.Float8]{LowerType: pgtype.Empty, UpperType: pgtype.Empty, Valid: true},
- new(pgtype.Range[pgtype.Float8]),
- isExpectedEq(pgtype.Range[pgtype.Float8]{LowerType: pgtype.Empty, UpperType: pgtype.Empty, Valid: true}),
+ Param: pgtype.Range[pgtype.Float8]{LowerType: pgtype.Empty, UpperType: pgtype.Empty, Valid: true},
+ Result: new(pgtype.Range[pgtype.Float8]),
+ Test: isExpectedEq(pgtype.Range[pgtype.Float8]{LowerType: pgtype.Empty, UpperType: pgtype.Empty, Valid: true}),
},
{
- pgtype.Range[pgtype.Float8]{
+ Param: pgtype.Range[pgtype.Float8]{
LowerType: pgtype.Inclusive,
Lower: pgtype.Float8{Float64: 1, Valid: true},
Upper: pgtype.Float8{Float64: 5, Valid: true},
UpperType: pgtype.Exclusive, Valid: true,
},
- new(pgtype.Range[pgtype.Float8]),
- isExpectedEq(pgtype.Range[pgtype.Float8]{
+ Result: new(pgtype.Range[pgtype.Float8]),
+ Test: isExpectedEq(pgtype.Range[pgtype.Float8]{
LowerType: pgtype.Inclusive,
Lower: pgtype.Float8{Float64: 1, Valid: true},
Upper: pgtype.Float8{Float64: 5, Valid: true},
UpperType: pgtype.Exclusive, Valid: true,
}),
},
- {pgtype.Range[pgtype.Float8]{}, new(pgtype.Range[pgtype.Float8]), isExpectedEq(pgtype.Range[pgtype.Float8]{})},
- {nil, new(pgtype.Range[pgtype.Float8]), isExpectedEq(pgtype.Range[pgtype.Float8]{})},
+ {Param: pgtype.Range[pgtype.Float8]{}, Result: new(pgtype.Range[pgtype.Float8]), Test: isExpectedEq(pgtype.Range[pgtype.Float8]{})},
+ {Param: nil, Result: new(pgtype.Range[pgtype.Float8]), Test: isExpectedEq(pgtype.Range[pgtype.Float8]{})},
})
}
diff --git a/pgtype/text_test.go b/pgtype/text_test.go
index eb5d005ec..524624355 100644
--- a/pgtype/text_test.go
+++ b/pgtype/text_test.go
@@ -20,18 +20,18 @@ func TestTextCodec(t *testing.T) {
for _, pgTypeName := range []string{"text", "varchar"} {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, pgTypeName, []pgxtest.ValueRoundTripTest{
{
- pgtype.Text{String: "", Valid: true},
- new(pgtype.Text),
- isExpectedEq(pgtype.Text{String: "", Valid: true}),
+ Param: pgtype.Text{String: "", Valid: true},
+ Result: new(pgtype.Text),
+ Test: isExpectedEq(pgtype.Text{String: "", Valid: true}),
},
{
- pgtype.Text{String: "foo", Valid: true},
- new(pgtype.Text),
- isExpectedEq(pgtype.Text{String: "foo", Valid: true}),
+ Param: pgtype.Text{String: "foo", Valid: true},
+ Result: new(pgtype.Text),
+ Test: isExpectedEq(pgtype.Text{String: "foo", Valid: true}),
},
- {nil, new(pgtype.Text), isExpectedEq(pgtype.Text{})},
- {"foo", new(string), isExpectedEq("foo")},
- {someFmtStringer{}, new(string), isExpectedEq("some fmt.Stringer")},
+ {Param: nil, Result: new(pgtype.Text), Test: isExpectedEq(pgtype.Text{})},
+ {Param: "foo", Result: new(string), Test: isExpectedEq("foo")},
+ {Param: someFmtStringer{}, Result: new(string), Test: isExpectedEq("some fmt.Stringer")},
})
}
}
@@ -49,17 +49,17 @@ func TestTextCodec(t *testing.T) {
func TestTextCodecName(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "name", []pgxtest.ValueRoundTripTest{
{
- pgtype.Text{String: "", Valid: true},
- new(pgtype.Text),
- isExpectedEq(pgtype.Text{String: "", Valid: true}),
+ Param: pgtype.Text{String: "", Valid: true},
+ Result: new(pgtype.Text),
+ Test: isExpectedEq(pgtype.Text{String: "", Valid: true}),
},
{
- pgtype.Text{String: "foo", Valid: true},
- new(pgtype.Text),
- isExpectedEq(pgtype.Text{String: "foo", Valid: true}),
+ Param: pgtype.Text{String: "foo", Valid: true},
+ Result: new(pgtype.Text),
+ Test: isExpectedEq(pgtype.Text{String: "foo", Valid: true}),
},
- {nil, new(pgtype.Text), isExpectedEq(pgtype.Text{})},
- {"foo", new(string), isExpectedEq("foo")},
+ {Param: nil, Result: new(pgtype.Text), Test: isExpectedEq(pgtype.Text{})},
+ {Param: "foo", Result: new(string), Test: isExpectedEq("foo")},
})
}
@@ -69,14 +69,14 @@ func TestTextCodecBPChar(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "char(3)", []pgxtest.ValueRoundTripTest{
{
- pgtype.Text{String: "a ", Valid: true},
- new(pgtype.Text),
- isExpectedEq(pgtype.Text{String: "a ", Valid: true}),
+ Param: pgtype.Text{String: "a ", Valid: true},
+ Result: new(pgtype.Text),
+ Test: isExpectedEq(pgtype.Text{String: "a ", Valid: true}),
},
- {nil, new(pgtype.Text), isExpectedEq(pgtype.Text{})},
- {" ", new(string), isExpectedEq(" ")},
- {"", new(string), isExpectedEq(" ")},
- {" å—¨ ", new(string), isExpectedEq(" å—¨ ")},
+ {Param: nil, Result: new(pgtype.Text), Test: isExpectedEq(pgtype.Text{})},
+ {Param: " ", Result: new(string), Test: isExpectedEq(" ")},
+ {Param: "", Result: new(string), Test: isExpectedEq(" ")},
+ {Param: " å—¨ ", Result: new(string), Test: isExpectedEq(" å—¨ ")},
})
}
@@ -101,12 +101,12 @@ func TestTextCodecACLItem(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, ctr, nil, "aclitem", []pgxtest.ValueRoundTripTest{
{
- pgtype.Text{String: "postgres=arwdDxt/postgres", Valid: true},
- new(pgtype.Text),
- isExpectedEq(pgtype.Text{String: "postgres=arwdDxt/postgres", Valid: true}),
+ Param: pgtype.Text{String: "postgres=arwdDxt/postgres", Valid: true},
+ Result: new(pgtype.Text),
+ Test: isExpectedEq(pgtype.Text{String: "postgres=arwdDxt/postgres", Valid: true}),
},
- {pgtype.Text{}, new(pgtype.Text), isExpectedEq(pgtype.Text{})},
- {nil, new(pgtype.Text), isExpectedEq(pgtype.Text{})},
+ {Param: pgtype.Text{}, Result: new(pgtype.Text), Test: isExpectedEq(pgtype.Text{})},
+ {Param: nil, Result: new(pgtype.Text), Test: isExpectedEq(pgtype.Text{})},
})
}
@@ -129,9 +129,9 @@ func TestTextCodecACLItemRoleWithSpecialCharacters(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, ctr, nil, "aclitem", []pgxtest.ValueRoundTripTest{
{
- pgtype.Text{String: `postgres=arwdDxt/" tricky, ' } "" \ test user "`, Valid: true},
- new(pgtype.Text),
- isExpectedEq(pgtype.Text{String: `postgres=arwdDxt/" tricky, ' } "" \ test user "`, Valid: true}),
+ Param: pgtype.Text{String: `postgres=arwdDxt/" tricky, ' } "" \ test user "`, Valid: true},
+ Result: new(pgtype.Text),
+ Test: isExpectedEq(pgtype.Text{String: `postgres=arwdDxt/" tricky, ' } "" \ test user "`, Valid: true}),
},
})
}
diff --git a/pgtype/tid_test.go b/pgtype/tid_test.go
index 3e7a1a50c..3c6da25bb 100644
--- a/pgtype/tid_test.go
+++ b/pgtype/tid_test.go
@@ -13,26 +13,26 @@ func TestTIDCodec(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "tid", []pgxtest.ValueRoundTripTest{
{
- pgtype.TID{BlockNumber: 42, OffsetNumber: 43, Valid: true},
- new(pgtype.TID),
- isExpectedEq(pgtype.TID{BlockNumber: 42, OffsetNumber: 43, Valid: true}),
+ Param: pgtype.TID{BlockNumber: 42, OffsetNumber: 43, Valid: true},
+ Result: new(pgtype.TID),
+ Test: isExpectedEq(pgtype.TID{BlockNumber: 42, OffsetNumber: 43, Valid: true}),
},
{
- pgtype.TID{BlockNumber: 4294967295, OffsetNumber: 65535, Valid: true},
- new(pgtype.TID),
- isExpectedEq(pgtype.TID{BlockNumber: 4294967295, OffsetNumber: 65535, Valid: true}),
+ Param: pgtype.TID{BlockNumber: 4294967295, OffsetNumber: 65535, Valid: true},
+ Result: new(pgtype.TID),
+ Test: isExpectedEq(pgtype.TID{BlockNumber: 4294967295, OffsetNumber: 65535, Valid: true}),
},
{
- pgtype.TID{BlockNumber: 42, OffsetNumber: 43, Valid: true},
- new(string),
- isExpectedEq("(42,43)"),
+ Param: pgtype.TID{BlockNumber: 42, OffsetNumber: 43, Valid: true},
+ Result: new(string),
+ Test: isExpectedEq("(42,43)"),
},
{
- pgtype.TID{BlockNumber: 4294967295, OffsetNumber: 65535, Valid: true},
- new(string),
- isExpectedEq("(4294967295,65535)"),
+ Param: pgtype.TID{BlockNumber: 4294967295, OffsetNumber: 65535, Valid: true},
+ Result: new(string),
+ Test: isExpectedEq("(4294967295,65535)"),
},
- {pgtype.TID{}, new(pgtype.TID), isExpectedEq(pgtype.TID{})},
- {nil, new(pgtype.TID), isExpectedEq(pgtype.TID{})},
+ {Param: pgtype.TID{}, Result: new(pgtype.TID), Test: isExpectedEq(pgtype.TID{})},
+ {Param: nil, Result: new(pgtype.TID), Test: isExpectedEq(pgtype.TID{})},
})
}
diff --git a/pgtype/time_test.go b/pgtype/time_test.go
index 8ea4459a8..11ae29bdf 100644
--- a/pgtype/time_test.go
+++ b/pgtype/time_test.go
@@ -14,37 +14,37 @@ import (
func TestTimeCodec(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "time", []pgxtest.ValueRoundTripTest{
{
- pgtype.Time{Microseconds: 0, Valid: true},
- new(pgtype.Time),
- isExpectedEq(pgtype.Time{Microseconds: 0, Valid: true}),
+ Param: pgtype.Time{Microseconds: 0, Valid: true},
+ Result: new(pgtype.Time),
+ Test: isExpectedEq(pgtype.Time{Microseconds: 0, Valid: true}),
},
{
- pgtype.Time{Microseconds: 1, Valid: true},
- new(pgtype.Time),
- isExpectedEq(pgtype.Time{Microseconds: 1, Valid: true}),
+ Param: pgtype.Time{Microseconds: 1, Valid: true},
+ Result: new(pgtype.Time),
+ Test: isExpectedEq(pgtype.Time{Microseconds: 1, Valid: true}),
},
{
- pgtype.Time{Microseconds: 86_399_999_999, Valid: true},
- new(pgtype.Time),
- isExpectedEq(pgtype.Time{Microseconds: 86_399_999_999, Valid: true}),
+ Param: pgtype.Time{Microseconds: 86_399_999_999, Valid: true},
+ Result: new(pgtype.Time),
+ Test: isExpectedEq(pgtype.Time{Microseconds: 86_399_999_999, Valid: true}),
},
{
- pgtype.Time{Microseconds: 86_400_000_000, Valid: true},
- new(pgtype.Time),
- isExpectedEq(pgtype.Time{Microseconds: 86_400_000_000, Valid: true}),
+ Param: pgtype.Time{Microseconds: 86_400_000_000, Valid: true},
+ Result: new(pgtype.Time),
+ Test: isExpectedEq(pgtype.Time{Microseconds: 86_400_000_000, Valid: true}),
},
{
- time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC),
- new(pgtype.Time),
- isExpectedEq(pgtype.Time{Microseconds: 0, Valid: true}),
+ Param: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC),
+ Result: new(pgtype.Time),
+ Test: isExpectedEq(pgtype.Time{Microseconds: 0, Valid: true}),
},
{
- pgtype.Time{Microseconds: 0, Valid: true},
- new(time.Time),
- isExpectedEq(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)),
+ Param: pgtype.Time{Microseconds: 0, Valid: true},
+ Result: new(time.Time),
+ Test: isExpectedEq(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC)),
},
- {pgtype.Time{}, new(pgtype.Time), isExpectedEq(pgtype.Time{})},
- {nil, new(pgtype.Time), isExpectedEq(pgtype.Time{})},
+ {Param: pgtype.Time{}, Result: new(pgtype.Time), Test: isExpectedEq(pgtype.Time{})},
+ {Param: nil, Result: new(pgtype.Time), Test: isExpectedEq(pgtype.Time{})},
})
}
@@ -66,12 +66,12 @@ func TestTimeTextScanner(t *testing.T) {
const mirco = "789123"
const woFraction = int64(4*time.Hour + 5*time.Minute + 6*time.Second) // time without fraction
- for i := 0; i <= len(mirco); i++ {
+ for i := range len(mirco) + 1 {
assert.NoError(t, pgTime.Scan("04:05:06."+mirco[:i]))
assert.Equal(t, true, pgTime.Valid)
frac, _ := strconv.ParseInt(mirco[:i], 10, 64)
- for k := i; k < 6; k++ {
+ for range 6 - i {
frac *= 10
}
assert.Equal(t, woFraction+frac*int64(time.Microsecond), pgTime.Microseconds*int64(time.Microsecond))
diff --git a/pgtype/timestamp.go b/pgtype/timestamp.go
index e94ce6bcd..55d8edaf0 100644
--- a/pgtype/timestamp.go
+++ b/pgtype/timestamp.go
@@ -218,12 +218,15 @@ func (encodePlanTimestampCodecText) Encode(value any, buf []byte) (newBuf []byte
s = t.Truncate(time.Microsecond).Format(pgTimestampFormat)
if bc {
- s = s + " BC"
+ s += " BC"
}
+
case Infinity:
s = "infinity"
+
case NegativeInfinity:
s = "-infinity"
+
}
buf = append(buf, s...)
diff --git a/pgtype/timestamp_test.go b/pgtype/timestamp_test.go
index 5e9022f42..73cc8bd8e 100644
--- a/pgtype/timestamp_test.go
+++ b/pgtype/timestamp_test.go
@@ -17,26 +17,26 @@ func TestTimestampCodec(t *testing.T) {
skipCockroachDB(t, "Server does not support infinite timestamps (see https://github.com/cockroachdb/cockroach/issues/41564)")
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "timestamp", []pgxtest.ValueRoundTripTest{
- {time.Date(-100, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(-100, 1, 1, 0, 0, 0, 0, time.UTC))},
- {time.Date(-1, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(-1, 1, 1, 0, 0, 0, 0, time.UTC))},
- {time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC))},
- {time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC))},
-
- {time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC))},
- {time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC))},
- {time.Date(1999, 12, 31, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(1999, 12, 31, 0, 0, 0, 0, time.UTC))},
- {time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC))},
- {time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC))},
- {time.Date(2200, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEqTime(time.Date(2200, 1, 1, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(-100, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(-100, 1, 1, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(-1, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(-1, 1, 1, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC))},
+
+ {Param: time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(1999, 12, 31, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(1999, 12, 31, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(2000, 1, 2, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(2200, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(2200, 1, 1, 0, 0, 0, 0, time.UTC))},
// Nanosecond truncation
- {time.Date(2020, 1, 1, 0, 0, 0, 999999999, time.UTC), new(time.Time), isExpectedEqTime(time.Date(2020, 1, 1, 0, 0, 0, 999999000, time.UTC))},
- {time.Date(2020, 1, 1, 0, 0, 0, 999999001, time.UTC), new(time.Time), isExpectedEqTime(time.Date(2020, 1, 1, 0, 0, 0, 999999000, time.UTC))},
+ {Param: time.Date(2020, 1, 1, 0, 0, 0, 999999999, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(2020, 1, 1, 0, 0, 0, 999999000, time.UTC))},
+ {Param: time.Date(2020, 1, 1, 0, 0, 0, 999999001, time.UTC), Result: new(time.Time), Test: isExpectedEqTime(time.Date(2020, 1, 1, 0, 0, 0, 999999000, time.UTC))},
- {pgtype.Timestamp{InfinityModifier: pgtype.Infinity, Valid: true}, new(pgtype.Timestamp), isExpectedEq(pgtype.Timestamp{InfinityModifier: pgtype.Infinity, Valid: true})},
- {pgtype.Timestamp{InfinityModifier: pgtype.NegativeInfinity, Valid: true}, new(pgtype.Timestamp), isExpectedEq(pgtype.Timestamp{InfinityModifier: pgtype.NegativeInfinity, Valid: true})},
- {pgtype.Timestamp{}, new(pgtype.Timestamp), isExpectedEq(pgtype.Timestamp{})},
- {nil, new(*time.Time), isExpectedEq((*time.Time)(nil))},
+ {Param: pgtype.Timestamp{InfinityModifier: pgtype.Infinity, Valid: true}, Result: new(pgtype.Timestamp), Test: isExpectedEq(pgtype.Timestamp{InfinityModifier: pgtype.Infinity, Valid: true})},
+ {Param: pgtype.Timestamp{InfinityModifier: pgtype.NegativeInfinity, Valid: true}, Result: new(pgtype.Timestamp), Test: isExpectedEq(pgtype.Timestamp{InfinityModifier: pgtype.NegativeInfinity, Valid: true})},
+ {Param: pgtype.Timestamp{}, Result: new(pgtype.Timestamp), Test: isExpectedEq(pgtype.Timestamp{})},
+ {Param: nil, Result: new(*time.Time), Test: isExpectedEq((*time.Time)(nil))},
})
}
@@ -55,7 +55,7 @@ func TestTimestampCodecWithScanLocationUTC(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, connTestRunner, nil, "timestamp", []pgxtest.ValueRoundTripTest{
// Have to use pgtype.Timestamp instead of time.Time as source because otherwise the simple and exec query exec
// modes will encode the time for timestamptz. That is, they will convert it from local time zone.
- {pgtype.Timestamp{Time: time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local), Valid: true}, new(time.Time), isExpectedEq(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC))},
+ {Param: pgtype.Timestamp{Time: time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local), Valid: true}, Result: new(time.Time), Test: isExpectedEq(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC))},
})
}
@@ -72,7 +72,7 @@ func TestTimestampCodecWithScanLocationLocal(t *testing.T) {
}
pgxtest.RunValueRoundTripTests(context.Background(), t, connTestRunner, nil, "timestamp", []pgxtest.ValueRoundTripTest{
- {time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEq(time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local))},
+ {Param: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEq(time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local))},
})
}
diff --git a/pgtype/timestamptz.go b/pgtype/timestamptz.go
index 4d055bfab..3f9a4c7c7 100644
--- a/pgtype/timestamptz.go
+++ b/pgtype/timestamptz.go
@@ -217,12 +217,14 @@ func (encodePlanTimestamptzCodecText) Encode(value any, buf []byte) (newBuf []by
s = t.Format(pgTimestamptzSecondFormat)
if bc {
- s = s + " BC"
+ s += " BC"
}
case Infinity:
s = "infinity"
+
case NegativeInfinity:
s = "-infinity"
+
}
buf = append(buf, s...)
diff --git a/pgtype/timestamptz_test.go b/pgtype/timestamptz_test.go
index 572481958..00cace5c2 100644
--- a/pgtype/timestamptz_test.go
+++ b/pgtype/timestamptz_test.go
@@ -15,26 +15,26 @@ func TestTimestamptzCodec(t *testing.T) {
skipCockroachDB(t, "Server does not support infinite timestamps (see https://github.com/cockroachdb/cockroach/issues/41564)")
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "timestamptz", []pgxtest.ValueRoundTripTest{
- {time.Date(-100, 1, 1, 0, 0, 0, 0, time.Local), new(time.Time), isExpectedEqTime(time.Date(-100, 1, 1, 0, 0, 0, 0, time.Local))},
- {time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local), new(time.Time), isExpectedEqTime(time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local))},
- {time.Date(0, 1, 1, 0, 0, 0, 0, time.Local), new(time.Time), isExpectedEqTime(time.Date(0, 1, 1, 0, 0, 0, 0, time.Local))},
- {time.Date(1, 1, 1, 0, 0, 0, 0, time.Local), new(time.Time), isExpectedEqTime(time.Date(1, 1, 1, 0, 0, 0, 0, time.Local))},
-
- {time.Date(1900, 1, 1, 0, 0, 0, 0, time.Local), new(time.Time), isExpectedEqTime(time.Date(1900, 1, 1, 0, 0, 0, 0, time.Local))},
- {time.Date(1970, 1, 1, 0, 0, 0, 0, time.Local), new(time.Time), isExpectedEqTime(time.Date(1970, 1, 1, 0, 0, 0, 0, time.Local))},
- {time.Date(1999, 12, 31, 0, 0, 0, 0, time.Local), new(time.Time), isExpectedEqTime(time.Date(1999, 12, 31, 0, 0, 0, 0, time.Local))},
- {time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local), new(time.Time), isExpectedEqTime(time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local))},
- {time.Date(2000, 1, 2, 0, 0, 0, 0, time.Local), new(time.Time), isExpectedEqTime(time.Date(2000, 1, 2, 0, 0, 0, 0, time.Local))},
- {time.Date(2200, 1, 1, 0, 0, 0, 0, time.Local), new(time.Time), isExpectedEqTime(time.Date(2200, 1, 1, 0, 0, 0, 0, time.Local))},
+ {Param: time.Date(-100, 1, 1, 0, 0, 0, 0, time.Local), Result: new(time.Time), Test: isExpectedEqTime(time.Date(-100, 1, 1, 0, 0, 0, 0, time.Local))},
+ {Param: time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local), Result: new(time.Time), Test: isExpectedEqTime(time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local))},
+ {Param: time.Date(0, 1, 1, 0, 0, 0, 0, time.Local), Result: new(time.Time), Test: isExpectedEqTime(time.Date(0, 1, 1, 0, 0, 0, 0, time.Local))},
+ {Param: time.Date(1, 1, 1, 0, 0, 0, 0, time.Local), Result: new(time.Time), Test: isExpectedEqTime(time.Date(1, 1, 1, 0, 0, 0, 0, time.Local))},
+
+ {Param: time.Date(1900, 1, 1, 0, 0, 0, 0, time.Local), Result: new(time.Time), Test: isExpectedEqTime(time.Date(1900, 1, 1, 0, 0, 0, 0, time.Local))},
+ {Param: time.Date(1970, 1, 1, 0, 0, 0, 0, time.Local), Result: new(time.Time), Test: isExpectedEqTime(time.Date(1970, 1, 1, 0, 0, 0, 0, time.Local))},
+ {Param: time.Date(1999, 12, 31, 0, 0, 0, 0, time.Local), Result: new(time.Time), Test: isExpectedEqTime(time.Date(1999, 12, 31, 0, 0, 0, 0, time.Local))},
+ {Param: time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local), Result: new(time.Time), Test: isExpectedEqTime(time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local))},
+ {Param: time.Date(2000, 1, 2, 0, 0, 0, 0, time.Local), Result: new(time.Time), Test: isExpectedEqTime(time.Date(2000, 1, 2, 0, 0, 0, 0, time.Local))},
+ {Param: time.Date(2200, 1, 1, 0, 0, 0, 0, time.Local), Result: new(time.Time), Test: isExpectedEqTime(time.Date(2200, 1, 1, 0, 0, 0, 0, time.Local))},
// Nanosecond truncation
- {time.Date(2020, 1, 1, 0, 0, 0, 999999999, time.Local), new(time.Time), isExpectedEqTime(time.Date(2020, 1, 1, 0, 0, 0, 999999000, time.Local))},
- {time.Date(2020, 1, 1, 0, 0, 0, 999999001, time.Local), new(time.Time), isExpectedEqTime(time.Date(2020, 1, 1, 0, 0, 0, 999999000, time.Local))},
+ {Param: time.Date(2020, 1, 1, 0, 0, 0, 999999999, time.Local), Result: new(time.Time), Test: isExpectedEqTime(time.Date(2020, 1, 1, 0, 0, 0, 999999000, time.Local))},
+ {Param: time.Date(2020, 1, 1, 0, 0, 0, 999999001, time.Local), Result: new(time.Time), Test: isExpectedEqTime(time.Date(2020, 1, 1, 0, 0, 0, 999999000, time.Local))},
- {pgtype.Timestamptz{InfinityModifier: pgtype.Infinity, Valid: true}, new(pgtype.Timestamptz), isExpectedEq(pgtype.Timestamptz{InfinityModifier: pgtype.Infinity, Valid: true})},
- {pgtype.Timestamptz{InfinityModifier: pgtype.NegativeInfinity, Valid: true}, new(pgtype.Timestamptz), isExpectedEq(pgtype.Timestamptz{InfinityModifier: pgtype.NegativeInfinity, Valid: true})},
- {pgtype.Timestamptz{}, new(pgtype.Timestamptz), isExpectedEq(pgtype.Timestamptz{})},
- {nil, new(*time.Time), isExpectedEq((*time.Time)(nil))},
+ {Param: pgtype.Timestamptz{InfinityModifier: pgtype.Infinity, Valid: true}, Result: new(pgtype.Timestamptz), Test: isExpectedEq(pgtype.Timestamptz{InfinityModifier: pgtype.Infinity, Valid: true})},
+ {Param: pgtype.Timestamptz{InfinityModifier: pgtype.NegativeInfinity, Valid: true}, Result: new(pgtype.Timestamptz), Test: isExpectedEq(pgtype.Timestamptz{InfinityModifier: pgtype.NegativeInfinity, Valid: true})},
+ {Param: pgtype.Timestamptz{}, Result: new(pgtype.Timestamptz), Test: isExpectedEq(pgtype.Timestamptz{})},
+ {Param: nil, Result: new(*time.Time), Test: isExpectedEq((*time.Time)(nil))},
})
}
@@ -51,7 +51,7 @@ func TestTimestamptzCodecWithLocationUTC(t *testing.T) {
}
pgxtest.RunValueRoundTripTests(context.Background(), t, connTestRunner, nil, "timestamptz", []pgxtest.ValueRoundTripTest{
- {time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), new(time.Time), isExpectedEq(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC))},
+ {Param: time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), Result: new(time.Time), Test: isExpectedEq(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC))},
})
}
@@ -68,7 +68,7 @@ func TestTimestamptzCodecWithLocationLocal(t *testing.T) {
}
pgxtest.RunValueRoundTripTests(context.Background(), t, connTestRunner, nil, "timestamptz", []pgxtest.ValueRoundTripTest{
- {time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local), new(time.Time), isExpectedEq(time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local))},
+ {Param: time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local), Result: new(time.Time), Test: isExpectedEq(time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local))},
})
}
diff --git a/pgtype/uint32_test.go b/pgtype/uint32_test.go
index efa4e2730..5a5bd3c75 100644
--- a/pgtype/uint32_test.go
+++ b/pgtype/uint32_test.go
@@ -11,12 +11,12 @@ import (
func TestUint32Codec(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, pgxtest.KnownOIDQueryExecModes, "oid", []pgxtest.ValueRoundTripTest{
{
- pgtype.Uint32{Uint32: pgtype.TextOID, Valid: true},
- new(pgtype.Uint32),
- isExpectedEq(pgtype.Uint32{Uint32: pgtype.TextOID, Valid: true}),
+ Param: pgtype.Uint32{Uint32: pgtype.TextOID, Valid: true},
+ Result: new(pgtype.Uint32),
+ Test: isExpectedEq(pgtype.Uint32{Uint32: pgtype.TextOID, Valid: true}),
},
- {pgtype.Uint32{}, new(pgtype.Uint32), isExpectedEq(pgtype.Uint32{})},
- {nil, new(pgtype.Uint32), isExpectedEq(pgtype.Uint32{})},
- {"1147", new(string), isExpectedEq("1147")},
+ {Param: pgtype.Uint32{}, Result: new(pgtype.Uint32), Test: isExpectedEq(pgtype.Uint32{})},
+ {Param: nil, Result: new(pgtype.Uint32), Test: isExpectedEq(pgtype.Uint32{})},
+ {Param: "1147", Result: new(string), Test: isExpectedEq("1147")},
})
}
diff --git a/pgtype/uint64_test.go b/pgtype/uint64_test.go
index 33c2622d5..eeadaed73 100644
--- a/pgtype/uint64_test.go
+++ b/pgtype/uint64_test.go
@@ -14,17 +14,17 @@ func TestUint64Codec(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, pgxtest.KnownOIDQueryExecModes, "xid8", []pgxtest.ValueRoundTripTest{
{
- pgtype.Uint64{Uint64: 1 << 36, Valid: true},
- new(pgtype.Uint64),
- isExpectedEq(pgtype.Uint64{Uint64: 1 << 36, Valid: true}),
+ Param: pgtype.Uint64{Uint64: 1 << 36, Valid: true},
+ Result: new(pgtype.Uint64),
+ Test: isExpectedEq(pgtype.Uint64{Uint64: 1 << 36, Valid: true}),
},
- {pgtype.Uint64{}, new(pgtype.Uint64), isExpectedEq(pgtype.Uint64{})},
- {nil, new(pgtype.Uint64), isExpectedEq(pgtype.Uint64{})},
+ {Param: pgtype.Uint64{}, Result: new(pgtype.Uint64), Test: isExpectedEq(pgtype.Uint64{})},
+ {Param: nil, Result: new(pgtype.Uint64), Test: isExpectedEq(pgtype.Uint64{})},
{
- uint64(1 << 36),
- new(uint64),
- isExpectedEq(uint64(1 << 36)),
+ Param: uint64(1 << 36),
+ Result: new(uint64),
+ Test: isExpectedEq(uint64(1 << 36)),
},
- {"1147", new(string), isExpectedEq("1147")},
+ {Param: "1147", Result: new(string), Test: isExpectedEq("1147")},
})
}
diff --git a/pgtype/uuid_test.go b/pgtype/uuid_test.go
index 255bd92f6..e608585e3 100644
--- a/pgtype/uuid_test.go
+++ b/pgtype/uuid_test.go
@@ -15,50 +15,50 @@ type renamedUUIDByteArray [16]byte
func TestUUIDCodec(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "uuid", []pgxtest.ValueRoundTripTest{
{
- pgtype.UUID{Bytes: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, Valid: true},
- new(pgtype.UUID),
- isExpectedEq(pgtype.UUID{Bytes: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, Valid: true}),
+ Param: pgtype.UUID{Bytes: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, Valid: true},
+ Result: new(pgtype.UUID),
+ Test: isExpectedEq(pgtype.UUID{Bytes: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, Valid: true}),
},
{
- "00010203-0405-0607-0809-0a0b0c0d0e0f",
- new(pgtype.UUID),
- isExpectedEq(pgtype.UUID{Bytes: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, Valid: true}),
+ Param: "00010203-0405-0607-0809-0a0b0c0d0e0f",
+ Result: new(pgtype.UUID),
+ Test: isExpectedEq(pgtype.UUID{Bytes: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, Valid: true}),
},
{
- "000102030405060708090a0b0c0d0e0f",
- new(pgtype.UUID),
- isExpectedEq(pgtype.UUID{Bytes: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, Valid: true}),
+ Param: "000102030405060708090a0b0c0d0e0f",
+ Result: new(pgtype.UUID),
+ Test: isExpectedEq(pgtype.UUID{Bytes: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, Valid: true}),
},
{
- pgtype.UUID{Bytes: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, Valid: true},
- new(string),
- isExpectedEq("00010203-0405-0607-0809-0a0b0c0d0e0f"),
+ Param: pgtype.UUID{Bytes: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, Valid: true},
+ Result: new(string),
+ Test: isExpectedEq("00010203-0405-0607-0809-0a0b0c0d0e0f"),
},
- {pgtype.UUID{}, new([]byte), isExpectedEqBytes([]byte(nil))},
- {pgtype.UUID{}, new(pgtype.UUID), isExpectedEq(pgtype.UUID{})},
- {nil, new(pgtype.UUID), isExpectedEq(pgtype.UUID{})},
+ {Param: pgtype.UUID{}, Result: new([]byte), Test: isExpectedEqBytes([]byte(nil))},
+ {Param: pgtype.UUID{}, Result: new(pgtype.UUID), Test: isExpectedEq(pgtype.UUID{})},
+ {Param: nil, Result: new(pgtype.UUID), Test: isExpectedEq(pgtype.UUID{})},
})
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, pgxtest.KnownOIDQueryExecModes, "uuid", []pgxtest.ValueRoundTripTest{
{
- [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
- new(pgtype.UUID),
- isExpectedEq(pgtype.UUID{Bytes: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, Valid: true}),
+ Param: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ Result: new(pgtype.UUID),
+ Test: isExpectedEq(pgtype.UUID{Bytes: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, Valid: true}),
},
{
- renamedUUIDByteArray{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
- new(pgtype.UUID),
- isExpectedEq(pgtype.UUID{Bytes: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, Valid: true}),
+ Param: renamedUUIDByteArray{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ Result: new(pgtype.UUID),
+ Test: isExpectedEq(pgtype.UUID{Bytes: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, Valid: true}),
},
{
- []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
- new(renamedUUIDByteArray),
- isExpectedEq(renamedUUIDByteArray{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}),
+ Param: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ Result: new(renamedUUIDByteArray),
+ Test: isExpectedEq(renamedUUIDByteArray{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}),
},
{
- []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
- new(pgtype.UUID),
- isExpectedEq(pgtype.UUID{Bytes: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, Valid: true}),
+ Param: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ Result: new(pgtype.UUID),
+ Test: isExpectedEq(pgtype.UUID{Bytes: [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, Valid: true}),
},
})
}
diff --git a/pgtype/xml_test.go b/pgtype/xml_test.go
index 2c0b899a5..3fde59fcb 100644
--- a/pgtype/xml_test.go
+++ b/pgtype/xml_test.go
@@ -21,29 +21,29 @@ type xmlStruct struct {
func TestXMLCodec(t *testing.T) {
skipCockroachDB(t, "CockroachDB does not support XML.")
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "xml", []pgxtest.ValueRoundTripTest{
- {nil, new(*xmlStruct), isExpectedEq((*xmlStruct)(nil))},
- {map[string]any(nil), new(*string), isExpectedEq((*string)(nil))},
- {map[string]any(nil), new([]byte), isExpectedEqBytes([]byte(nil))},
- {[]byte(nil), new([]byte), isExpectedEqBytes([]byte(nil))},
- {nil, new([]byte), isExpectedEqBytes([]byte(nil))},
+ {Param: nil, Result: new(*xmlStruct), Test: isExpectedEq((*xmlStruct)(nil))},
+ {Param: map[string]any(nil), Result: new(*string), Test: isExpectedEq((*string)(nil))},
+ {Param: map[string]any(nil), Result: new([]byte), Test: isExpectedEqBytes([]byte(nil))},
+ {Param: []byte(nil), Result: new([]byte), Test: isExpectedEqBytes([]byte(nil))},
+ {Param: nil, Result: new([]byte), Test: isExpectedEqBytes([]byte(nil))},
// Test sql.Scanner.
- {"", new(sql.NullString), isExpectedEq(sql.NullString{String: "", Valid: true})},
+ {Param: "", Result: new(sql.NullString), Test: isExpectedEq(sql.NullString{String: "", Valid: true})},
// Test driver.Valuer.
- {sql.NullString{String: "", Valid: true}, new(sql.NullString), isExpectedEq(sql.NullString{String: "", Valid: true})},
+ {Param: sql.NullString{String: "", Valid: true}, Result: new(sql.NullString), Test: isExpectedEq(sql.NullString{String: "", Valid: true})},
})
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, pgxtest.KnownOIDQueryExecModes, "xml", []pgxtest.ValueRoundTripTest{
- {[]byte(``), new([]byte), isExpectedEqBytes([]byte(``))},
- {[]byte(``), new([]byte), isExpectedEqBytes([]byte(``))},
- {[]byte(``), new(string), isExpectedEq(``)},
- {[]byte(``), new([]byte), isExpectedEqBytes([]byte(``))},
- {[]byte(``), new(string), isExpectedEq(``)},
- {[]byte(""), new([]byte), isExpectedEqBytes([]byte(""))},
- {xmlStruct{Name: "Adam", Age: 10}, new(xmlStruct), isExpectedEq(xmlStruct{XMLName: xml.Name{Local: "person"}, Name: "Adam", Age: 10})},
- {xmlStruct{XMLName: xml.Name{Local: "person"}, Name: "Adam", Age: 10}, new(xmlStruct), isExpectedEq(xmlStruct{XMLName: xml.Name{Local: "person"}, Name: "Adam", Age: 10})},
- {[]byte(`Adam`), new(xmlStruct), isExpectedEq(xmlStruct{XMLName: xml.Name{Local: "person"}, Name: "Adam", Age: 10})},
+ {Param: []byte(``), Result: new([]byte), Test: isExpectedEqBytes([]byte(``))},
+ {Param: []byte(``), Result: new([]byte), Test: isExpectedEqBytes([]byte(``))},
+ {Param: []byte(``), Result: new(string), Test: isExpectedEq(``)},
+ {Param: []byte(``), Result: new([]byte), Test: isExpectedEqBytes([]byte(``))},
+ {Param: []byte(``), Result: new(string), Test: isExpectedEq(``)},
+ {Param: []byte(""), Result: new([]byte), Test: isExpectedEqBytes([]byte(""))},
+ {Param: xmlStruct{Name: "Adam", Age: 10}, Result: new(xmlStruct), Test: isExpectedEq(xmlStruct{XMLName: xml.Name{Local: "person"}, Name: "Adam", Age: 10})},
+ {Param: xmlStruct{XMLName: xml.Name{Local: "person"}, Name: "Adam", Age: 10}, Result: new(xmlStruct), Test: isExpectedEq(xmlStruct{XMLName: xml.Name{Local: "person"}, Name: "Adam", Age: 10})},
+ {Param: []byte(`Adam`), Result: new(xmlStruct), Test: isExpectedEq(xmlStruct{XMLName: xml.Name{Local: "person"}, Name: "Adam", Age: 10})},
})
}
diff --git a/pgtype/zeronull/float8_test.go b/pgtype/zeronull/float8_test.go
index b3c818aaa..63663c94c 100644
--- a/pgtype/zeronull/float8_test.go
+++ b/pgtype/zeronull/float8_test.go
@@ -17,19 +17,19 @@ func isExpectedEq(a any) func(any) bool {
func TestFloat8Transcode(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "float8", []pgxtest.ValueRoundTripTest{
{
- (zeronull.Float8)(1),
- new(zeronull.Float8),
- isExpectedEq((zeronull.Float8)(1)),
+ Param: zeronull.Float8(1),
+ Result: new(zeronull.Float8),
+ Test: isExpectedEq(zeronull.Float8(1)),
},
{
- nil,
- new(zeronull.Float8),
- isExpectedEq((zeronull.Float8)(0)),
+ Param: nil,
+ Result: new(zeronull.Float8),
+ Test: isExpectedEq(zeronull.Float8(0)),
},
{
- (zeronull.Float8)(0),
- new(any),
- isExpectedEq(nil),
+ Param: zeronull.Float8(0),
+ Result: new(any),
+ Test: isExpectedEq(nil),
},
})
}
diff --git a/pgtype/zeronull/int.go b/pgtype/zeronull/int.go
index e36aa2466..cbd9d27e4 100644
--- a/pgtype/zeronull/int.go
+++ b/pgtype/zeronull/int.go
@@ -136,10 +136,10 @@ func (dst *Int8) ScanInt64(n pgtype.Int8) error {
return nil
}
- if n.Int64 < math.MinInt64 {
+ if n.Int64 < math.MinInt8 {
return fmt.Errorf("%d is less than minimum value for Int8", n.Int64)
}
- if n.Int64 > math.MaxInt64 {
+ if n.Int64 > math.MaxInt8 {
return fmt.Errorf("%d is greater than maximum value for Int8", n.Int64)
}
*dst = Int8(n.Int64)
diff --git a/pgtype/zeronull/int_test.go b/pgtype/zeronull/int_test.go
index 7e32064ab..2fa62a1d3 100644
--- a/pgtype/zeronull/int_test.go
+++ b/pgtype/zeronull/int_test.go
@@ -13,19 +13,19 @@ import (
func TestInt2Transcode(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "int2", []pgxtest.ValueRoundTripTest{
{
- (zeronull.Int2)(1),
- new(zeronull.Int2),
- isExpectedEq((zeronull.Int2)(1)),
+ Param: (zeronull.Int2)(1),
+ Result: new(zeronull.Int2),
+ Test: isExpectedEq((zeronull.Int2)(1)),
},
{
- nil,
- new(zeronull.Int2),
- isExpectedEq((zeronull.Int2)(0)),
+ Param: nil,
+ Result: new(zeronull.Int2),
+ Test: isExpectedEq((zeronull.Int2)(0)),
},
{
- (zeronull.Int2)(0),
- new(any),
- isExpectedEq(nil),
+ Param: (zeronull.Int2)(0),
+ Result: new(any),
+ Test: isExpectedEq(nil),
},
})
}
diff --git a/pgtype/zeronull/text_test.go b/pgtype/zeronull/text_test.go
index 5a60baf18..7618be161 100644
--- a/pgtype/zeronull/text_test.go
+++ b/pgtype/zeronull/text_test.go
@@ -11,19 +11,19 @@ import (
func TestTextTranscode(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "text", []pgxtest.ValueRoundTripTest{
{
- (zeronull.Text)("foo"),
- new(zeronull.Text),
- isExpectedEq((zeronull.Text)("foo")),
+ Param: zeronull.Text("foo"),
+ Result: new(zeronull.Text),
+ Test: isExpectedEq(zeronull.Text("foo")),
},
{
- nil,
- new(zeronull.Text),
- isExpectedEq((zeronull.Text)("")),
+ Param: nil,
+ Result: new(zeronull.Text),
+ Test: isExpectedEq(zeronull.Text("")),
},
{
- (zeronull.Text)(""),
- new(any),
- isExpectedEq(nil),
+ Param: zeronull.Text(""),
+ Result: new(any),
+ Test: isExpectedEq(nil),
},
})
}
diff --git a/pgtype/zeronull/timestamp_test.go b/pgtype/zeronull/timestamp_test.go
index 8a5a57966..890f82d50 100644
--- a/pgtype/zeronull/timestamp_test.go
+++ b/pgtype/zeronull/timestamp_test.go
@@ -21,19 +21,19 @@ func isExpectedEqTimestamp(a any) func(any) bool {
func TestTimestampTranscode(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "timestamp", []pgxtest.ValueRoundTripTest{
{
- (zeronull.Timestamp)(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)),
- new(zeronull.Timestamp),
- isExpectedEqTimestamp((zeronull.Timestamp)(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC))),
+ Param: zeronull.Timestamp(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)),
+ Result: new(zeronull.Timestamp),
+ Test: isExpectedEqTimestamp(zeronull.Timestamp(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC))),
},
{
- nil,
- new(zeronull.Timestamp),
- isExpectedEqTimestamp((zeronull.Timestamp)(time.Time{})),
+ Param: nil,
+ Result: new(zeronull.Timestamp),
+ Test: isExpectedEqTimestamp(zeronull.Timestamp(time.Time{})),
},
{
- (zeronull.Timestamp)(time.Time{}),
- new(any),
- isExpectedEq(nil),
+ Param: zeronull.Timestamp(time.Time{}),
+ Result: new(any),
+ Test: isExpectedEq(nil),
},
})
}
diff --git a/pgtype/zeronull/timestamptz_test.go b/pgtype/zeronull/timestamptz_test.go
index 0a6d380ba..53b5b958b 100644
--- a/pgtype/zeronull/timestamptz_test.go
+++ b/pgtype/zeronull/timestamptz_test.go
@@ -21,19 +21,19 @@ func isExpectedEqTimestamptz(a any) func(any) bool {
func TestTimestamptzTranscode(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "timestamptz", []pgxtest.ValueRoundTripTest{
{
- (zeronull.Timestamptz)(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)),
- new(zeronull.Timestamptz),
- isExpectedEqTimestamptz((zeronull.Timestamptz)(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC))),
+ Param: zeronull.Timestamptz(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)),
+ Result: new(zeronull.Timestamptz),
+ Test: isExpectedEqTimestamptz(zeronull.Timestamptz(time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC))),
},
{
- nil,
- new(zeronull.Timestamptz),
- isExpectedEqTimestamptz((zeronull.Timestamptz)(time.Time{})),
+ Param: nil,
+ Result: new(zeronull.Timestamptz),
+ Test: isExpectedEqTimestamptz(zeronull.Timestamptz(time.Time{})),
},
{
- (zeronull.Timestamptz)(time.Time{}),
- new(any),
- isExpectedEq(nil),
+ Param: zeronull.Timestamptz(time.Time{}),
+ Result: new(any),
+ Test: isExpectedEq(nil),
},
})
}
diff --git a/pgtype/zeronull/uuid_test.go b/pgtype/zeronull/uuid_test.go
index c50cb300b..e6bda3174 100644
--- a/pgtype/zeronull/uuid_test.go
+++ b/pgtype/zeronull/uuid_test.go
@@ -11,19 +11,19 @@ import (
func TestUUIDTranscode(t *testing.T) {
pgxtest.RunValueRoundTripTests(context.Background(), t, defaultConnTestRunner, nil, "uuid", []pgxtest.ValueRoundTripTest{
{
- (zeronull.UUID)([16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}),
- new(zeronull.UUID),
- isExpectedEq((zeronull.UUID)([16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15})),
+ Param: zeronull.UUID([16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}),
+ Result: new(zeronull.UUID),
+ Test: isExpectedEq(zeronull.UUID([16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15})),
},
{
- nil,
- new(zeronull.UUID),
- isExpectedEq((zeronull.UUID)([16]byte{})),
+ Param: nil,
+ Result: new(zeronull.UUID),
+ Test: isExpectedEq(zeronull.UUID([16]byte{})),
},
{
- (zeronull.UUID)([16]byte{}),
- new(any),
- isExpectedEq(nil),
+ Param: zeronull.UUID([16]byte{}),
+ Result: new(any),
+ Test: isExpectedEq(nil),
},
})
}
diff --git a/pgxpool/bench_test.go b/pgxpool/bench_test.go
index 2748ddb2b..5e3cf70db 100644
--- a/pgxpool/bench_test.go
+++ b/pgxpool/bench_test.go
@@ -42,7 +42,7 @@ func BenchmarkMinimalPreparedSelectBaseline(b *testing.B) {
var n int64
- for i := 0; b.Loop(); i++ {
+ for i := range b.N {
err = conn.QueryRow(context.Background(), "ps1", i).Scan(&n)
if err != nil {
b.Fatal(err)
@@ -68,7 +68,7 @@ func BenchmarkMinimalPreparedSelect(b *testing.B) {
var n int64
- for i := 0; b.Loop(); i++ {
+ for i := range b.N {
err = db.QueryRow(context.Background(), "ps1", i).Scan(&n)
if err != nil {
b.Fatal(err)
diff --git a/pgxpool/conn.go b/pgxpool/conn.go
index 38c90f3da..53e720ffc 100644
--- a/pgxpool/conn.go
+++ b/pgxpool/conn.go
@@ -109,7 +109,7 @@ func (c *Conn) Begin(ctx context.Context) (pgx.Tx, error) {
}
// BeginTx starts a transaction block from the *Conn with txOptions determining the transaction mode.
-func (c *Conn) BeginTx(ctx context.Context, txOptions pgx.TxOptions) (pgx.Tx, error) {
+func (c *Conn) BeginTx(ctx context.Context, txOptions *pgx.TxOptions) (pgx.Tx, error) {
return c.Conn().BeginTx(ctx, txOptions)
}
diff --git a/pgxpool/pool.go b/pgxpool/pool.go
index df97bdbd7..79c67193d 100644
--- a/pgxpool/pool.go
+++ b/pgxpool/pool.go
@@ -528,25 +528,27 @@ func (p *Pool) checkHealth() {
// checkConnsHealth will check all idle connections, destroy a connection if
// it's idle or too old, and returns true if any were destroyed
func (p *Pool) checkConnsHealth() bool {
- var destroyed bool
- totalConns := p.Stat().TotalConns()
- resources := p.p.AcquireAllIdle()
- for _, res := range resources {
+ var (
+ destroyed bool
+ totalConns = p.Stat().TotalConns()
+ resources = p.p.AcquireAllIdle()
+ )
+ for i := range resources {
// We're okay going under minConns if the lifetime is up
- if p.isExpired(res) && totalConns >= p.minConns {
+ if p.isExpired(resources[i]) && totalConns >= p.minConns {
atomic.AddInt64(&p.lifetimeDestroyCount, 1)
- res.Destroy()
+ resources[i].Destroy()
destroyed = true
// Since Destroy is async we manually decrement totalConns.
totalConns--
- } else if res.IdleDuration() > p.maxConnIdleTime && totalConns > p.minConns {
+ } else if resources[i].IdleDuration() > p.maxConnIdleTime && totalConns > p.minConns {
atomic.AddInt64(&p.idleDestroyCount, 1)
- res.Destroy()
+ resources[i].Destroy()
destroyed = true
// Since Destroy is async we manually decrement totalConns.
totalConns--
} else {
- res.ReleaseUnused()
+ resources[i].ReleaseUnused()
}
}
return destroyed
@@ -671,18 +673,21 @@ func (p *Pool) AcquireFunc(ctx context.Context, f func(*Conn) error) error {
// AcquireAllIdle atomically acquires all currently idle connections. Its intended use is for health check and
// keep-alive functionality. It does not update pool statistics.
func (p *Pool) AcquireAllIdle(ctx context.Context) []*Conn {
- resources := p.p.AcquireAllIdle()
- conns := make([]*Conn, 0, len(resources))
- for _, res := range resources {
- cr := res.Value()
+ var (
+ resources = p.p.AcquireAllIdle()
+ conns = make([]*Conn, 0, len(resources))
+ )
+ for i := range resources {
+ cr := resources[i].Value()
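+ // Connections rejected by the prepareConn hook are destroyed instead of being returned.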
if p.prepareConn != nil {
ok, err := p.prepareConn(ctx, cr.conn)
if !ok || err != nil {
- res.Destroy()
+ resources[i].Destroy()
continue
}
}
- conns = append(conns, cr.getConn(p, res))
+ conns = append(conns, cr.getConn(p, resources[i]))
}
return conns
@@ -786,14 +791,14 @@ func (p *Pool) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults {
// *pgxpool.Tx is returned, which implements the pgx.Tx interface.
// Commit or Rollback must be called on the returned transaction to finalize the transaction block.
func (p *Pool) Begin(ctx context.Context) (pgx.Tx, error) {
- return p.BeginTx(ctx, pgx.TxOptions{})
+ return p.BeginTx(ctx, &pgx.TxOptions{})
}
// BeginTx acquires a connection from the Pool and starts a transaction with pgx.TxOptions determining the transaction mode.
// Unlike database/sql, the context only affects the begin command. i.e. there is no auto-rollback on context cancellation.
// *pgxpool.Tx is returned, which implements the pgx.Tx interface.
// Commit or Rollback must be called on the returned transaction to finalize the transaction block.
-func (p *Pool) BeginTx(ctx context.Context, txOptions pgx.TxOptions) (pgx.Tx, error) {
+func (p *Pool) BeginTx(ctx context.Context, txOptions *pgx.TxOptions) (pgx.Tx, error) {
c, err := p.Acquire(ctx)
if err != nil {
return nil, err
diff --git a/pgxpool/pool_test.go b/pgxpool/pool_test.go
index 8579a065a..602ab43e7 100644
--- a/pgxpool/pool_test.go
+++ b/pgxpool/pool_test.go
@@ -1057,7 +1057,7 @@ func TestConnReleaseWhenBeginFail(t *testing.T) {
require.NoError(t, err)
defer db.Close()
- tx, err := db.BeginTx(ctx, pgx.TxOptions{
+ tx, err := db.BeginTx(ctx, &pgx.TxOptions{
IsoLevel: pgx.TxIsoLevel("foo"),
})
assert.Error(t, err)
diff --git a/pgxtest/pgxtest.go b/pgxtest/pgxtest.go
index ece6d91b8..8c52c9107 100644
--- a/pgxtest/pgxtest.go
+++ b/pgxtest/pgxtest.go
@@ -156,7 +156,7 @@ func SkipCockroachDB(t testing.TB, conn *pgx.Conn, msg string) {
func SkipPostgreSQLVersionLessThan(t testing.TB, conn *pgx.Conn, minVersion int64) {
serverVersionStr := conn.PgConn().ParameterStatus("server_version")
- serverVersionStr = regexp.MustCompile(`^[0-9]+`).FindString(serverVersionStr)
+ serverVersionStr = regexp.MustCompile(`^\d+`).FindString(serverVersionStr)
// if not PostgreSQL do nothing
if serverVersionStr == "" {
return
diff --git a/query_test.go b/query_test.go
index 0a1f553fa..7b1276bb3 100644
--- a/query_test.go
+++ b/query_test.go
@@ -90,21 +90,24 @@ func TestConnQueryScanWithManyColumns(t *testing.T) {
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
- columnCount := 1000
- sql := "select "
+ var (
+ columnCount = 1000
+ sqlVar = "select "
+ )
for i := range columnCount {
if i > 0 {
- sql += ","
+ sqlVar += ","
}
- sql += fmt.Sprintf(" %d", i)
+ sqlVar += fmt.Sprintf(" %d", i)
}
- sql += " from generate_series(1,5)"
-
- dest := make([]int, columnCount)
+ sqlVar += " from generate_series(1,5)"
- var rowCount int
+ var (
+ dest = make([]int, columnCount)
+ rowCount int
+ )
- rows, err := conn.Query(context.Background(), sql)
+ rows, err := conn.Query(context.Background(), sqlVar)
if err != nil {
t.Fatalf("conn.Query failed: %v", err)
}
@@ -546,9 +549,9 @@ func TestConnQueryErrorWhileReturningRows(t *testing.T) {
for range 100 {
func() {
- sql := `select 42 / (random() * 20)::integer from generate_series(1,100000)`
+ sqlVar := `select 42 / (random() * 20)::integer from generate_series(1,100000)`
- rows, err := conn.Query(context.Background(), sql)
+ rows, err := conn.Query(context.Background(), sqlVar)
if err != nil {
t.Fatal(err)
}
@@ -1904,13 +1907,15 @@ func TestConnSimpleProtocol(t *testing.T) {
{[]float64{1, 2, 3}},
}
for i, tt := range tests {
- var actual []float64
- err := conn.QueryRow(
- context.Background(),
- "select $1::float8[]",
- pgx.QueryExecModeSimpleProtocol,
- tt.expected,
- ).Scan(&actual)
+ var (
+ actual []float64
+ err = conn.QueryRow(
+ context.Background(),
+ "select $1::float8[]",
+ pgx.QueryExecModeSimpleProtocol,
+ tt.expected,
+ ).Scan(&actual)
+ )
assert.NoErrorf(t, err, "%d", i)
assert.Equalf(t, tt.expected, actual, "%d", i)
}
@@ -1918,45 +1923,45 @@ func TestConnSimpleProtocol(t *testing.T) {
// Test high-level type
- {
- if conn.PgConn().ParameterStatus("crdb_version") == "" {
- // CockroachDB doesn't support circle type.
- expected := pgtype.Circle{P: pgtype.Vec2{X: 1, Y: 2}, R: 1.5, Valid: true}
- actual := expected
- err := conn.QueryRow(
- context.Background(),
- "select $1::circle",
- pgx.QueryExecModeSimpleProtocol,
- &expected,
- ).Scan(&actual)
- if err != nil {
- t.Error(err)
- }
- if expected != actual {
- t.Errorf("expected %v got %v", expected, actual)
- }
+ if conn.PgConn().ParameterStatus("crdb_version") == "" {
+ // CockroachDB doesn't support circle type.
+ expected := pgtype.Circle{P: pgtype.Vec2{X: 1, Y: 2}, R: 1.5, Valid: true}
+ actual := expected
+ err := conn.QueryRow(
+ context.Background(),
+ "select $1::circle",
+ pgx.QueryExecModeSimpleProtocol,
+ &expected,
+ ).Scan(&actual)
+ if err != nil {
+ t.Error(err)
+ }
+ if expected != actual {
+ t.Errorf("expected %v got %v", expected, actual)
}
}
// Test multiple args in single query
{
- expectedInt64 := int64(234423)
- expectedFloat64 := float64(-0.2312)
- expectedBool := true
- expectedBytes := []byte{255, 0, 23, 16, 87, 45, 9, 23, 45, 223}
- expectedString := "test"
- var actualInt64 int64
- var actualFloat64 float64
- var actualBool bool
- var actualBytes []byte
- var actualString string
- err := conn.QueryRow(
- context.Background(),
- "select $1::int8, $2::float8, $3::boolean, $4::bytea, $5::text",
- pgx.QueryExecModeSimpleProtocol,
- expectedInt64, expectedFloat64, expectedBool, expectedBytes, expectedString,
- ).Scan(&actualInt64, &actualFloat64, &actualBool, &actualBytes, &actualString)
+ var (
+ expectedInt64 = int64(234423)
+ expectedFloat64 = float64(-0.2312)
+ expectedBool = true
+ expectedBytes = []byte{255, 0, 23, 16, 87, 45, 9, 23, 45, 223}
+ expectedString = "test"
+ actualInt64 int64
+ actualFloat64 float64
+ actualBool bool
+ actualBytes []byte
+ actualString string
+ err = conn.QueryRow(
+ context.Background(),
+ "select $1::int8, $2::float8, $3::boolean, $4::bytea, $5::text",
+ pgx.QueryExecModeSimpleProtocol,
+ expectedInt64, expectedFloat64, expectedBool, expectedBytes, expectedString,
+ ).Scan(&actualInt64, &actualFloat64, &actualBool, &actualBytes, &actualString)
+ )
if err != nil {
t.Error(err)
}
@@ -1980,14 +1985,16 @@ func TestConnSimpleProtocol(t *testing.T) {
// Test dangerous cases
{
- expected := "foo';drop table users;"
- var actual string
- err := conn.QueryRow(
- context.Background(),
- "select $1",
- pgx.QueryExecModeSimpleProtocol,
- expected,
- ).Scan(&actual)
+ var (
+ expected = "foo';drop table users;"
+ actual string
+ err = conn.QueryRow(
+ context.Background(),
+ "select $1",
+ pgx.QueryExecModeSimpleProtocol,
+ expected,
+ ).Scan(&actual)
+ )
if err != nil {
t.Error(err)
}
diff --git a/rows.go b/rows.go
index ac02ba9a0..aae31a2f1 100644
--- a/rows.go
+++ b/rows.go
@@ -99,8 +99,8 @@ func (r *connRow) Scan(dest ...any) (err error) {
return rows.Err()
}
- for _, d := range dest {
- if _, ok := d.(*pgtype.DriverBytes); ok {
+ for i := range dest {
+ if _, ok := dest[i].(*pgtype.DriverBytes); ok {
rows.Close()
return fmt.Errorf("cannot scan into *pgtype.DriverBytes from QueryRow")
}
@@ -268,17 +268,17 @@ func (rows *baseRows) Scan(dest ...any) error {
}
}
- for i, dst := range dest {
- if dst == nil {
+ for i := range dest {
+ if dest[i] == nil {
continue
}
- if rows.scanTypes[i] != reflect.TypeOf(dst) {
+ if rows.scanTypes[i] != reflect.TypeOf(dest[i]) {
rows.scanPlans[i] = m.PlanScan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, dest[i])
rows.scanTypes[i] = reflect.TypeOf(dest[i])
}
- err := rows.scanPlans[i].Scan(values[i], dst)
+ err := rows.scanPlans[i].Scan(values[i], dest[i])
if err != nil {
err = ScanArgError{ColumnIndex: i, FieldName: fieldDescriptions[i].Name, Err: err}
rows.fatal(err)
@@ -372,12 +372,12 @@ func ScanRow(typeMap *pgtype.Map, fieldDescriptions []pgconn.FieldDescription, v
return fmt.Errorf("number of field descriptions must equal number of destinations, got %d and %d", len(fieldDescriptions), len(dest))
}
- for i, d := range dest {
- if d == nil {
+ for i := range dest {
+ if dest[i] == nil {
continue
}
- err := typeMap.Scan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, values[i], d)
+ err := typeMap.Scan(fieldDescriptions[i].DataTypeOID, fieldDescriptions[i].Format, values[i], dest[i])
if err != nil {
return ScanArgError{ColumnIndex: i, FieldName: fieldDescriptions[i].Name, Err: err}
}
@@ -470,7 +470,7 @@ func CollectOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
var err error
if !rows.Next() {
- if err = rows.Err(); err != nil {
+ if err := rows.Err(); err != nil {
return value, err
}
return value, ErrNoRows
@@ -501,7 +501,7 @@ func CollectExactlyOneRow[T any](rows Rows, fn RowToFunc[T]) (T, error) {
)
if !rows.Next() {
- if err = rows.Err(); err != nil {
+ if err := rows.Err(); err != nil {
return value, err
}
@@ -617,7 +617,7 @@ func computeStructFields(
) []structRowField {
tail := len(*fieldStack)
*fieldStack = append(*fieldStack, 0)
- for i := 0; i < t.NumField(); i++ {
+ for i := range t.NumField() {
sf := t.Field(i)
(*fieldStack)[tail] = i
// Handle anonymous struct embedding, but do not try to handle embedded pointers.
@@ -733,8 +733,8 @@ func lookupNamedStructFields(
make([]structRowField, len(fldDescs)),
&fieldStack,
)
- for i, f := range fields {
- if f.path == nil {
+ for i := range fields {
+ if fields[i].path == nil {
return nil, fmt.Errorf(
"struct doesn't have corresponding row field %s",
fldDescs[i].Name,
@@ -758,15 +758,15 @@ func joinFieldNames(fldDescs []pgconn.FieldDescription) string {
}
totalSize := len(fldDescs) - 1 // Space for separator bytes.
- for _, d := range fldDescs {
- totalSize += len(d.Name)
+ for i := range fldDescs {
+ totalSize += len(fldDescs[i].Name)
}
var b strings.Builder
b.Grow(totalSize)
b.WriteString(fldDescs[0].Name)
- for _, d := range fldDescs[1:] {
+ for i := range fldDescs[1:] {
b.WriteByte(0) // Join with NUL byte as it's (presumably) not a valid column character.
- b.WriteString(d.Name)
+ b.WriteString(fldDescs[i+1].Name)
}
return b.String()
}
@@ -780,7 +780,7 @@ func computeNamedStructFields(
var missingField string
tail := len(*fieldStack)
*fieldStack = append(*fieldStack, 0)
- for i := 0; i < t.NumField(); i++ {
+ for i := range t.NumField() {
sf := t.Field(i)
(*fieldStack)[tail] = i
if sf.PkgPath != "" && !sf.Anonymous {
@@ -837,13 +837,13 @@ func fieldPosByName(fldDescs []pgconn.FieldDescription, field string, normalize
if normalize {
field = strings.ReplaceAll(field, "_", "")
}
- for i, desc := range fldDescs {
+ for i := range fldDescs {
if normalize {
- if strings.EqualFold(strings.ReplaceAll(desc.Name, "_", ""), field) {
+ if strings.EqualFold(strings.ReplaceAll(fldDescs[i].Name, "_", ""), field) {
return i
}
} else {
- if desc.Name == field {
+ if fldDescs[i].Name == field {
return i
}
}
@@ -864,8 +864,8 @@ type structRowField struct {
func setupStructScanTargets(receiver any, fields []structRowField) []any {
scanTargets := make([]any, len(fields))
v := reflect.ValueOf(receiver).Elem()
- for i, f := range fields {
- scanTargets[i] = v.FieldByIndex(f.path).Addr().Interface()
+ for i := range fields {
+ scanTargets[i] = v.FieldByIndex(fields[i].path).Addr().Interface()
}
return scanTargets
}
diff --git a/rows_test.go b/rows_test.go
index 4cda957fc..728d853f8 100644
--- a/rows_test.go
+++ b/rows_test.go
@@ -902,13 +902,13 @@ func TestRowToStructByNameLaxRowValue(t *testing.T) {
type AnotherTable struct{}
type User struct {
UserID int `json:"userId" db:"user_id"`
- Name string `json:"name" db:"name"`
+ Name string `json:"name" db:"name"`
}
type UserAPIKey struct {
UserAPIKeyID int `json:"userApiKeyId" db:"user_api_key_id"`
- UserID int `json:"userId" db:"user_id"`
+ UserID int `json:"userId" db:"user_id"`
- User *User `json:"user" db:"user"`
+ User *User `json:"user" db:"user"`
AnotherTable *AnotherTable `json:"anotherTable" db:"another_table"`
}
diff --git a/stdlib/sql.go b/stdlib/sql.go
index 298bb48cd..41707001e 100644
--- a/stdlib/sql.go
+++ b/stdlib/sql.go
@@ -204,19 +204,19 @@ func RandomizeHostOrderFunc(ctx context.Context, connConfig *pgx.ConnConfig) err
return nil
}
-func GetConnector(config pgx.ConnConfig, opts ...OptionOpenDB) driver.Connector {
+func GetConnector(config *pgx.ConnConfig, opts ...OptionOpenDB) driver.Connector {
c := connector{
- ConnConfig: config,
+ ConnConfig: *config,
BeforeConnect: func(context.Context, *pgx.ConnConfig) error { return nil }, // noop before connect by default
AfterConnect: func(context.Context, *pgx.Conn) error { return nil }, // noop after connect by default
ResetSession: func(context.Context, *pgx.Conn) error { return nil }, // noop reset session by default
driver: pgxDriver,
}
- for _, opt := range opts {
- opt(&c)
+ for i := range opts {
+ opts[i](&c)
}
- return c
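+ // Connect and Driver use pointer receivers, so return a *connector to satisfy driver.Connector.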
+ return &c
}
// GetPoolConnector creates a new driver.Connector from the given *pgxpool.Pool. By using this be sure to set the
@@ -230,14 +230,14 @@ func GetPoolConnector(pool *pgxpool.Pool, opts ...OptionOpenDB) driver.Connector
driver: pgxDriver,
}
- for _, opt := range opts {
- opt(&c)
+ for i := range opts {
+ opts[i](&c)
}
- return c
+ return &c
}
-func OpenDB(config pgx.ConnConfig, opts ...OptionOpenDB) *sql.DB {
+func OpenDB(config *pgx.ConnConfig, opts ...OptionOpenDB) *sql.DB {
c := GetConnector(config, opts...)
return sql.OpenDB(c)
}
@@ -264,11 +264,11 @@ type connector struct {
}
// Connect implement driver.Connector interface
-func (c connector) Connect(ctx context.Context) (driver.Conn, error) {
+func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
var (
connConfig pgx.ConnConfig
conn *pgx.Conn
- close func(context.Context) error
+ closeVar func(context.Context) error
err error
)
@@ -276,7 +276,7 @@ func (c connector) Connect(ctx context.Context) (driver.Conn, error) {
// Create a shallow copy of the config, so that BeforeConnect can safely modify it
connConfig = c.ConnConfig
- if err = c.BeforeConnect(ctx, &connConfig); err != nil {
+ if err := c.BeforeConnect(ctx, &connConfig); err != nil {
return nil, err
}
@@ -284,11 +284,11 @@ func (c connector) Connect(ctx context.Context) (driver.Conn, error) {
return nil, err
}
- if err = c.AfterConnect(ctx, conn); err != nil {
+ if err := c.AfterConnect(ctx, conn); err != nil {
return nil, err
}
- close = conn.Close
+ closeVar = conn.Close
} else {
var pconn *pgxpool.Conn
@@ -299,7 +299,7 @@ func (c connector) Connect(ctx context.Context) (driver.Conn, error) {
conn = pconn.Conn()
- close = func(_ context.Context) error {
+ closeVar = func(_ context.Context) error {
pconn.Release()
return nil
}
@@ -307,7 +307,7 @@ func (c connector) Connect(ctx context.Context) (driver.Conn, error) {
return &Conn{
conn: conn,
- close: close,
+ close: closeVar,
driver: c.driver,
connConfig: connConfig,
resetSessionFunc: c.ResetSession,
@@ -317,7 +317,7 @@ func (c connector) Connect(ctx context.Context) (driver.Conn, error) {
}
// Driver implement driver.Connector interface
-func (c connector) Driver() driver.Driver {
+func (c *connector) Driver() driver.Driver {
return c.driver
}
@@ -489,7 +489,7 @@ func (c *Conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, e
pgxOpts.AccessMode = pgx.ReadOnly
}
- tx, err := c.conn.BeginTx(ctx, pgxOpts)
+ tx, err := c.conn.BeginTx(ctx, &pgxOpts)
if err != nil {
return nil, err
}
@@ -646,8 +646,8 @@ func (r *Rows) Columns() []string {
if r.columnNames == nil {
fields := r.rows.FieldDescriptions()
r.columnNames = make([]string, len(fields))
- for i, fd := range fields {
- r.columnNames[i] = string(fd.Name)
+ for i := range fields {
+ r.columnNames[i] = fields[i].Name
}
}
@@ -699,30 +699,41 @@ func (r *Rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok b
// ColumnTypeScanType returns the value type that can be used to scan types into.
func (r *Rows) ColumnTypeScanType(index int) reflect.Type {
- fd := r.rows.FieldDescriptions()[index]
+ fd := r.rows.FieldDescriptions()[index]
switch fd.DataTypeOID {
case pgtype.Float8OID:
return reflect.TypeOf(float64(0))
+
case pgtype.Float4OID:
return reflect.TypeOf(float32(0))
+
case pgtype.Int8OID:
return reflect.TypeOf(int64(0))
+
case pgtype.Int4OID:
return reflect.TypeOf(int32(0))
+
case pgtype.Int2OID:
return reflect.TypeOf(int16(0))
+
case pgtype.BoolOID:
return reflect.TypeOf(false)
+
case pgtype.NumericOID:
return reflect.TypeOf(float64(0))
+
case pgtype.DateOID, pgtype.TimestampOID, pgtype.TimestamptzOID:
return reflect.TypeOf(time.Time{})
+
case pgtype.ByteaOID:
return reflect.TypeOf([]byte(nil))
+
default:
return reflect.TypeOf("")
}
}
func (r *Rows) Close() error {
@@ -731,34 +742,47 @@ func (r *Rows) Close() error {
}
func (r *Rows) Next(dest []driver.Value) error {
- m := r.conn.conn.TypeMap()
- fieldDescriptions := r.rows.FieldDescriptions()
+ var (
+ m = r.conn.conn.TypeMap()
+ fieldDescriptions = r.rows.FieldDescriptions()
+ )
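+ // Build the per-column conversion functions once, the first time Next needs them.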
if r.valueFuncs == nil {
r.valueFuncs = make([]rowValueFunc, len(fieldDescriptions))
- for i, fd := range fieldDescriptions {
- dataTypeOID := fd.DataTypeOID
- format := fd.Format
+ for i := range fieldDescriptions {
- switch fd.DataTypeOID {
+ var (
+ dataTypeOID = fieldDescriptions[i].DataTypeOID
+ format = fieldDescriptions[i].Format
+ )
+
+ switch fieldDescriptions[i].DataTypeOID {
case pgtype.BoolOID:
- var d bool
- scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ var (
+ d bool
+ scanPlan = m.PlanScan(dataTypeOID, format, &d)
+ )
r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
err := scanPlan.Scan(src, &d)
return d, err
}
+
case pgtype.ByteaOID:
- var d []byte
- scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ var (
+ d []byte
+ scanPlan = m.PlanScan(dataTypeOID, format, &d)
+ )
r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
err := scanPlan.Scan(src, &d)
return d, err
}
+
case pgtype.CIDOID, pgtype.OIDOID, pgtype.XIDOID:
- var d pgtype.Uint32
- scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ var (
+ d pgtype.Uint32
+ scanPlan = m.PlanScan(dataTypeOID, format, &d)
+ )
r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
err := scanPlan.Scan(src, &d)
if err != nil {
@@ -766,9 +790,12 @@ func (r *Rows) Next(dest []driver.Value) error {
}
return d.Value()
}
+
case pgtype.DateOID:
- var d pgtype.Date
- scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ var (
+ d pgtype.Date
+ scanPlan = m.PlanScan(dataTypeOID, format, &d)
+ )
r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
err := scanPlan.Scan(src, &d)
if err != nil {
@@ -776,44 +803,62 @@ func (r *Rows) Next(dest []driver.Value) error {
}
return d.Value()
}
+
case pgtype.Float4OID:
- var d float32
- scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ var (
+ d float32
+ scanPlan = m.PlanScan(dataTypeOID, format, &d)
+ )
r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
err := scanPlan.Scan(src, &d)
return float64(d), err
}
+
case pgtype.Float8OID:
- var d float64
- scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ var (
+ d float64
+ scanPlan = m.PlanScan(dataTypeOID, format, &d)
+ )
r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
err := scanPlan.Scan(src, &d)
return d, err
}
+
case pgtype.Int2OID:
- var d int16
- scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ var (
+ d int16
+ scanPlan = m.PlanScan(dataTypeOID, format, &d)
+ )
r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
err := scanPlan.Scan(src, &d)
return int64(d), err
}
+
case pgtype.Int4OID:
- var d int32
- scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ var (
+ d int32
+ scanPlan = m.PlanScan(dataTypeOID, format, &d)
+ )
r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
err := scanPlan.Scan(src, &d)
return int64(d), err
}
+
case pgtype.Int8OID:
- var d int64
- scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ var (
+ d int64
+ scanPlan = m.PlanScan(dataTypeOID, format, &d)
+ )
r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
err := scanPlan.Scan(src, &d)
return d, err
}
+
case pgtype.JSONOID, pgtype.JSONBOID:
- var d []byte
- scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ var (
+ d []byte
+ scanPlan = m.PlanScan(dataTypeOID, format, &d)
+ )
r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
err := scanPlan.Scan(src, &d)
if err != nil {
@@ -821,9 +866,12 @@ func (r *Rows) Next(dest []driver.Value) error {
}
return d, nil
}
+
case pgtype.TimestampOID:
- var d pgtype.Timestamp
- scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ var (
+ d pgtype.Timestamp
+ scanPlan = m.PlanScan(dataTypeOID, format, &d)
+ )
r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
err := scanPlan.Scan(src, &d)
if err != nil {
@@ -831,9 +879,12 @@ func (r *Rows) Next(dest []driver.Value) error {
}
return d.Value()
}
+
case pgtype.TimestamptzOID:
- var d pgtype.Timestamptz
- scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ var (
+ d pgtype.Timestamptz
+ scanPlan = m.PlanScan(dataTypeOID, format, &d)
+ )
r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
err := scanPlan.Scan(src, &d)
if err != nil {
@@ -841,9 +892,12 @@ func (r *Rows) Next(dest []driver.Value) error {
}
return d.Value()
}
+
case pgtype.XMLOID:
- var d []byte
- scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ var (
+ d []byte
+ scanPlan = m.PlanScan(dataTypeOID, format, &d)
+ )
r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
err := scanPlan.Scan(src, &d)
if err != nil {
@@ -851,13 +905,17 @@ func (r *Rows) Next(dest []driver.Value) error {
}
return d, nil
}
+
default:
- var d string
- scanPlan := m.PlanScan(dataTypeOID, format, &d)
+ var (
+ d string
+ scanPlan = m.PlanScan(dataTypeOID, format, &d)
+ )
r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
err := scanPlan.Scan(src, &d)
return d, err
}
+
}
}
}
@@ -878,10 +936,11 @@ func (r *Rows) Next(dest []driver.Value) error {
}
}
- for i, rv := range r.rows.RawValues() {
- if rv != nil {
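+ // Convert each non-nil raw value into a driver.Value with its per-column value func.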
+ rvs := r.rows.RawValues()
+ for i := range rvs {
+ if rvs[i] != nil {
var err error
- dest[i], err = r.valueFuncs[i](rv)
+ dest[i], err = r.valueFuncs[i](rvs[i])
if err != nil {
return fmt.Errorf("convert field %d failed: %w", i, err)
}
@@ -894,9 +953,9 @@ func (r *Rows) Next(dest []driver.Value) error {
}
func convertNamedArguments(args []any, argsV []driver.NamedValue) {
- for i, v := range argsV {
- if v.Value != nil {
- args[i] = v.Value.(any)
+ for i := range argsV {
+ if argsV[i].Value != nil {
+ args[i] = argsV[i].Value.(any)
} else {
args[i] = nil
}
diff --git a/stdlib/sql_test.go b/stdlib/sql_test.go
index 4c2eca64d..2f931918a 100644
--- a/stdlib/sql_test.go
+++ b/stdlib/sql_test.go
@@ -31,7 +31,7 @@ func openDB(t testing.TB, opts ...stdlib.OptionOpenDB) *sql.DB {
t.Helper()
config, err := pgx.ParseConfig(os.Getenv("PGX_TEST_DATABASE"))
require.NoError(t, err)
- return stdlib.OpenDB(*config, opts...)
+ return stdlib.OpenDB(config, opts...)
}
func closeDB(t testing.TB, db *sql.DB) {
@@ -62,7 +62,7 @@ func skipPostgreSQLVersionLessThan(t testing.TB, db *sql.DB, minVersion int64) {
err = conn.Raw(func(driverConn any) error {
conn := driverConn.(*stdlib.Conn).Conn()
serverVersionStr := conn.PgConn().ParameterStatus("server_version")
- serverVersionStr = regexp.MustCompile(`^[0-9]+`).FindString(serverVersionStr)
+ serverVersionStr = regexp.MustCompile(`^\d+`).FindString(serverVersionStr)
// if not PostgreSQL do nothing
if serverVersionStr == "" {
return nil
@@ -96,7 +96,7 @@ func testWithAllQueryExecModes(t *testing.T, f func(t *testing.T, db *sql.DB)) {
require.NoError(t, err)
config.DefaultQueryExecMode = mode
- db := stdlib.OpenDB(*config)
+ db := stdlib.OpenDB(config)
defer func() {
err := db.Close()
require.NoError(t, err)
@@ -122,7 +122,7 @@ func testWithKnownOIDQueryExecModes(t *testing.T, f func(t *testing.T, db *sql.D
require.NoError(t, err)
config.DefaultQueryExecMode = mode
- db := stdlib.OpenDB(*config)
+ db := stdlib.OpenDB(config)
defer func() {
err := db.Close()
require.NoError(t, err)
@@ -166,8 +166,8 @@ type preparer interface {
Prepare(query string) (*sql.Stmt, error)
}
-func prepareStmt(t *testing.T, p preparer, sql string) *sql.Stmt {
- stmt, err := p.Prepare(sql)
+func prepareStmt(t *testing.T, p preparer, sqlVar string) *sql.Stmt {
+ stmt, err := p.Prepare(sqlVar)
require.NoError(t, err)
return stmt
}
@@ -509,7 +509,7 @@ func TestConnQueryRowByteSlice(t *testing.T) {
func TestConnQueryFailure(t *testing.T) {
testWithAllQueryExecModes(t, func(t *testing.T, db *sql.DB) {
- _, err := db.Query("select 'foo")
+ _, err := db.Exec("select 'foo")
require.Error(t, err)
require.ErrorAs(t, err, new(*pgconn.PgError))
})
@@ -541,11 +541,14 @@ func TestConnQueryScanGoArray(t *testing.T) {
// database/sql native type should be passed through as a string
func TestConnQueryRowPgxBinary(t *testing.T) {
testWithAllQueryExecModes(t, func(t *testing.T, db *sql.DB) {
- sql := "select $1::int4[]"
- expected := "{1,2,3}"
- var actual string
- err := db.QueryRow(sql, expected).Scan(&actual)
+ var (
+ sqlVar = "select $1::int4[]"
+ expected = "{1,2,3}"
+ actual string
+ err = db.QueryRow(sqlVar, expected).Scan(&actual)
+ )
+
require.NoError(t, err)
require.EqualValues(t, expected, actual)
})
@@ -555,11 +558,13 @@ func TestConnQueryRowUnknownType(t *testing.T) {
testWithAllQueryExecModes(t, func(t *testing.T, db *sql.DB) {
skipCockroachDB(t, db, "Server does not support point type")
- sql := "select $1::point"
- expected := "(1,2)"
- var actual string
+ var (
+ sqlVar = "select $1::point"
+ expected = "(1,2)"
+ actual string
+ err = db.QueryRow(sqlVar, expected).Scan(&actual)
+ )
- err := db.QueryRow(sql, expected).Scan(&actual)
require.NoError(t, err)
require.EqualValues(t, expected, actual)
})
@@ -576,17 +581,19 @@ func TestConnQueryJSONIntoByteSlice(t *testing.T) {
`)
require.NoError(t, err)
- sql := `select * from docs`
- expected := []byte(`{"foo": "bar"}`)
- var actual []byte
+ var (
+ sqlVar = `select * from docs`
+ expected = []byte(`{"foo": "bar"}`)
+ actual []byte
+ )
- err = db.QueryRow(sql).Scan(&actual)
+ err = db.QueryRow(sqlVar).Scan(&actual)
if err != nil {
- t.Errorf("Unexpected failure: %v (sql -> %v)", err, sql)
+ t.Errorf("Unexpected failure: %v (sql -> %v)", err, sqlVar)
}
if !bytes.Equal(actual, expected) {
- t.Errorf(`Expected "%v", got "%v" (sql -> %v)`, string(expected), string(actual), sql)
+ t.Errorf(`Expected "%v", got "%v" (sql -> %v)`, string(expected), string(actual), sqlVar)
}
_, err = db.Exec(`drop table docs`)
@@ -799,23 +806,23 @@ func TestConnMultiplePrepareAndDeallocate(t *testing.T) {
testWithAllQueryExecModes(t, func(t *testing.T, db *sql.DB) {
skipCockroachDB(t, db, "Server does not support pg_prepared_statements")
- sql := "select 42"
- stmt1, err := db.PrepareContext(context.Background(), sql)
+ sqlVar := "select 42"
+ stmt1, err := db.PrepareContext(context.Background(), sqlVar)
require.NoError(t, err)
- stmt2, err := db.PrepareContext(context.Background(), sql)
+ stmt2, err := db.PrepareContext(context.Background(), sqlVar)
require.NoError(t, err)
err = stmt1.Close()
require.NoError(t, err)
var preparedStmtCount int64
- err = db.QueryRowContext(context.Background(), "select count(*) from pg_prepared_statements where statement = $1", sql).Scan(&preparedStmtCount)
+ err = db.QueryRowContext(context.Background(), "select count(*) from pg_prepared_statements where statement = $1", sqlVar).Scan(&preparedStmtCount)
require.NoError(t, err)
require.EqualValues(t, 1, preparedStmtCount)
err = stmt2.Close() // err isn't as useful as it should be as database/sql will ignore errors from Deallocate.
require.NoError(t, err)
- err = db.QueryRowContext(context.Background(), "select count(*) from pg_prepared_statements where statement = $1", sql).Scan(&preparedStmtCount)
+ err = db.QueryRowContext(context.Background(), "select count(*) from pg_prepared_statements where statement = $1", sqlVar).Scan(&preparedStmtCount)
require.NoError(t, err)
require.EqualValues(t, 0, preparedStmtCount)
})
@@ -1213,7 +1220,7 @@ func TestOptionBeforeAfterConnect(t *testing.T) {
var beforeConnConfigs []*pgx.ConnConfig
var afterConns []*pgx.Conn
- db := stdlib.OpenDB(*config,
+ db := stdlib.OpenDB(config,
stdlib.OptionBeforeConnect(func(ctx context.Context, connConfig *pgx.ConnConfig) error {
beforeConnConfigs = append(beforeConnConfigs, connConfig)
return nil
@@ -1286,7 +1293,7 @@ func TestResetSessionHookCalled(t *testing.T) {
connConfig, err := pgx.ParseConfig(os.Getenv("PGX_TEST_DATABASE"))
require.NoError(t, err)
- db := stdlib.OpenDB(*connConfig, stdlib.OptionResetSession(func(ctx context.Context, conn *pgx.Conn) error {
+ db := stdlib.OpenDB(connConfig, stdlib.OptionResetSession(func(ctx context.Context, conn *pgx.Conn) error {
mockCalled = true
return nil
diff --git a/tracelog/tracelog.go b/tracelog/tracelog.go
index a68fc6a6a..3f80067be 100644
--- a/tracelog/tracelog.go
+++ b/tracelog/tracelog.go
@@ -94,13 +94,13 @@ func LogLevelFromString(s string) (LogLevel, error) {
func logQueryArgs(args []any) []any {
logArgs := make([]any, 0, len(args))
- for _, a := range args {
- switch v := a.(type) {
+ for i := range args {
+ // Work on a copy of the element so the caller's args slice is never mutated;
+ // the original values may still be needed to execute the query itself.
+ a := args[i]
+ switch v := a.(type) {
case []byte:
if len(v) < 64 {
a = hex.EncodeToString(v)
} else {
a = fmt.Sprintf("%x (truncated %d bytes)", v[:64], len(v)-64)
}
case string:
if len(v) > 64 {
diff --git a/tx.go b/tx.go
index 571e5e00f..747504427 100644
--- a/tx.go
+++ b/tx.go
@@ -9,8 +9,26 @@ import (
"github.com/jackc/pgx/v5/pgconn"
)
-// TxIsoLevel is the transaction isolation level (serializable, repeatable read, read committed or read uncommitted)
-type TxIsoLevel string
+type (
+ // TxIsoLevel is the transaction isolation level (serializable, repeatable read, read committed or read uncommitted)
+ TxIsoLevel string
+ // TxAccessMode is the transaction access mode (read write or read only)
+ TxAccessMode string
+ // TxDeferrableMode is the transaction deferrable mode (deferrable or not deferrable)
+ TxDeferrableMode string
+
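+ // TxOptions are transaction modes within a transaction block.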
+ TxOptions struct {
+ IsoLevel TxIsoLevel
+ AccessMode TxAccessMode
+ DeferrableMode TxDeferrableMode
+
+ // BeginQuery is the SQL query that will be executed to begin the transaction. This allows using non-standard syntax
+ // such as BEGIN PRIORITY HIGH with CockroachDB. If set, this will override the other settings.
+ BeginQuery string
+ // CommitQuery is the SQL query that will be executed to commit the transaction.
+ CommitQuery string
+ }
+)
-// Transaction isolation levels
+// Transaction isolation levels, access modes, and deferrable modes
const (
@@ -18,42 +36,26 @@ const (
RepeatableRead TxIsoLevel = "repeatable read"
ReadCommitted TxIsoLevel = "read committed"
ReadUncommitted TxIsoLevel = "read uncommitted"
-)
-
-// TxAccessMode is the transaction access mode (read write or read only)
-type TxAccessMode string
-// Transaction access modes
-const (
ReadWrite TxAccessMode = "read write"
ReadOnly TxAccessMode = "read only"
-)
-
-// TxDeferrableMode is the transaction deferrable mode (deferrable or not deferrable)
-type TxDeferrableMode string
-// Transaction deferrable modes
-const (
Deferrable TxDeferrableMode = "deferrable"
NotDeferrable TxDeferrableMode = "not deferrable"
)
-// TxOptions are transaction modes within a transaction block
-type TxOptions struct {
- IsoLevel TxIsoLevel
- AccessMode TxAccessMode
- DeferrableMode TxDeferrableMode
-
- // BeginQuery is the SQL query that will be executed to begin the transaction. This allows using non-standard syntax
- // such as BEGIN PRIORITY HIGH with CockroachDB. If set this will override the other settings.
- BeginQuery string
- // CommitQuery is the SQL query that will be executed to commit the transaction.
- CommitQuery string
-}
+var (
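+ // emptyTxOptions is a nil sentinel; beginSQL returns a plain "begin" when given it.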
+ emptyTxOptions *TxOptions
+
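+ // ErrTxClosed occurs when a method is called on a transaction that has already been committed or rolled back.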
+ ErrTxClosed = errors.New("tx is closed")
-var emptyTxOptions TxOptions
+ // ErrTxCommitRollback occurs when an error has occurred in a transaction and
+ // Commit() is called. PostgreSQL accepts COMMIT on aborted transactions, but
+ // it is treated as ROLLBACK.
+ ErrTxCommitRollback = errors.New("commit unexpectedly resulted in rollback")
+)
-func (txOptions TxOptions) beginSQL() string {
+func (txOptions *TxOptions) beginSQL() string {
if txOptions == emptyTxOptions {
return "begin"
}
@@ -82,22 +84,15 @@ func (txOptions TxOptions) beginSQL() string {
return buf.String()
}
-var ErrTxClosed = errors.New("tx is closed")
-
-// ErrTxCommitRollback occurs when an error has occurred in a transaction and
-// Commit() is called. PostgreSQL accepts COMMIT on aborted transactions, but
-// it is treated as ROLLBACK.
-var ErrTxCommitRollback = errors.New("commit unexpectedly resulted in rollback")
-
// Begin starts a transaction. Unlike database/sql, the context only affects the begin command. i.e. there is no
// auto-rollback on context cancellation.
func (c *Conn) Begin(ctx context.Context) (Tx, error) {
- return c.BeginTx(ctx, TxOptions{})
+ return c.BeginTx(ctx, &TxOptions{})
}
// BeginTx starts a transaction with txOptions determining the transaction mode. Unlike database/sql, the context only
// affects the begin command. i.e. there is no auto-rollback on context cancellation.
-func (c *Conn) BeginTx(ctx context.Context, txOptions TxOptions) (Tx, error) {
+func (c *Conn) BeginTx(ctx context.Context, txOptions *TxOptions) (Tx, error) {
_, err := c.Exec(ctx, txOptions.beginSQL())
if err != nil {
// begin should never fail unless there is an underlying connection issue or
@@ -410,9 +405,9 @@ func BeginFunc(
func BeginTxFunc(
ctx context.Context,
db interface {
- BeginTx(ctx context.Context, txOptions TxOptions) (Tx, error)
+ BeginTx(ctx context.Context, txOptions *TxOptions) (Tx, error)
},
- txOptions TxOptions,
+ txOptions *TxOptions,
fn func(Tx) error,
) (err error) {
var tx Tx
diff --git a/tx_test.go b/tx_test.go
index cd4fb2074..b5eeb1f71 100644
--- a/tx_test.go
+++ b/tx_test.go
@@ -171,13 +171,13 @@ func TestTxCommitSerializationFailure(t *testing.T) {
}
defer c1.Exec(ctx, `drop table tx_serializable_sums`)
- tx1, err := c1.BeginTx(ctx, pgx.TxOptions{IsoLevel: pgx.Serializable})
+ tx1, err := c1.BeginTx(ctx, &pgx.TxOptions{IsoLevel: pgx.Serializable})
if err != nil {
t.Fatalf("Begin failed: %v", err)
}
defer tx1.Rollback(ctx)
- tx2, err := c2.BeginTx(ctx, pgx.TxOptions{IsoLevel: pgx.Serializable})
+ tx2, err := c2.BeginTx(ctx, &pgx.TxOptions{IsoLevel: pgx.Serializable})
if err != nil {
t.Fatalf("Begin failed: %v", err)
}
@@ -278,7 +278,7 @@ func TestBeginIsoLevels(t *testing.T) {
isoLevels := []pgx.TxIsoLevel{pgx.Serializable, pgx.RepeatableRead, pgx.ReadCommitted, pgx.ReadUncommitted}
for _, iso := range isoLevels {
- tx, err := conn.BeginTx(context.Background(), pgx.TxOptions{IsoLevel: iso})
+ tx, err := conn.BeginTx(context.Background(), &pgx.TxOptions{IsoLevel: iso})
if err != nil {
t.Fatalf("conn.Begin failed: %v", err)
}
@@ -360,7 +360,7 @@ func TestBeginReadOnly(t *testing.T) {
conn := mustConnectString(t, os.Getenv("PGX_TEST_DATABASE"))
defer closeConn(t, conn)
- tx, err := conn.BeginTx(context.Background(), pgx.TxOptions{AccessMode: pgx.ReadOnly})
+ tx, err := conn.BeginTx(context.Background(), &pgx.TxOptions{AccessMode: pgx.ReadOnly})
if err != nil {
t.Fatalf("conn.Begin failed: %v", err)
}
@@ -379,7 +379,7 @@ func TestBeginTxBeginQuery(t *testing.T) {
defer cancel()
pgxtest.RunWithQueryExecModes(ctx, t, defaultConnTestRunner, nil, func(ctx context.Context, t testing.TB, conn *pgx.Conn) {
- tx, err := conn.BeginTx(ctx, pgx.TxOptions{BeginQuery: "begin read only"})
+ tx, err := conn.BeginTx(ctx, &pgx.TxOptions{BeginQuery: "begin read only"})
require.NoError(t, err)
defer tx.Rollback(ctx)