mirror of https://github.com/superseriousbusiness/gotosocial.git
synced 2024-10-31 22:40:01 +00:00

[chore]: Bump github.com/jackc/pgx/v5 from 5.6.0 to 5.7.1 (#3302)

Bumps [github.com/jackc/pgx/v5](https://github.com/jackc/pgx) from 5.6.0 to 5.7.1.
- [Changelog](https://github.com/jackc/pgx/blob/master/CHANGELOG.md)
- [Commits](https://github.com/jackc/pgx/compare/v5.6.0...v5.7.1)

---
updated-dependencies:
- dependency-name: github.com/jackc/pgx/v5
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

This commit is contained in: parent f3311d6273, commit ca729aa4a0

29 changed files with 749 additions and 160 deletions
go.mod (6 lines changed)

@@ -39,7 +39,7 @@ require (
 	github.com/google/uuid v1.6.0
 	github.com/gorilla/feeds v1.2.0
 	github.com/gorilla/websocket v1.5.2
-	github.com/jackc/pgx/v5 v5.6.0
+	github.com/jackc/pgx/v5 v5.7.1
 	github.com/microcosm-cc/bluemonday v1.0.27
 	github.com/miekg/dns v1.1.62
 	github.com/minio/minio-go/v7 v7.0.76
@@ -154,8 +154,8 @@ require (
 	github.com/imdario/mergo v0.3.16 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jackc/pgpassfile v1.0.0 // indirect
-	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
-	github.com/jackc/puddle/v2 v2.2.1 // indirect
+	github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
+	github.com/jackc/puddle/v2 v2.2.2 // indirect
 	github.com/jessevdk/go-flags v1.5.0 // indirect
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
go.sum (12 lines changed)

@@ -364,12 +364,12 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
 github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
-github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
-github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
-github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY=
-github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw=
-github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
-github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs=
+github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA=
+github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
+github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc=
 github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
vendor/github.com/jackc/pgservicefile/.travis.yml (generated, vendored; 9 lines deleted)

@@ -1,9 +0,0 @@
-language: go
-
-go:
-  - 1.x
-  - tip
-
-matrix:
-  allow_failures:
-    - go: tip
vendor/github.com/jackc/pgservicefile/README.md (generated, vendored; 5 lines changed)

@@ -1,5 +1,6 @@
-[![](https://godoc.org/github.com/jackc/pgservicefile?status.svg)](https://godoc.org/github.com/jackc/pgservicefile)
-[![Build Status](https://travis-ci.org/jackc/pgservicefile.svg)](https://travis-ci.org/jackc/pgservicefile)
+[![Go Reference](https://pkg.go.dev/badge/github.com/jackc/pgservicefile.svg)](https://pkg.go.dev/github.com/jackc/pgservicefile)
+[![Build Status](https://github.com/jackc/pgservicefile/actions/workflows/ci.yml/badge.svg)](https://github.com/jackc/pgservicefile/actions/workflows/ci.yml)
 
 # pgservicefile
 
vendor/github.com/jackc/pgservicefile/pgservicefile.go (generated, vendored; 4 lines changed)

@@ -57,7 +57,7 @@ func ParseServicefile(r io.Reader) (*Servicefile, error) {
 		} else if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
 			service = &Service{Name: line[1 : len(line)-1], Settings: make(map[string]string)}
 			servicefile.Services = append(servicefile.Services, service)
-		} else {
+		} else if service != nil {
 			parts := strings.SplitN(line, "=", 2)
 			if len(parts) != 2 {
 				return nil, fmt.Errorf("unable to parse line %d", lineNum)
@@ -67,6 +67,8 @@ func ParseServicefile(r io.Reader) (*Servicefile, error) {
 			value := strings.TrimSpace(parts[1])
 
 			service.Settings[key] = value
+		} else {
+			return nil, fmt.Errorf("line %d is not in a section", lineNum)
 		}
 	}
 
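The hunk above makes the parser reject a key=value line that appears before any [section] header instead of dereferencing a nil *Service (the "fixes panic when parsing invalid file" item in the pgx changelog). A minimal sketch of the resulting behaviour, assuming only the ParseServicefile API visible in this diff:

package main

import (
	"fmt"
	"strings"

	"github.com/jackc/pgservicefile"
)

func main() {
	// Well-formed: every setting line sits inside a [section].
	sf, err := pgservicefile.ParseServicefile(strings.NewReader("[mydb]\nhost=db.example.com\nport=5432\n"))
	if err == nil {
		fmt.Println(sf.Services[0].Name, sf.Services[0].Settings["port"]) // mydb 5432
	}

	// A setting before any section previously hit a nil *Service; with this
	// change it now returns a "not in a section" error instead of panicking.
	_, err = pgservicefile.ParseServicefile(strings.NewReader("host=db.example.com\n[mydb]\n"))
	fmt.Println(err)
}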
vendor/github.com/jackc/pgx/v5/CHANGELOG.md (generated, vendored; 22 lines added)

@@ -1,3 +1,25 @@
+# 5.7.1 (September 10, 2024)
+
+* Fix data race in tracelog.TraceLog
+* Update puddle to v2.2.2. This removes the import of nanotime via linkname.
+* Update golang.org/x/crypto and golang.org/x/text
+
+# 5.7.0 (September 7, 2024)
+
+* Add support for sslrootcert=system (Yann Soubeyrand)
+* Add LoadTypes to load multiple types in a single SQL query (Nick Farrell)
+* Add XMLCodec supports encoding + scanning XML column type like json (nickcruess-soda)
+* Add MultiTrace (Stepan Rabotkin)
+* Add TraceLogConfig with customizable TimeKey (stringintech)
+* pgx.ErrNoRows wraps sql.ErrNoRows to aid in database/sql compatibility with native pgx functions (merlin)
+* Support scanning binary formatted uint32 into string / TextScanner (jennifersp)
+* Fix interval encoding to allow 0s and avoid extra spaces (Carlos Pérez-Aradros Herce)
+* Update pgservicefile - fixes panic when parsing invalid file
+* Better error message when reading past end of batch
+* Don't print url when url.Parse returns an error (Kevin Biju)
+* Fix snake case name normalization collision in RowToStructByName with db tag (nolandseigler)
+* Fix: Scan and encode types with underlying types of arrays
+
 # 5.6.0 (May 25, 2024)
 
 * Add StrictNamedArgs (Tomas Zahradnicek)
vendor/github.com/jackc/pgx/v5/batch.go (generated, vendored; 44 lines changed)

@@ -60,9 +60,13 @@ type Batch struct {
 	QueuedQueries []*QueuedQuery
 }
 
-// Queue queues a query to batch b. query can be an SQL query or the name of a prepared statement.
-// The only pgx option argument that is supported is QueryRewriter. Queries are executed using the
-// connection's DefaultQueryExecMode.
+// Queue queues a query to batch b. query can be an SQL query or the name of a prepared statement. The only pgx option
+// argument that is supported is QueryRewriter. Queries are executed using the connection's DefaultQueryExecMode.
+//
+// While query can contain multiple statements if the connection's DefaultQueryExecMode is QueryModeSimple, this should
+// be avoided. QueuedQuery.Fn must not be set as it will only be called for the first query. That is, QueuedQuery.Query,
+// QueuedQuery.QueryRow, and QueuedQuery.Exec must not be called. In addition, any error messages or tracing that
+// include the current query may reference the wrong query.
 func (b *Batch) Queue(query string, arguments ...any) *QueuedQuery {
 	qq := &QueuedQuery{
 		SQL: query,
@@ -128,7 +132,7 @@ func (br *batchResults) Exec() (pgconn.CommandTag, error) {
 	if !br.mrr.NextResult() {
 		err := br.mrr.Close()
 		if err == nil {
-			err = errors.New("no result")
+			err = errors.New("no more results in batch")
 		}
 		if br.conn.batchTracer != nil {
 			br.conn.batchTracer.TraceBatchQuery(br.ctx, br.conn, TraceBatchQueryData{
@@ -180,7 +184,7 @@ func (br *batchResults) Query() (Rows, error) {
 	if !br.mrr.NextResult() {
 		rows.err = br.mrr.Close()
 		if rows.err == nil {
-			rows.err = errors.New("no result")
+			rows.err = errors.New("no more results in batch")
 		}
 		rows.closed = true
 
@@ -287,7 +291,10 @@ func (br *pipelineBatchResults) Exec() (pgconn.CommandTag, error) {
 		return pgconn.CommandTag{}, br.err
 	}
 
-	query, arguments, _ := br.nextQueryAndArgs()
+	query, arguments, err := br.nextQueryAndArgs()
+	if err != nil {
+		return pgconn.CommandTag{}, err
+	}
 
 	results, err := br.pipeline.GetResults()
 	if err != nil {
@@ -330,9 +337,9 @@ func (br *pipelineBatchResults) Query() (Rows, error) {
 		return &baseRows{err: br.err, closed: true}, br.err
 	}
 
-	query, arguments, ok := br.nextQueryAndArgs()
-	if !ok {
-		query = "batch query"
+	query, arguments, err := br.nextQueryAndArgs()
+	if err != nil {
+		return &baseRows{err: err, closed: true}, err
 	}
 
 	rows := br.conn.getRows(br.ctx, query, arguments)
@@ -421,13 +428,16 @@ func (br *pipelineBatchResults) earlyError() error {
 	return br.err
 }
 
-func (br *pipelineBatchResults) nextQueryAndArgs() (query string, args []any, ok bool) {
-	if br.b != nil && br.qqIdx < len(br.b.QueuedQueries) {
-		bi := br.b.QueuedQueries[br.qqIdx]
-		query = bi.SQL
-		args = bi.Arguments
-		ok = true
-		br.qqIdx++
+func (br *pipelineBatchResults) nextQueryAndArgs() (query string, args []any, err error) {
+	if br.b == nil {
+		return "", nil, errors.New("no reference to batch")
 	}
-	return
+
+	if br.qqIdx >= len(br.b.QueuedQueries) {
+		return "", nil, errors.New("no more results in batch")
+	}
+
+	bi := br.b.QueuedQueries[br.qqIdx]
+	br.qqIdx++
+	return bi.SQL, bi.Arguments, nil
 }
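The renamed error surfaces when more results are read from a batch than were queued ("Better error message when reading past end of batch" in the changelog). A minimal sketch, assuming a reachable PostgreSQL server; the DSN is hypothetical:

package main

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://user:pass@localhost:5432/db") // hypothetical DSN
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	b := &pgx.Batch{}
	b.Queue("select 1")
	b.Queue("select 2")

	br := conn.SendBatch(ctx, b)
	defer br.Close()

	for i := 0; i < 2; i++ {
		var n int
		if err := br.QueryRow().Scan(&n); err != nil {
			panic(err)
		}
		fmt.Println(n)
	}

	// A third read goes past the two queued queries; pgx now reports
	// "no more results in batch" instead of the old "no result".
	if _, err := br.Exec(); err != nil {
		fmt.Println(err)
	}
}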
vendor/github.com/jackc/pgx/v5/conn.go (generated, vendored; 26 lines changed)

@@ -3,6 +3,7 @@
 import (
 	"context"
 	"crypto/sha256"
+	"database/sql"
 	"encoding/hex"
 	"errors"
 	"fmt"
@@ -102,13 +103,31 @@ func (ident Identifier) Sanitize() string {
 
 var (
 	// ErrNoRows occurs when rows are expected but none are returned.
-	ErrNoRows = errors.New("no rows in result set")
+	ErrNoRows = newProxyErr(sql.ErrNoRows, "no rows in result set")
 	// ErrTooManyRows occurs when more rows than expected are returned.
 	ErrTooManyRows = errors.New("too many rows in result set")
 )
 
-var errDisabledStatementCache = fmt.Errorf("cannot use QueryExecModeCacheStatement with disabled statement cache")
-var errDisabledDescriptionCache = fmt.Errorf("cannot use QueryExecModeCacheDescribe with disabled description cache")
+func newProxyErr(background error, msg string) error {
+	return &proxyError{
+		msg:        msg,
+		background: background,
+	}
+}
+
+type proxyError struct {
+	msg        string
+	background error
+}
+
+func (err *proxyError) Error() string { return err.msg }
+
+func (err *proxyError) Unwrap() error { return err.background }
+
+var (
+	errDisabledStatementCache   = fmt.Errorf("cannot use QueryExecModeCacheStatement with disabled statement cache")
+	errDisabledDescriptionCache = fmt.Errorf("cannot use QueryExecModeCacheDescribe with disabled description cache")
+)
 
 // Connect establishes a connection with a PostgreSQL server with a connection string. See
 // pgconn.Connect for details.
@@ -843,7 +862,6 @@ func (c *Conn) getStatementDescription(
 	mode QueryExecMode,
 	sql string,
 ) (sd *pgconn.StatementDescription, err error) {
-
 	switch mode {
 	case QueryExecModeCacheStatement:
 		if c.statementCache == nil {
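With ErrNoRows now built on a proxyError whose Unwrap returns sql.ErrNoRows, code that mixes database/sql and native pgx calls can match either sentinel. A minimal sketch assuming a reachable server; the DSN is hypothetical:

package main

import (
	"context"
	"database/sql"
	"errors"
	"fmt"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://user:pass@localhost:5432/db") // hypothetical DSN
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	var n int
	err = conn.QueryRow(ctx, "select 1 where false").Scan(&n)

	// Both checks are now true: the message is still "no rows in result set",
	// and Unwrap exposes sql.ErrNoRows for database/sql-style callers.
	fmt.Println(errors.Is(err, pgx.ErrNoRows), errors.Is(err, sql.ErrNoRows))
}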
vendor/github.com/jackc/pgx/v5/derived_types.go (generated, vendored; new file, 262 lines)

@@ -0,0 +1,262 @@
package pgx

import (
	"context"
	"fmt"
	"regexp"
	"strconv"
	"strings"

	"github.com/jackc/pgx/v5/pgtype"
)

/*
buildLoadDerivedTypesSQL generates the correct query for retrieving type information.

	pgVersion: the major version of the PostgreSQL server
	typeNames: the names of the types to load. If nil, load all types.
*/
func buildLoadDerivedTypesSQL(pgVersion int64, typeNames []string) string {
	supportsMultirange := (pgVersion >= 14)
	var typeNamesClause string

	if typeNames == nil {
		// This should not occur; this will not return any types
		typeNamesClause = "= ''"
	} else {
		typeNamesClause = "= ANY($1)"
	}
	parts := make([]string, 0, 10)

	// Each of the type names provided might be found in pg_class or pg_type.
	// Additionally, it may or may not include a schema portion.
	parts = append(parts, `
WITH RECURSIVE
-- find the OIDs in pg_class which match one of the provided type names
selected_classes(oid,reltype) AS (
    -- this query uses the namespace search path, so will match type names without a schema prefix
    SELECT pg_class.oid, pg_class.reltype
    FROM pg_catalog.pg_class
    LEFT JOIN pg_catalog.pg_namespace n ON n.oid = pg_class.relnamespace
    WHERE pg_catalog.pg_table_is_visible(pg_class.oid)
    AND relname `, typeNamesClause, `
    UNION ALL
    -- this query will only match type names which include the schema prefix
    SELECT pg_class.oid, pg_class.reltype
    FROM pg_class
    INNER JOIN pg_namespace ON (pg_class.relnamespace = pg_namespace.oid)
    WHERE nspname || '.' || relname `, typeNamesClause, `
),
selected_types(oid) AS (
    -- collect the OIDs from pg_types which correspond to the selected classes
    SELECT reltype AS oid
    FROM selected_classes
    UNION ALL
    -- as well as any other type names which match our criteria
    SELECT pg_type.oid
    FROM pg_type
    LEFT OUTER JOIN pg_namespace ON (pg_type.typnamespace = pg_namespace.oid)
    WHERE typname `, typeNamesClause, `
    OR nspname || '.' || typname `, typeNamesClause, `
),
-- this builds a parent/child mapping of objects, allowing us to know
-- all the child (ie: dependent) types that a parent (type) requires
-- As can be seen, there are 3 ways this can occur (the last of which
-- is due to being a composite class, where the composite fields are children)
pc(parent, child) AS (
    SELECT parent.oid, parent.typelem
    FROM pg_type parent
    WHERE parent.typtype = 'b' AND parent.typelem != 0
    UNION ALL
    SELECT parent.oid, parent.typbasetype
    FROM pg_type parent
    WHERE parent.typtypmod = -1 AND parent.typbasetype != 0
    UNION ALL
    SELECT pg_type.oid, atttypid
    FROM pg_attribute
    INNER JOIN pg_class ON (pg_class.oid = pg_attribute.attrelid)
    INNER JOIN pg_type ON (pg_type.oid = pg_class.reltype)
    WHERE NOT attisdropped
    AND attnum > 0
),
-- Now construct a recursive query which includes a 'depth' element.
-- This is used to ensure that the "youngest" children are registered before
-- their parents.
relationships(parent, child, depth) AS (
    SELECT DISTINCT 0::OID, selected_types.oid, 0
    FROM selected_types
    UNION ALL
    SELECT pg_type.oid AS parent, pg_attribute.atttypid AS child, 1
    FROM selected_classes c
    inner join pg_type ON (c.reltype = pg_type.oid)
    inner join pg_attribute on (c.oid = pg_attribute.attrelid)
    UNION ALL
    SELECT pc.parent, pc.child, relationships.depth + 1
    FROM pc
    INNER JOIN relationships ON (pc.parent = relationships.child)
),
-- composite fields need to be encapsulated as a couple of arrays to provide the required information for registration
composite AS (
    SELECT pg_type.oid, ARRAY_AGG(attname ORDER BY attnum) AS attnames, ARRAY_AGG(atttypid ORDER BY ATTNUM) AS atttypids
    FROM pg_attribute
    INNER JOIN pg_class ON (pg_class.oid = pg_attribute.attrelid)
    INNER JOIN pg_type ON (pg_type.oid = pg_class.reltype)
    WHERE NOT attisdropped
    AND attnum > 0
    GROUP BY pg_type.oid
)
-- Bring together this information, showing all the information which might possibly be required
-- to complete the registration, applying filters to only show the items which relate to the selected
-- types/classes.
SELECT typname,
    pg_namespace.nspname,
    typtype,
    typbasetype,
    typelem,
    pg_type.oid,`)
	if supportsMultirange {
		parts = append(parts, `
    COALESCE(multirange.rngtypid, 0) AS rngtypid,`)
	} else {
		parts = append(parts, `
    0 AS rngtypid,`)
	}
	parts = append(parts, `
    COALESCE(pg_range.rngsubtype, 0) AS rngsubtype,
    attnames, atttypids
    FROM relationships
    INNER JOIN pg_type ON (pg_type.oid = relationships.child)
    LEFT OUTER JOIN pg_range ON (pg_type.oid = pg_range.rngtypid)`)
	if supportsMultirange {
		parts = append(parts, `
    LEFT OUTER JOIN pg_range multirange ON (pg_type.oid = multirange.rngmultitypid)`)
	}

	parts = append(parts, `
    LEFT OUTER JOIN composite USING (oid)
    LEFT OUTER JOIN pg_namespace ON (pg_type.typnamespace = pg_namespace.oid)
    WHERE NOT (typtype = 'b' AND typelem = 0)`)
	parts = append(parts, `
    GROUP BY typname, pg_namespace.nspname, typtype, typbasetype, typelem, pg_type.oid, pg_range.rngsubtype,`)
	if supportsMultirange {
		parts = append(parts, `
    multirange.rngtypid,`)
	}
	parts = append(parts, `
    attnames, atttypids
    ORDER BY MAX(depth) desc, typname;`)
	return strings.Join(parts, "")
}

type derivedTypeInfo struct {
	Oid, Typbasetype, Typelem, Rngsubtype, Rngtypid uint32
	TypeName, Typtype, NspName                      string
	Attnames                                        []string
	Atttypids                                       []uint32
}

// LoadTypes performs a single (complex) query, returning all the required
// information to register the named types, as well as any other types directly
// or indirectly required to complete the registration.
// The result of this call can be passed into RegisterTypes to complete the process.
func (c *Conn) LoadTypes(ctx context.Context, typeNames []string) ([]*pgtype.Type, error) {
	m := c.TypeMap()
	if typeNames == nil || len(typeNames) == 0 {
		return nil, fmt.Errorf("No type names were supplied.")
	}

	// Disregard server version errors. This will result in
	// the SQL not support recent structures such as multirange
	serverVersion, _ := serverVersion(c)
	sql := buildLoadDerivedTypesSQL(serverVersion, typeNames)
	var rows Rows
	var err error
	if typeNames == nil {
		rows, err = c.Query(ctx, sql, QueryExecModeSimpleProtocol)
	} else {
		rows, err = c.Query(ctx, sql, QueryExecModeSimpleProtocol, typeNames)
	}
	if err != nil {
		return nil, fmt.Errorf("While generating load types query: %w", err)
	}
	defer rows.Close()
	result := make([]*pgtype.Type, 0, 100)
	for rows.Next() {
		ti := derivedTypeInfo{}
		err = rows.Scan(&ti.TypeName, &ti.NspName, &ti.Typtype, &ti.Typbasetype, &ti.Typelem, &ti.Oid, &ti.Rngtypid, &ti.Rngsubtype, &ti.Attnames, &ti.Atttypids)
		if err != nil {
			return nil, fmt.Errorf("While scanning type information: %w", err)
		}
		var type_ *pgtype.Type
		switch ti.Typtype {
		case "b": // array
			dt, ok := m.TypeForOID(ti.Typelem)
			if !ok {
				return nil, fmt.Errorf("Array element OID %v not registered while loading pgtype %q", ti.Typelem, ti.TypeName)
			}
			type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.ArrayCodec{ElementType: dt}}
		case "c": // composite
			var fields []pgtype.CompositeCodecField
			for i, fieldName := range ti.Attnames {
				dt, ok := m.TypeForOID(ti.Atttypids[i])
				if !ok {
					return nil, fmt.Errorf("Unknown field for composite type %q: field %q (OID %v) is not already registered.", ti.TypeName, fieldName, ti.Atttypids[i])
				}
				fields = append(fields, pgtype.CompositeCodecField{Name: fieldName, Type: dt})
			}

			type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.CompositeCodec{Fields: fields}}
		case "d": // domain
			dt, ok := m.TypeForOID(ti.Typbasetype)
			if !ok {
				return nil, fmt.Errorf("Domain base type OID %v was not already registered, needed for %q", ti.Typbasetype, ti.TypeName)
			}

			type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: dt.Codec}
		case "e": // enum
			type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.EnumCodec{}}
		case "r": // range
			dt, ok := m.TypeForOID(ti.Rngsubtype)
			if !ok {
				return nil, fmt.Errorf("Range element OID %v was not already registered, needed for %q", ti.Rngsubtype, ti.TypeName)
			}

			type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.RangeCodec{ElementType: dt}}
		case "m": // multirange
			dt, ok := m.TypeForOID(ti.Rngtypid)
			if !ok {
				return nil, fmt.Errorf("Multirange element OID %v was not already registered, needed for %q", ti.Rngtypid, ti.TypeName)
			}

			type_ = &pgtype.Type{Name: ti.TypeName, OID: ti.Oid, Codec: &pgtype.MultirangeCodec{ElementType: dt}}
		default:
			return nil, fmt.Errorf("Unknown typtype %q was found while registering %q", ti.Typtype, ti.TypeName)
		}
		if type_ != nil {
			m.RegisterType(type_)
			if ti.NspName != "" {
				nspType := &pgtype.Type{Name: ti.NspName + "." + type_.Name, OID: type_.OID, Codec: type_.Codec}
				m.RegisterType(nspType)
				result = append(result, nspType)
			}
			result = append(result, type_)
		}
	}
	return result, nil
}

// serverVersion returns the postgresql server version.
func serverVersion(c *Conn) (int64, error) {
	serverVersionStr := c.PgConn().ParameterStatus("server_version")
	serverVersionStr = regexp.MustCompile(`^[0-9]+`).FindString(serverVersionStr)
	// if not PostgreSQL do nothing
	if serverVersionStr == "" {
		return 0, fmt.Errorf("Cannot identify server version in %q", serverVersionStr)
	}

	version, err := strconv.ParseInt(serverVersionStr, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("postgres version parsing failed: %w", err)
	}
	return version, nil
}
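LoadTypes pairs with the new pgtype Map.RegisterTypes helper (added later in this commit) to resolve custom types and their dependencies in one round trip. A minimal sketch assuming a database that already defines the named types; the DSN and type names are hypothetical:

package main

import (
	"context"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://user:pass@localhost:5432/db") // hypothetical DSN
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	// One query loads the named types plus everything they depend on
	// (array element, domain base, composite field and range subtypes).
	types, err := conn.LoadTypes(ctx, []string{"my_enum", "my_composite"}) // hypothetical type names
	if err != nil {
		panic(err)
	}

	// LoadTypes already registered the types on conn's own map while resolving
	// dependencies; per its doc comment, the returned slice can be passed to
	// RegisterTypes to register the same types on another connection's map.
	conn.TypeMap().RegisterTypes(types)
}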
vendor/github.com/jackc/pgx/v5/doc.go (generated, vendored; 2 lines changed)

@@ -175,7 +175,7 @@
 
 Tracing and Logging
 
-pgx supports tracing by setting ConnConfig.Tracer.
+pgx supports tracing by setting ConnConfig.Tracer. To combine several tracers you can use the multitracer.Tracer.
 
 In addition, the tracelog package provides the TraceLog type which lets a traditional logger act as a Tracer.
 
vendor/github.com/jackc/pgx/v5/pgconn/config.go (generated, vendored; 64 lines changed)

@@ -467,14 +467,17 @@ func parseEnvSettings() map[string]string {
 func parseURLSettings(connString string) (map[string]string, error) {
 	settings := make(map[string]string)
 
-	url, err := url.Parse(connString)
+	parsedURL, err := url.Parse(connString)
 	if err != nil {
+		if urlErr := new(url.Error); errors.As(err, &urlErr) {
+			return nil, urlErr.Err
+		}
 		return nil, err
 	}
 
-	if url.User != nil {
-		settings["user"] = url.User.Username()
-		if password, present := url.User.Password(); present {
+	if parsedURL.User != nil {
+		settings["user"] = parsedURL.User.Username()
+		if password, present := parsedURL.User.Password(); present {
 			settings["password"] = password
 		}
 	}
@@ -482,7 +485,7 @@ func parseURLSettings(connString string) (map[string]string, error) {
 	// Handle multiple host:port's in url.Host by splitting them into host,host,host and port,port,port.
 	var hosts []string
 	var ports []string
-	for _, host := range strings.Split(url.Host, ",") {
+	for _, host := range strings.Split(parsedURL.Host, ",") {
 		if host == "" {
 			continue
 		}
@@ -508,7 +511,7 @@ func parseURLSettings(connString string) (map[string]string, error) {
 		settings["port"] = strings.Join(ports, ",")
 	}
 
-	database := strings.TrimLeft(url.Path, "/")
+	database := strings.TrimLeft(parsedURL.Path, "/")
 	if database != "" {
 		settings["database"] = database
 	}
@@ -517,7 +520,7 @@ func parseURLSettings(connString string) (map[string]string, error) {
 		"dbname": "database",
 	}
 
-	for k, v := range url.Query() {
+	for k, v := range parsedURL.Query() {
 		if k2, present := nameMap[k]; present {
 			k = k2
 		}
@@ -654,6 +657,36 @@ func configTLS(settings map[string]string, thisHost string, parseConfigOptions P
 
 	tlsConfig := &tls.Config{}
 
+	if sslrootcert != "" {
+		var caCertPool *x509.CertPool
+
+		if sslrootcert == "system" {
+			var err error
+
+			caCertPool, err = x509.SystemCertPool()
+			if err != nil {
+				return nil, fmt.Errorf("unable to load system certificate pool: %w", err)
+			}
+
+			sslmode = "verify-full"
+		} else {
+			caCertPool = x509.NewCertPool()
+
+			caPath := sslrootcert
+			caCert, err := os.ReadFile(caPath)
+			if err != nil {
+				return nil, fmt.Errorf("unable to read CA file: %w", err)
+			}
+
+			if !caCertPool.AppendCertsFromPEM(caCert) {
+				return nil, errors.New("unable to add CA to cert pool")
+			}
+		}
+
+		tlsConfig.RootCAs = caCertPool
+		tlsConfig.ClientCAs = caCertPool
+	}
+
 	switch sslmode {
 	case "disable":
 		return []*tls.Config{nil}, nil
@@ -711,23 +744,6 @@ func configTLS(settings map[string]string, thisHost string, parseConfigOptions P
 		return nil, errors.New("sslmode is invalid")
 	}
 
-	if sslrootcert != "" {
-		caCertPool := x509.NewCertPool()
-
-		caPath := sslrootcert
-		caCert, err := os.ReadFile(caPath)
-		if err != nil {
-			return nil, fmt.Errorf("unable to read CA file: %w", err)
-		}
-
-		if !caCertPool.AppendCertsFromPEM(caCert) {
-			return nil, errors.New("unable to add CA to cert pool")
-		}
-
-		tlsConfig.RootCAs = caCertPool
-		tlsConfig.ClientCAs = caCertPool
-	}
-
 	if (sslcert != "" && sslkey == "") || (sslcert == "" && sslkey != "") {
 		return nil, errors.New(`both "sslcert" and "sslkey" are required`)
 	}
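The relocated block is what makes sslrootcert=system work: the system certificate pool is loaded before the sslmode switch runs, and sslmode is forced to verify-full. A minimal sketch of a connection string using it; host and credentials are hypothetical:

package main

import (
	"context"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()

	// "sslrootcert=system" verifies the server against the operating system's
	// CA store and implies sslmode=verify-full, which is handy for managed
	// databases that present publicly-trusted certificates.
	conn, err := pgx.Connect(ctx,
		"postgres://user:pass@db.example.com:5432/app?sslrootcert=system") // hypothetical DSN
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)
}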
vendor/github.com/jackc/pgx/v5/pgtype/doc.go (generated, vendored; 3 lines added)

@@ -53,6 +53,9 @@
 		return err
 	}
 
+When using nullable pgtype types as parameters for queries, one has to remember
+to explicitly set their Valid field to true, otherwise the parameter's value will be NULL.
+
 JSON Support
 
 pgtype automatically marshals and unmarshals data from json and jsonb PostgreSQL types.
vendor/github.com/jackc/pgx/v5/pgtype/interval.go (generated, vendored; 6 lines changed)

@@ -132,12 +132,9 @@ func (encodePlanIntervalCodecText) Encode(value any, buf []byte) (newBuf []byte,
 
 	if interval.Days != 0 {
 		buf = append(buf, strconv.FormatInt(int64(interval.Days), 10)...)
-		buf = append(buf, " day"...)
+		buf = append(buf, " day "...)
 	}
 
-	if interval.Microseconds != 0 {
-		buf = append(buf, " "...)
-
 	absMicroseconds := interval.Microseconds
 	if absMicroseconds < 0 {
 		absMicroseconds = -absMicroseconds
@@ -155,7 +152,6 @@ func (encodePlanIntervalCodecText) Encode(value any, buf []byte) (newBuf []byte,
 	if microseconds != 0 {
 		buf = append(buf, fmt.Sprintf(".%06d", microseconds)...)
 	}
-	}
 
 	return buf, nil
 }
vendor/github.com/jackc/pgx/v5/pgtype/json.go (generated, vendored; 9 lines changed)

@@ -37,7 +37,7 @@ func (c *JSONCodec) PlanEncode(m *Map, oid uint32, format int16, value any) Enco
 	//
 	// https://github.com/jackc/pgx/issues/1430
 	//
-	// Check for driver.Valuer must come before json.Marshaler so that it is guaranteed to beused
+	// Check for driver.Valuer must come before json.Marshaler so that it is guaranteed to be used
 	// when both are implemented https://github.com/jackc/pgx/issues/1805
 	case driver.Valuer:
 		return &encodePlanDriverValuer{m: m, oid: oid, formatCode: format}
@@ -177,13 +177,6 @@ func (scanPlanJSONToByteSlice) Scan(src []byte, dst any) error {
 	return nil
 }
 
-type scanPlanJSONToBytesScanner struct{}
-
-func (scanPlanJSONToBytesScanner) Scan(src []byte, dst any) error {
-	scanner := (dst).(BytesScanner)
-	return scanner.ScanBytes(src)
-}
-
 type scanPlanJSONToJSONUnmarshal struct {
 	unmarshal func(data []byte, v any) error
 }
vendor/github.com/jackc/pgx/v5/pgtype/pgtype.go (generated, vendored; 38 lines changed)

@@ -26,6 +26,8 @@
 	XIDOID             = 28
 	CIDOID             = 29
 	JSONOID            = 114
+	XMLOID             = 142
+	XMLArrayOID        = 143
 	JSONArrayOID       = 199
 	PointOID           = 600
 	LsegOID            = 601
@@ -214,6 +216,15 @@ type Map struct {
 	TryWrapScanPlanFuncs []TryWrapScanPlanFunc
 }
 
+// Copy returns a new Map containing the same registered types.
+func (m *Map) Copy() *Map {
+	newMap := NewMap()
+	for _, type_ := range m.oidToType {
+		newMap.RegisterType(type_)
+	}
+	return newMap
+}
+
 func NewMap() *Map {
 	defaultMapInitOnce.Do(initDefaultMap)
 
@@ -248,6 +259,13 @@ func NewMap() *Map {
 	}
 }
 
+// RegisterTypes registers multiple data types in the sequence they are provided.
+func (m *Map) RegisterTypes(types []*Type) {
+	for _, t := range types {
+		m.RegisterType(t)
+	}
+}
+
 // RegisterType registers a data type with the Map. t must not be mutated after it is registered.
 func (m *Map) RegisterType(t *Type) {
 	m.oidToType[t.OID] = t
@@ -555,17 +573,24 @@ func TryFindUnderlyingTypeScanPlan(dst any) (plan WrappedScanPlanNextSetter, nex
 		elemValue = dstValue.Elem()
 	}
 	nextDstType := elemKindToPointerTypes[elemValue.Kind()]
-	if nextDstType == nil && elemValue.Kind() == reflect.Slice {
-		if elemValue.Type().Elem().Kind() == reflect.Uint8 {
-			var v *[]byte
-			nextDstType = reflect.TypeOf(v)
+	if nextDstType == nil {
+		if elemValue.Kind() == reflect.Slice {
+			if elemValue.Type().Elem().Kind() == reflect.Uint8 {
+				var v *[]byte
+				nextDstType = reflect.TypeOf(v)
+			}
+		}
+
+		// Get underlying type of any array.
+		// https://github.com/jackc/pgx/issues/2107
+		if elemValue.Kind() == reflect.Array {
+			nextDstType = reflect.PointerTo(reflect.ArrayOf(elemValue.Len(), elemValue.Type().Elem()))
 		}
 	}
 
 	if nextDstType != nil && dstValue.Type() != nextDstType && dstValue.CanConvert(nextDstType) {
 		return &underlyingTypeScanPlan{dstType: dstValue.Type(), nextDstType: nextDstType}, dstValue.Convert(nextDstType).Interface(), true
 	}
 
 }
 
 return nil, nil, false
@@ -1405,6 +1430,15 @@ func TryWrapFindUnderlyingTypeEncodePlan(value any) (plan WrappedEncodePlanNextS
 		return &underlyingTypeEncodePlan{nextValueType: byteSliceType}, refValue.Convert(byteSliceType).Interface(), true
 	}
 
+	// Get underlying type of any array.
+	// https://github.com/jackc/pgx/issues/2107
+	if refValue.Kind() == reflect.Array {
+		underlyingArrayType := reflect.ArrayOf(refValue.Len(), refValue.Type().Elem())
+		if refValue.Type() != underlyingArrayType {
+			return &underlyingTypeEncodePlan{nextValueType: underlyingArrayType}, refValue.Convert(underlyingArrayType).Interface(), true
+		}
+	}
+
 	return nil, nil, false
 }
 
vendor/github.com/jackc/pgx/v5/pgtype/pgtype_default.go (generated, vendored; 3 lines added)

@@ -2,6 +2,7 @@
 
 import (
 	"encoding/json"
+	"encoding/xml"
 	"net"
 	"net/netip"
 	"reflect"
@@ -89,6 +90,7 @@ func initDefaultMap() {
 	defaultMap.RegisterType(&Type{Name: "varbit", OID: VarbitOID, Codec: BitsCodec{}})
 	defaultMap.RegisterType(&Type{Name: "varchar", OID: VarcharOID, Codec: TextCodec{}})
 	defaultMap.RegisterType(&Type{Name: "xid", OID: XIDOID, Codec: Uint32Codec{}})
+	defaultMap.RegisterType(&Type{Name: "xml", OID: XMLOID, Codec: &XMLCodec{Marshal: xml.Marshal, Unmarshal: xml.Unmarshal}})
 
 	// Range types
 	defaultMap.RegisterType(&Type{Name: "daterange", OID: DaterangeOID, Codec: &RangeCodec{ElementType: defaultMap.oidToType[DateOID]}})
@@ -153,6 +155,7 @@ func initDefaultMap() {
 	defaultMap.RegisterType(&Type{Name: "_varbit", OID: VarbitArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[VarbitOID]}})
 	defaultMap.RegisterType(&Type{Name: "_varchar", OID: VarcharArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[VarcharOID]}})
 	defaultMap.RegisterType(&Type{Name: "_xid", OID: XIDArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[XIDOID]}})
+	defaultMap.RegisterType(&Type{Name: "_xml", OID: XMLArrayOID, Codec: &ArrayCodec{ElementType: defaultMap.oidToType[XMLOID]}})
 
 	// Integer types that directly map to a PostgreSQL type
 	registerDefaultPgTypeVariants[int16](defaultMap, "int2")
vendor/github.com/jackc/pgx/v5/pgtype/time.go (generated, vendored; 8 lines changed)

@@ -19,9 +19,11 @@ type TimeValuer interface {
 
 // Time represents the PostgreSQL time type. The PostgreSQL time is a time of day without time zone.
 //
-// Time is represented as the number of microseconds since midnight in the same way that PostgreSQL does. Other time
-// and date types in pgtype can use time.Time as the underlying representation. However, pgtype.Time type cannot due
-// to needing to handle 24:00:00. time.Time converts that to 00:00:00 on the following day.
+// Time is represented as the number of microseconds since midnight in the same way that PostgreSQL does. Other time and
+// date types in pgtype can use time.Time as the underlying representation. However, pgtype.Time type cannot due to
+// needing to handle 24:00:00. time.Time converts that to 00:00:00 on the following day.
+//
+// The time with time zone type is not supported. Use of time with time zone is discouraged by the PostgreSQL documentation.
 type Time struct {
 	Microseconds int64 // Number of microseconds since midnight
 	Valid        bool
vendor/github.com/jackc/pgx/v5/pgtype/uint32.go (generated, vendored; 22 lines changed)

@@ -205,6 +205,8 @@ func (Uint32Codec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPl
 			return scanPlanBinaryUint32ToUint32{}
 		case Uint32Scanner:
 			return scanPlanBinaryUint32ToUint32Scanner{}
+		case TextScanner:
+			return scanPlanBinaryUint32ToTextScanner{}
 		}
 	case TextFormatCode:
 		switch target.(type) {
@@ -282,6 +284,26 @@ func (scanPlanBinaryUint32ToUint32Scanner) Scan(src []byte, dst any) error {
 	return s.ScanUint32(Uint32{Uint32: n, Valid: true})
 }
 
+type scanPlanBinaryUint32ToTextScanner struct{}
+
+func (scanPlanBinaryUint32ToTextScanner) Scan(src []byte, dst any) error {
+	s, ok := (dst).(TextScanner)
+	if !ok {
+		return ErrScanTargetTypeChanged
+	}
+
+	if src == nil {
+		return s.ScanText(Text{})
+	}
+
+	if len(src) != 4 {
+		return fmt.Errorf("invalid length for uint32: %v", len(src))
+	}
+
+	n := uint64(binary.BigEndian.Uint32(src))
+	return s.ScanText(Text{String: strconv.FormatUint(n, 10), Valid: true})
+}
+
 type scanPlanTextAnyToUint32Scanner struct{}
 
 func (scanPlanTextAnyToUint32Scanner) Scan(src []byte, dst any) error {
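The new scan plan lets a binary-format uint32 column (oid, xid, cid) land in a TextScanner target such as pgtype.Text. A minimal sketch under that assumption, using a system catalog column so no schema setup is needed; the DSN is hypothetical:

package main

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgtype"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://user:pass@localhost:5432/db") // hypothetical DSN
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	// pg_class.oid has type oid, which Uint32Codec handles; with the binary
	// protocol the value can now be scanned into a TextScanner such as
	// pgtype.Text instead of only *uint32 / Uint32Scanner targets.
	var oid pgtype.Text
	if err := conn.QueryRow(ctx, "select oid from pg_class limit 1").Scan(&oid); err != nil {
		panic(err)
	}
	fmt.Println(oid.String)
}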
vendor/github.com/jackc/pgx/v5/pgtype/xml.go (generated, vendored; new file, 198 lines)

@@ -0,0 +1,198 @@
package pgtype

import (
	"database/sql"
	"database/sql/driver"
	"encoding/xml"
	"fmt"
	"reflect"
)

type XMLCodec struct {
	Marshal   func(v any) ([]byte, error)
	Unmarshal func(data []byte, v any) error
}

func (*XMLCodec) FormatSupported(format int16) bool {
	return format == TextFormatCode || format == BinaryFormatCode
}

func (*XMLCodec) PreferredFormat() int16 {
	return TextFormatCode
}

func (c *XMLCodec) PlanEncode(m *Map, oid uint32, format int16, value any) EncodePlan {
	switch value.(type) {
	case string:
		return encodePlanXMLCodecEitherFormatString{}
	case []byte:
		return encodePlanXMLCodecEitherFormatByteSlice{}

	// Cannot rely on driver.Valuer being handled later because anything can be marshalled.
	//
	// https://github.com/jackc/pgx/issues/1430
	//
	// Check for driver.Valuer must come before xml.Marshaler so that it is guaranteed to be used
	// when both are implemented https://github.com/jackc/pgx/issues/1805
	case driver.Valuer:
		return &encodePlanDriverValuer{m: m, oid: oid, formatCode: format}

	// Must come before trying wrap encode plans because a pointer to a struct may be unwrapped to a struct that can be
	// marshalled.
	//
	// https://github.com/jackc/pgx/issues/1681
	case xml.Marshaler:
		return &encodePlanXMLCodecEitherFormatMarshal{
			marshal: c.Marshal,
		}
	}

	// Because anything can be marshalled the normal wrapping in Map.PlanScan doesn't get a chance to run. So try the
	// appropriate wrappers here.
	for _, f := range []TryWrapEncodePlanFunc{
		TryWrapDerefPointerEncodePlan,
		TryWrapFindUnderlyingTypeEncodePlan,
	} {
		if wrapperPlan, nextValue, ok := f(value); ok {
			if nextPlan := c.PlanEncode(m, oid, format, nextValue); nextPlan != nil {
				wrapperPlan.SetNext(nextPlan)
				return wrapperPlan
			}
		}
	}

	return &encodePlanXMLCodecEitherFormatMarshal{
		marshal: c.Marshal,
	}
}

type encodePlanXMLCodecEitherFormatString struct{}

func (encodePlanXMLCodecEitherFormatString) Encode(value any, buf []byte) (newBuf []byte, err error) {
	xmlString := value.(string)
	buf = append(buf, xmlString...)
	return buf, nil
}

type encodePlanXMLCodecEitherFormatByteSlice struct{}

func (encodePlanXMLCodecEitherFormatByteSlice) Encode(value any, buf []byte) (newBuf []byte, err error) {
	xmlBytes := value.([]byte)
	if xmlBytes == nil {
		return nil, nil
	}

	buf = append(buf, xmlBytes...)
	return buf, nil
}

type encodePlanXMLCodecEitherFormatMarshal struct {
	marshal func(v any) ([]byte, error)
}

func (e *encodePlanXMLCodecEitherFormatMarshal) Encode(value any, buf []byte) (newBuf []byte, err error) {
	xmlBytes, err := e.marshal(value)
	if err != nil {
		return nil, err
	}

	buf = append(buf, xmlBytes...)
	return buf, nil
}

func (c *XMLCodec) PlanScan(m *Map, oid uint32, format int16, target any) ScanPlan {
	switch target.(type) {
	case *string:
		return scanPlanAnyToString{}

	case **string:
		// This is to fix **string scanning. It seems wrong to special case **string, but it's not clear what a better
		// solution would be.
		//
		// https://github.com/jackc/pgx/issues/1470 -- **string
		// https://github.com/jackc/pgx/issues/1691 -- ** anything else

		if wrapperPlan, nextDst, ok := TryPointerPointerScanPlan(target); ok {
			if nextPlan := m.planScan(oid, format, nextDst); nextPlan != nil {
				if _, failed := nextPlan.(*scanPlanFail); !failed {
					wrapperPlan.SetNext(nextPlan)
					return wrapperPlan
				}
			}
		}

	case *[]byte:
		return scanPlanXMLToByteSlice{}
	case BytesScanner:
		return scanPlanBinaryBytesToBytesScanner{}

	// Cannot rely on sql.Scanner being handled later because scanPlanXMLToXMLUnmarshal will take precedence.
	//
	// https://github.com/jackc/pgx/issues/1418
	case sql.Scanner:
		return &scanPlanSQLScanner{formatCode: format}
	}

	return &scanPlanXMLToXMLUnmarshal{
		unmarshal: c.Unmarshal,
	}
}

type scanPlanXMLToByteSlice struct{}

func (scanPlanXMLToByteSlice) Scan(src []byte, dst any) error {
	dstBuf := dst.(*[]byte)
	if src == nil {
		*dstBuf = nil
		return nil
	}

	*dstBuf = make([]byte, len(src))
	copy(*dstBuf, src)
	return nil
}

type scanPlanXMLToXMLUnmarshal struct {
	unmarshal func(data []byte, v any) error
}

func (s *scanPlanXMLToXMLUnmarshal) Scan(src []byte, dst any) error {
	if src == nil {
		dstValue := reflect.ValueOf(dst)
		if dstValue.Kind() == reflect.Ptr {
			el := dstValue.Elem()
			switch el.Kind() {
			case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface, reflect.Struct:
				el.Set(reflect.Zero(el.Type()))
				return nil
			}
		}

		return fmt.Errorf("cannot scan NULL into %T", dst)
	}

	elem := reflect.ValueOf(dst).Elem()
	elem.Set(reflect.Zero(elem.Type()))

	return s.unmarshal(src, dst)
}

func (c *XMLCodec) DecodeDatabaseSQLValue(m *Map, oid uint32, format int16, src []byte) (driver.Value, error) {
	if src == nil {
		return nil, nil
	}

	dstBuf := make([]byte, len(src))
	copy(dstBuf, src)
	return dstBuf, nil
}

func (c *XMLCodec) DecodeValue(m *Map, oid uint32, format int16, src []byte) (any, error) {
	if src == nil {
		return nil, nil
	}

	var dst any
	err := c.Unmarshal(src, &dst)
	return dst, err
}
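With the xml and _xml OIDs registered against XMLCodec in pgtype_default.go, xml columns marshal and scan much like json ones. A minimal sketch assuming a reachable server built with XML support; the DSN and struct shape are hypothetical:

package main

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v5"
)

// Book is a hypothetical document shape handled by encoding/xml.
type Book struct {
	Title string `xml:"title"`
	Year  int    `xml:"year"`
}

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://user:pass@localhost:5432/db") // hypothetical DSN
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	// Encode: the struct parameter is marshalled via the codec's Marshal func
	// (xml.Marshal in the default map). Scan: the xml column is unmarshalled
	// back into the struct via its Unmarshal func.
	var out Book
	err = conn.QueryRow(ctx, "select $1::xml", Book{Title: "pgx", Year: 2024}).Scan(&out)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}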
vendor/github.com/jackc/pgx/v5/pgxpool/tx.go (generated, vendored; 13 lines changed)

@@ -18,9 +18,10 @@ func (tx *Tx) Begin(ctx context.Context) (pgx.Tx, error) {
 	return tx.t.Begin(ctx)
 }
 
-// Commit commits the transaction and returns the associated connection back to the Pool. Commit will return ErrTxClosed
-// if the Tx is already closed, but is otherwise safe to call multiple times. If the commit fails with a rollback status
-// (e.g. the transaction was already in a broken state) then ErrTxCommitRollback will be returned.
+// Commit commits the transaction and returns the associated connection back to the Pool. Commit will return an error
+// where errors.Is(ErrTxClosed) is true if the Tx is already closed, but is otherwise safe to call multiple times. If
+// the commit fails with a rollback status (e.g. the transaction was already in a broken state) then ErrTxCommitRollback
+// will be returned.
 func (tx *Tx) Commit(ctx context.Context) error {
 	err := tx.t.Commit(ctx)
 	if tx.c != nil {
@@ -30,9 +31,9 @@ func (tx *Tx) Commit(ctx context.Context) error {
 	return err
 }
 
-// Rollback rolls back the transaction and returns the associated connection back to the Pool. Rollback will return ErrTxClosed
-// if the Tx is already closed, but is otherwise safe to call multiple times. Hence, defer tx.Rollback() is safe even if
-// tx.Commit() will be called first in a non-error condition.
+// Rollback rolls back the transaction and returns the associated connection back to the Pool. Rollback will return
+// where an error where errors.Is(ErrTxClosed) is true if the Tx is already closed, but is otherwise safe to call
+// multiple times. Hence, defer tx.Rollback() is safe even if tx.Commit() will be called first in a non-error condition.
 func (tx *Tx) Rollback(ctx context.Context) error {
 	err := tx.t.Rollback(ctx)
 	if tx.c != nil {
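The reworded comments document that Commit and Rollback on an already-closed Tx return an error satisfying errors.Is(err, pgx.ErrTxClosed) rather than the bare sentinel. A hedged usage sketch of the resulting defer-Rollback-then-Commit pattern follows; the connection string, table, and function names are placeholders.

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgxpool"
)

func debit(ctx context.Context, pool *pgxpool.Pool) error {
	tx, err := pool.Begin(ctx)
	if err != nil {
		return err
	}
	// Safe even when Commit below succeeds first: the extra Rollback on a closed Tx
	// just returns an error wrapping ErrTxClosed, which we deliberately ignore.
	defer func() {
		if rbErr := tx.Rollback(ctx); rbErr != nil && !errors.Is(rbErr, pgx.ErrTxClosed) {
			log.Printf("rollback failed: %v", rbErr)
		}
	}()

	if _, err := tx.Exec(ctx, "UPDATE accounts SET balance = balance - 1 WHERE id = $1", 42); err != nil {
		return err
	}
	return tx.Commit(ctx)
}

func main() {
	ctx := context.Background()
	pool, err := pgxpool.New(ctx, "postgres://localhost:5432/example") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()
	if err := debit(ctx, pool); err != nil {
		log.Fatal(err)
	}
}
```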
vendor/github.com/jackc/pgx/v5/rows.go (generated, vendored; 19 lines changed)

@@ -797,7 +797,7 @@ func computeNamedStructFields(
 		if !dbTagPresent {
 			colName = sf.Name
 		}
-		fpos := fieldPosByName(fldDescs, colName)
+		fpos := fieldPosByName(fldDescs, colName, !dbTagPresent)
 		if fpos == -1 {
 			if missingField == "" {
 				missingField = colName
@@ -816,17 +816,22 @@ func computeNamedStructFields(
 
 const structTagKey = "db"
 
-func fieldPosByName(fldDescs []pgconn.FieldDescription, field string) (i int) {
+func fieldPosByName(fldDescs []pgconn.FieldDescription, field string, normalize bool) (i int) {
 	i = -1
-	for i, desc := range fldDescs {
-
-		// Snake case support.
+
+	if normalize {
 		field = strings.ReplaceAll(field, "_", "")
-		descName := strings.ReplaceAll(desc.Name, "_", "")
-
-		if strings.EqualFold(descName, field) {
-			return i
+	}
+	for i, desc := range fldDescs {
+		if normalize {
+			if strings.EqualFold(strings.ReplaceAll(desc.Name, "_", ""), field) {
+				return i
+			}
+		} else {
+			if desc.Name == field {
+				return i
+			}
 		}
 	}
 	return
 }
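The new normalize parameter means the underscore- and case-insensitive ("snake case") matching now applies only to struct fields without a db tag; tagged fields must match the column name exactly. A standalone illustration of that rule, not the pgx implementation itself:

```go
package main

import (
	"fmt"
	"strings"
)

// fieldMatchesColumn reports whether a struct field name matches a result column
// under the rule introduced in the diff above.
func fieldMatchesColumn(field, column string, normalize bool) bool {
	if !normalize {
		// Fields with an explicit db tag: exact match only.
		return field == column
	}
	// Untagged fields: ignore case and underscores.
	return strings.EqualFold(
		strings.ReplaceAll(field, "_", ""),
		strings.ReplaceAll(column, "_", ""),
	)
}

func main() {
	fmt.Println(fieldMatchesColumn("UserID", "user_id", true))   // true  (untagged field)
	fmt.Println(fieldMatchesColumn("user_id", "user_id", false)) // true  (tag matches exactly)
	fmt.Println(fieldMatchesColumn("userid", "user_id", false))  // false (tags are strict now)
}
```

Read together with computeNamedStructFields, this suggests that an explicit db:"user_id" tag no longer matches a column named userid, while an untagged UserID field still does.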
vendor/github.com/jackc/pgx/v5/stdlib/sql.go (generated, vendored; 24 lines changed)

@@ -75,6 +75,7 @@
 	"math"
 	"math/rand"
 	"reflect"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
@@ -98,7 +99,7 @@ func init() {
 
 	// if pgx driver was already registered by different pgx major version then we
 	// skip registration under the default name.
-	if !contains(sql.Drivers(), "pgx") {
+	if !slices.Contains(sql.Drivers(), "pgx") {
 		sql.Register("pgx", pgxDriver)
 	}
 	sql.Register("pgx/v5", pgxDriver)
@@ -120,17 +121,6 @@ func init() {
 	}
 }
 
-// TODO replace by slices.Contains when experimental package will be merged to stdlib
-// https://pkg.go.dev/golang.org/x/exp/slices#Contains
-func contains(list []string, y string) bool {
-	for _, x := range list {
-		if x == y {
-			return true
-		}
-	}
-	return false
-}
-
 // OptionOpenDB options for configuring the driver when opening a new db pool.
 type OptionOpenDB func(*connector)
 
@@ -805,6 +795,16 @@ func (r *Rows) Next(dest []driver.Value) error {
 				}
 				return d.Value()
 			}
+		case pgtype.XMLOID:
+			var d []byte
+			scanPlan := m.PlanScan(dataTypeOID, format, &d)
+			r.valueFuncs[i] = func(src []byte) (driver.Value, error) {
+				err := scanPlan.Scan(src, &d)
+				if err != nil {
+					return nil, err
+				}
+				return d, nil
+			}
 		default:
 			var d string
 			scanPlan := m.PlanScan(dataTypeOID, format, &d)
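Besides swapping the hand-rolled contains helper for slices.Contains from the Go 1.21 standard library, the stdlib driver now handles xml columns through a dedicated []byte scan plan. A hedged sketch of consuming both through database/sql is below; the DSN is a placeholder and the expectation that the xml value arrives as bytes is read off the diff above rather than from documentation.

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/jackc/pgx/v5/stdlib" // registers the "pgx" and "pgx/v5" drivers via init()
)

func main() {
	db, err := sql.Open("pgx/v5", "postgres://localhost:5432/example") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// With the XMLOID branch added above, an xml column should scan into []byte.
	var xmlDoc []byte
	if err := db.QueryRow(`SELECT '<doc/>'::xml`).Scan(&xmlDoc); err != nil {
		log.Fatal(err)
	}
	log.Printf("%s", xmlDoc)
}
```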
vendor/github.com/jackc/puddle/v2/CHANGELOG.md (generated, vendored; 5 lines changed)

@@ -1,3 +1,8 @@
+# 2.2.2 (September 10, 2024)
+
+* Add empty acquire time to stats (Maxim Ivanov)
+* Stop importing nanotime from runtime via linkname (maypok86)
+
 # 2.2.1 (July 15, 2023)
 
 * Fix: CreateResource cannot overflow pool. This changes documented behavior of CreateResource. Previously,
|
2
vendor/github.com/jackc/puddle/v2/README.md
generated
vendored
2
vendor/github.com/jackc/puddle/v2/README.md
generated
vendored
|
@ -1,4 +1,4 @@
|
||||||
[![](https://godoc.org/github.com/jackc/puddle?status.svg)](https://godoc.org/github.com/jackc/puddle)
|
[![Go Reference](https://pkg.go.dev/badge/github.com/jackc/puddle/v2.svg)](https://pkg.go.dev/github.com/jackc/puddle/v2)
|
||||||
![Build Status](https://github.com/jackc/puddle/actions/workflows/ci.yml/badge.svg)
|
![Build Status](https://github.com/jackc/puddle/actions/workflows/ci.yml/badge.svg)
|
||||||
|
|
||||||
# Puddle
|
# Puddle
|
||||||
|
|
vendor/github.com/jackc/puddle/v2/nanotime.go (generated, vendored; new file, 16 lines)

@@ -0,0 +1,16 @@
+package puddle
+
+import "time"
+
+// nanotime returns the time in nanoseconds since process start.
+//
+// This approach, described at
+// https://github.com/golang/go/issues/61765#issuecomment-1672090302,
+// is fast, monotonic, and portable, and avoids the previous
+// dependence on runtime.nanotime using the (unsafe) linkname hack.
+// In particular, time.Since does less work than time.Now.
+func nanotime() int64 {
+	return time.Since(globalStart).Nanoseconds()
+}
+
+var globalStart = time.Now()
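The new nanotime measures elapsed time against a single process-start baseline with time.Since, which reads the monotonic clock and, per the linked Go issue, does less work than time.Now while dropping the go:linkname dependency removed in the files below. A minimal sketch of the same pattern outside puddle:

```go
package main

import (
	"fmt"
	"time"
)

// processStart is the single baseline; time.Since against it stays monotonic.
var processStart = time.Now()

// nanosSinceStart is the moral equivalent of puddle's nanotime helper.
func nanosSinceStart() int64 {
	return time.Since(processStart).Nanoseconds()
}

func main() {
	before := nanosSinceStart()
	time.Sleep(10 * time.Millisecond)
	elapsed := time.Duration(nanosSinceStart() - before)
	fmt.Println("waited roughly", elapsed) // about 10ms, immune to wall-clock adjustments
}
```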
vendor/github.com/jackc/puddle/v2/nanotime_time.go (generated, vendored; 13 lines removed)

@@ -1,13 +0,0 @@
-//go:build purego || appengine || js
-
-// This file contains the safe implementation of nanotime using time.Now().
-
-package puddle
-
-import (
-	"time"
-)
-
-func nanotime() int64 {
-	return time.Now().UnixNano()
-}
vendor/github.com/jackc/puddle/v2/nanotime_unsafe.go (generated, vendored; 12 lines removed)

@@ -1,12 +0,0 @@
-//go:build !purego && !appengine && !js
-
-// This file contains the implementation of nanotime using runtime.nanotime.
-
-package puddle
-
-import "unsafe"
-
-var _ = unsafe.Sizeof(0)
-
-//go:linkname nanotime runtime.nanotime
-func nanotime() int64
vendor/github.com/jackc/puddle/v2/pool.go (generated, vendored; 20 lines changed)

@@ -139,6 +139,7 @@ type Pool[T any] struct {
 	acquireCount         int64
 	acquireDuration      time.Duration
 	emptyAcquireCount    int64
+	emptyAcquireWaitTime time.Duration
 	canceledAcquireCount atomic.Int64
 
 	resetCount int
@@ -154,7 +155,7 @@ type Config[T any] struct {
 	MaxSize int32
 }
 
-// NewPool creates a new pool. Panics if maxSize is less than 1.
+// NewPool creates a new pool. Returns an error iff MaxSize is less than 1.
 func NewPool[T any](config *Config[T]) (*Pool[T], error) {
 	if config.MaxSize < 1 {
 		return nil, errors.New("MaxSize must be >= 1")
@@ -202,6 +203,7 @@ type Stat struct {
 	acquireCount         int64
 	acquireDuration      time.Duration
 	emptyAcquireCount    int64
+	emptyAcquireWaitTime time.Duration
 	canceledAcquireCount int64
 }
 
@@ -251,6 +253,13 @@ func (s *Stat) EmptyAcquireCount() int64 {
 	return s.emptyAcquireCount
 }
 
+// EmptyAcquireWaitTime returns the cumulative time waited for successful acquires
+// from the pool for a resource to be released or constructed because the pool was
+// empty.
+func (s *Stat) EmptyAcquireWaitTime() time.Duration {
+	return s.emptyAcquireWaitTime
+}
+
 // CanceledAcquireCount returns the cumulative count of acquires from the pool
 // that were canceled by a context.
 func (s *Stat) CanceledAcquireCount() int64 {
@@ -266,6 +275,7 @@ func (p *Pool[T]) Stat() *Stat {
 		maxResources:         p.maxSize,
 		acquireCount:         p.acquireCount,
 		emptyAcquireCount:    p.emptyAcquireCount,
+		emptyAcquireWaitTime: p.emptyAcquireWaitTime,
 		canceledAcquireCount: p.canceledAcquireCount.Load(),
 		acquireDuration:      p.acquireDuration,
 	}
@@ -363,11 +373,13 @@ func (p *Pool[T]) acquire(ctx context.Context) (*Resource[T], error) {
 
 	// If a resource is available in the pool.
 	if res := p.tryAcquireIdleResource(); res != nil {
+		waitTime := time.Duration(nanotime() - startNano)
 		if waitedForLock {
 			p.emptyAcquireCount += 1
+			p.emptyAcquireWaitTime += waitTime
 		}
 		p.acquireCount += 1
-		p.acquireDuration += time.Duration(nanotime() - startNano)
+		p.acquireDuration += waitTime
 		p.mux.Unlock()
 		return res, nil
 	}
@@ -391,7 +403,9 @@ func (p *Pool[T]) acquire(ctx context.Context) (*Resource[T], error) {
 
 		p.emptyAcquireCount += 1
 		p.acquireCount += 1
-		p.acquireDuration += time.Duration(nanotime() - startNano)
+		waitTime := time.Duration(nanotime() - startNano)
+		p.acquireDuration += waitTime
+		p.emptyAcquireWaitTime += waitTime
 
 		return res, nil
 	}
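puddle 2.2.2 accumulates the time spent waiting for a resource while the pool was empty and exposes it as Stat().EmptyAcquireWaitTime(). The sketch below forces one such wait on a single-slot pool and prints the new statistic; the Constructor and Destructor wiring follows the usual puddle/v2 Config and is written from memory rather than copied from this diff, so treat it as illustrative.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/jackc/puddle/v2"
)

func main() {
	ctx := context.Background()
	pool, err := puddle.NewPool(&puddle.Config[int]{
		Constructor: func(ctx context.Context) (int, error) { return 42, nil },
		Destructor:  func(int) {},
		MaxSize:     1, // a single slot so the second Acquire has to wait
	})
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()

	first, err := pool.Acquire(ctx)
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		time.Sleep(20 * time.Millisecond)
		first.Release() // free the slot so the waiting Acquire can proceed
	}()

	second, err := pool.Acquire(ctx) // blocks on the empty pool until the release above
	if err != nil {
		log.Fatal(err)
	}
	second.Release()

	stat := pool.Stat()
	fmt.Println("empty acquires:", stat.EmptyAcquireCount())
	fmt.Println("time spent waiting on an empty pool:", stat.EmptyAcquireWaitTime())
}
```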
vendor/modules.txt (vendored; 8 lines changed)

@@ -413,11 +413,11 @@ github.com/inconshreveable/mousetrap
 # github.com/jackc/pgpassfile v1.0.0
 ## explicit; go 1.12
 github.com/jackc/pgpassfile
-# github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a
+# github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761
 ## explicit; go 1.14
 github.com/jackc/pgservicefile
-# github.com/jackc/pgx/v5 v5.6.0
-## explicit; go 1.20
+# github.com/jackc/pgx/v5 v5.7.1
+## explicit; go 1.21
 github.com/jackc/pgx/v5
 github.com/jackc/pgx/v5/internal/iobufpool
 github.com/jackc/pgx/v5/internal/pgio
@@ -430,7 +430,7 @@ github.com/jackc/pgx/v5/pgproto3
 github.com/jackc/pgx/v5/pgtype
 github.com/jackc/pgx/v5/pgxpool
 github.com/jackc/pgx/v5/stdlib
-# github.com/jackc/puddle/v2 v2.2.1
+# github.com/jackc/puddle/v2 v2.2.2
 ## explicit; go 1.19
 github.com/jackc/puddle/v2
 github.com/jackc/puddle/v2/internal/genstack