bumps our uptrace/bun dependencies to v1.2.10 (#3865)

This commit is contained in:
kim 2025-03-03 10:42:05 +00:00 committed by GitHub
parent ddd9210614
commit 67a2b3650c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
37 changed files with 518 additions and 225 deletions

10
go.mod
View file

@ -66,10 +66,10 @@ require (
github.com/tetratelabs/wazero v1.9.0
github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80
github.com/ulule/limiter/v3 v3.11.2
github.com/uptrace/bun v1.2.9
github.com/uptrace/bun/dialect/pgdialect v1.2.9
github.com/uptrace/bun/dialect/sqlitedialect v1.2.9
github.com/uptrace/bun/extra/bunotel v1.2.9
github.com/uptrace/bun v1.2.10
github.com/uptrace/bun/dialect/pgdialect v1.2.10
github.com/uptrace/bun/dialect/sqlitedialect v1.2.10
github.com/uptrace/bun/extra/bunotel v1.2.10
github.com/wagslane/go-password-validator v0.3.0
github.com/yuin/goldmark v1.7.8
go.opentelemetry.io/otel v1.34.0
@ -186,7 +186,7 @@ require (
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.0 // indirect
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rogpeppe/go-internal v1.13.2-0.20241226121412-a5dc8ff20d0a // indirect

20
go.sum generated
View file

@ -351,8 +351,8 @@ github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/puzpuzpuz/xsync/v3 v3.5.0 h1:i+cMcpEDY1BkNm7lPDkCtE4oElsYLn+EKF8kAu2vXT4=
github.com/puzpuzpuz/xsync/v3 v3.5.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b h1:aUNXCGgukb4gtY99imuIeoh8Vr0GSwAlYxPAhqZrpFc=
github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b/go.mod h1:wTPjTepVu7uJBYgZ0SdWHQlIas582j6cn2jgk4DDdlg=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
@ -450,14 +450,14 @@ github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65E
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/ulule/limiter/v3 v3.11.2 h1:P4yOrxoEMJbOTfRJR2OzjL90oflzYPPmWg+dvwN2tHA=
github.com/ulule/limiter/v3 v3.11.2/go.mod h1:QG5GnFOCV+k7lrL5Y8kgEeeflPH3+Cviqlqa8SVSQxI=
github.com/uptrace/bun v1.2.9 h1:OOt2DlIcRUMSZPr6iXDFg/LaQd59kOxbAjpIVHddKRs=
github.com/uptrace/bun v1.2.9/go.mod h1:r2ZaaGs9Ru5bpGTr8GQfp8jp+TlCav9grYCPOu2CJSg=
github.com/uptrace/bun/dialect/pgdialect v1.2.9 h1:caf5uFbOGiXvadV6pA5gn87k0awFFxL1kuuY3SpxnWk=
github.com/uptrace/bun/dialect/pgdialect v1.2.9/go.mod h1:m7L9JtOp/Lt8HccET70ULxplMweE/u0S9lNUSxz2duo=
github.com/uptrace/bun/dialect/sqlitedialect v1.2.9 h1:HLzGWXBh07sT8zhVPy6veYbbGrAtYq0KzyRHXBj+GjA=
github.com/uptrace/bun/dialect/sqlitedialect v1.2.9/go.mod h1:dUR+ecoCWA0FIa9vhQVRnGtYYPpuCLJoEEtX9E1aiBU=
github.com/uptrace/bun/extra/bunotel v1.2.9 h1:BGGrBga+iVL78SGiMpLt2N9MAKvrG3f8wLk8zCLwFJg=
github.com/uptrace/bun/extra/bunotel v1.2.9/go.mod h1:6dVl5Ko6xOhuoqUPWHpfFrntBDwmOnq0OMiR/SGwAC8=
github.com/uptrace/bun v1.2.10 h1:6TlxUQhGxiiv7MHjzxbV6ZNt/Im0PIQ3S45riAmbnGA=
github.com/uptrace/bun v1.2.10/go.mod h1:ww5G8h59UrOnCHmZ8O1I/4Djc7M/Z3E+EWFS2KLB6dQ=
github.com/uptrace/bun/dialect/pgdialect v1.2.10 h1:+PAGCVyWDoAjMuAgn0+ud7fu3It8+Xvk7HQAJ5wCXMQ=
github.com/uptrace/bun/dialect/pgdialect v1.2.10/go.mod h1:hv0zsoc3PeW5fl3JeBglZT1vl2FoERY+QwvuvKsKATA=
github.com/uptrace/bun/dialect/sqlitedialect v1.2.10 h1:/74GDx1hnRrrmIvqpNbbFwD28sW1z+i/QjQSVy6XnnY=
github.com/uptrace/bun/dialect/sqlitedialect v1.2.10/go.mod h1:xBx+N2q4G4s51tAxZU5vKB3Zu0bFl1uRmKqZwCPBilg=
github.com/uptrace/bun/extra/bunotel v1.2.10 h1:Qkg0PrpcnlC9AvqCfqTL3seZHc5t1siKdSFUPCxql+Q=
github.com/uptrace/bun/extra/bunotel v1.2.10/go.mod h1:FP1Bx8AIK8WYVM1OL/ynpcnkg7xjBkTCB91PEjFhdmU=
github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2 h1:ZjUj9BLYf9PEqBn8W/OapxhPjVRdC6CsXTdULHsyk5c=
github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2/go.mod h1:O8bHQfyinKwTXKkiKNGmLQS7vRsqRxIQTFZpYpHK3IQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=

View file

@ -294,11 +294,32 @@ func(interface{}, bool) (interface{}, bool) {
)
}
// LoadOrCompute returns the existing value for the key if present.
// Otherwise, it computes the value using the provided function, and
// then stores and returns the computed value. The loaded result is
// true if the value was loaded, false if computed.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until the valueFn executes. Consider
// this when the function includes long-running operations.
func (m *Map) LoadOrCompute(key string, valueFn func() interface{}) (actual interface{}, loaded bool) {
return m.doCompute(
key,
func(interface{}, bool) (interface{}, bool) {
return valueFn(), false
},
true,
false,
)
}
// LoadOrTryCompute returns the existing value for the key if present.
// Otherwise, it tries to compute the value using the provided function
// and, if success, returns the computed value. The loaded result is true
// if the value was loaded, false if stored. If the compute attempt was
// cancelled, a nil will be returned.
// and, if successful, stores and returns the computed value. The loaded
// result is true if the value was loaded, or false if computed (whether
// successfully or not). If the compute attempt was cancelled (due to an
// error, for example), a nil value will be returned.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
@ -322,26 +343,6 @@ func(interface{}, bool) (interface{}, bool) {
)
}
// LoadOrCompute returns the existing value for the key if present.
// Otherwise, it computes the value using the provided function and
// returns the computed value. The loaded result is true if the value
// was loaded, false if stored.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until the valueFn executes. Consider
// this when the function includes long-running operations.
func (m *Map) LoadOrCompute(key string, valueFn func() interface{}) (actual interface{}, loaded bool) {
return m.doCompute(
key,
func(interface{}, bool) (interface{}, bool) {
return valueFn(), false
},
true,
false,
)
}
// Compute either sets the computed new value for the key or deletes
// the value for the key. When the delete result of the valueFn function
// is set to true, the value will be deleted, if it exists. When delete

View file

@ -239,9 +239,9 @@ func(V, bool) (V, bool) {
}
// LoadOrCompute returns the existing value for the key if present.
// Otherwise, it computes the value using the provided function and
// returns the computed value. The loaded result is true if the value
// was loaded, false if stored.
// Otherwise, it computes the value using the provided function, and
// then stores and returns the computed value. The loaded result is
// true if the value was loaded, false if computed.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
@ -260,9 +260,10 @@ func(V, bool) (V, bool) {
// LoadOrTryCompute returns the existing value for the key if present.
// Otherwise, it tries to compute the value using the provided function
// and, if success, returns the computed value. The loaded result is true
// if the value was loaded, false if stored. If the compute attempt was
// cancelled, a zero value of type V will be returned.
// and, if successful, stores and returns the computed value. The loaded
// result is true if the value was loaded, or false if computed (whether
// successfully or not). If the compute attempt was cancelled (due to an
// error, for example), a zero value of type V will be returned.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in

View file

@ -1,3 +1,25 @@
## [1.2.10](https://github.com/uptrace/bun/compare/v1.2.9...v1.2.10) (2025-02-18)
### Bug Fixes
* clone query in scanAndCountConcurrently to avoid data race ([66fdc39](https://github.com/uptrace/bun/commit/66fdc39b33a482534920578ed8c7f88c3f142a3d)), closes [#1117](https://github.com/uptrace/bun/issues/1117)
* **create_table:** avoid creating unintended foreign keys ([#1130](https://github.com/uptrace/bun/issues/1130)) ([187743b](https://github.com/uptrace/bun/commit/187743b1e743755cd57a9cc11e7f2f9cea0a7dcd))
* **pgdialect:** handle []*time.Time arrays ([4c4e12a](https://github.com/uptrace/bun/commit/4c4e12aa7f27cf49189427da5104afb436af4348))
* **pgdialect:** handle nil array on jsonb column ([0dc4e3e](https://github.com/uptrace/bun/commit/0dc4e3edb3f9021b02ed6f80d54cb88d2ef9b025))
* **pgdialect:** postgres syntax errors for slices of pointers and json arrays [#877](https://github.com/uptrace/bun/issues/877) ([1422b77](https://github.com/uptrace/bun/commit/1422b7726a24ac55ee6ca0e15ec084c34f7b1bd6))
* process embedded's struct field for table ([b410e42](https://github.com/uptrace/bun/commit/b410e420ab888d87d2b6ebb014f13baae8fdc2b7)), closes [#1125](https://github.com/uptrace/bun/issues/1125)
### Features
* add DBReplica and use it in ReadWriteConnResolver ([95c825e](https://github.com/uptrace/bun/commit/95c825e1215b26456caeebf1893d3b6183202bae))
* allow setting a query comment through a context value ([9f5ccfe](https://github.com/uptrace/bun/commit/9f5ccfea7144c7ced877e3ce6972c589c5c6c3e6))
* **bunotel:** ability to override span names ([04e2125](https://github.com/uptrace/bun/commit/04e21253298ee495179754fbbfccc047468a034d))
* **bunotel:** always record affected rows ([960a304](https://github.com/uptrace/bun/commit/960a3046ad0cc8ea548dc448380549f610cb5da4))
## [1.2.9](https://github.com/uptrace/bun/compare/v1.2.8...v1.2.9) (2025-01-26)

126
vendor/github.com/uptrace/bun/db.go generated vendored
View file

@ -2,14 +2,13 @@
import (
"context"
"crypto/rand"
cryptorand "crypto/rand"
"database/sql"
"encoding/hex"
"fmt"
"reflect"
"strings"
"sync/atomic"
"time"
"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/internal"
@ -41,6 +40,12 @@ func WithDiscardUnknownColumns() DBOption {
}
}
// ConnResolver enables routing queries to multiple databases.
type ConnResolver interface {
ResolveConn(query Query) IConn
Close() error
}
func WithConnResolver(resolver ConnResolver) DBOption {
return func(db *DB) {
db.resolver = resolver
@ -633,7 +638,7 @@ func (tx Tx) Begin() (Tx, error) {
func (tx Tx) BeginTx(ctx context.Context, _ *sql.TxOptions) (Tx, error) {
// mssql savepoint names are limited to 32 characters
sp := make([]byte, 14)
_, err := rand.Read(sp)
_, err := cryptorand.Read(sp)
if err != nil {
return Tx{}, err
}
@ -739,121 +744,6 @@ func (tx Tx) NewDropColumn() *DropColumnQuery {
return NewDropColumnQuery(tx.db).Conn(tx)
}
//------------------------------------------------------------------------------
func (db *DB) makeQueryBytes() []byte {
return internal.MakeQueryBytes()
}
//------------------------------------------------------------------------------
// ConnResolver enables routing queries to multiple databases.
type ConnResolver interface {
ResolveConn(query Query) IConn
Close() error
}
// TODO:
// - make monitoring interval configurable
// - make ping timeout configurable
// - allow adding read/write replicas for multi-master replication
type ReadWriteConnResolver struct {
replicas []*sql.DB // read-only replicas
healthyReplicas atomic.Pointer[[]*sql.DB]
nextReplica atomic.Int64
closed atomic.Bool
}
func NewReadWriteConnResolver(opts ...ReadWriteConnResolverOption) *ReadWriteConnResolver {
r := new(ReadWriteConnResolver)
for _, opt := range opts {
opt(r)
}
if len(r.replicas) > 0 {
r.healthyReplicas.Store(&r.replicas)
go r.monitor()
}
return r
}
type ReadWriteConnResolverOption func(r *ReadWriteConnResolver)
func WithReadOnlyReplica(dbs ...*sql.DB) ReadWriteConnResolverOption {
return func(r *ReadWriteConnResolver) {
r.replicas = append(r.replicas, dbs...)
}
}
func (r *ReadWriteConnResolver) Close() error {
if r.closed.Swap(true) {
return nil
}
var firstErr error
for _, db := range r.replicas {
if err := db.Close(); err != nil && firstErr == nil {
firstErr = err
}
}
return firstErr
}
// healthyReplica returns a random healthy replica.
func (r *ReadWriteConnResolver) ResolveConn(query Query) IConn {
if len(r.replicas) == 0 || !isReadOnlyQuery(query) {
return nil
}
replicas := r.loadHealthyReplicas()
if len(replicas) == 0 {
return nil
}
if len(replicas) == 1 {
return replicas[0]
}
i := r.nextReplica.Add(1)
return replicas[int(i)%len(replicas)]
}
func isReadOnlyQuery(query Query) bool {
sel, ok := query.(*SelectQuery)
if !ok {
return false
}
for _, el := range sel.with {
if !isReadOnlyQuery(el.query) {
return false
}
}
return true
}
func (r *ReadWriteConnResolver) loadHealthyReplicas() []*sql.DB {
if ptr := r.healthyReplicas.Load(); ptr != nil {
return *ptr
}
return nil
}
func (r *ReadWriteConnResolver) monitor() {
const interval = 5 * time.Second
for !r.closed.Load() {
healthy := make([]*sql.DB, 0, len(r.replicas))
for _, replica := range r.replicas {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
err := replica.PingContext(ctx)
cancel()
if err == nil {
healthy = append(healthy, replica)
}
}
r.healthyReplicas.Store(&healthy)
time.Sleep(interval)
}
}

View file

@ -142,6 +142,10 @@ func (d *Dialect) arrayElemAppender(typ reflect.Type) schema.AppenderFunc {
if typ.Implements(driverValuerType) {
return arrayAppendDriverValue
}
if typ == timeType {
return appendTimeElemValue
}
switch typ.Kind() {
case reflect.String:
return appendStringElemValue
@ -149,10 +153,20 @@ func (d *Dialect) arrayElemAppender(typ reflect.Type) schema.AppenderFunc {
if typ.Elem().Kind() == reflect.Uint8 {
return appendBytesElemValue
}
case reflect.Ptr:
return schema.PtrAppender(d.arrayElemAppender(typ.Elem()))
}
return schema.Appender(d, typ)
}
func appendTimeElemValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
ts := v.Convert(timeType).Interface().(time.Time)
b = append(b, '"')
b = appendTime(b, ts)
return append(b, '"')
}
func appendStringElemValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
return appendStringElem(b, v.String())
}

View file

@ -11,15 +11,23 @@ type arrayParser struct {
elem []byte
err error
isJson bool
}
func newArrayParser(b []byte) *arrayParser {
p := new(arrayParser)
if len(b) < 2 || b[0] != '{' || b[len(b)-1] != '}' {
if b[0] == 'n' {
p.p.Reset(nil)
return p
}
if len(b) < 2 || (b[0] != '{' && b[0] != '[') || (b[len(b)-1] != '}' && b[len(b)-1] != ']') {
p.err = fmt.Errorf("pgdialect: can't parse array: %q", b)
return p
}
p.isJson = b[0] == '['
p.p.Reset(b[1 : len(b)-1])
return p
@ -51,7 +59,7 @@ func (p *arrayParser) readNext() error {
}
switch ch {
case '}':
case '}', ']':
return io.EOF
case '"':
b, err := p.p.ReadSubstring(ch)
@ -78,16 +86,34 @@ func (p *arrayParser) readNext() error {
p.elem = rng
return nil
default:
lit := p.p.ReadLiteral(ch)
if bytes.Equal(lit, []byte("NULL")) {
lit = nil
}
if ch == '{' && p.isJson {
json, err := p.p.ReadJSON()
if err != nil {
return err
}
if p.p.Peek() == ',' {
p.p.Advance()
}
for {
if p.p.Peek() == ',' || p.p.Peek() == ' ' {
p.p.Advance()
} else {
break
}
}
p.elem = lit
return nil
p.elem = json
return nil
} else {
lit := p.p.ReadLiteral(ch)
if bytes.Equal(lit, []byte("NULL")) {
lit = nil
}
if p.p.Peek() == ',' {
p.p.Advance()
}
p.elem = lit
return nil
}
}
}

View file

@ -105,3 +105,39 @@ func (p *pgparser) ReadRange(ch byte) ([]byte, error) {
return p.buf, nil
}
func (p *pgparser) ReadJSON() ([]byte, error) {
p.Unread()
c, err := p.ReadByte()
if err != nil {
return nil, err
}
p.buf = p.buf[:0]
depth := 0
for {
switch c {
case '{':
depth++
case '}':
depth--
}
p.buf = append(p.buf, c)
if depth == 0 {
break
}
next, err := p.ReadByte()
if err != nil {
return nil, err
}
c = next
}
return p.buf, nil
}

View file

@ -86,6 +86,10 @@ func fieldSQLType(field *schema.Field) string {
}
func sqlType(typ reflect.Type) string {
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
}
switch typ {
case nullStringType: // typ.Kind() == reflect.Struct, test for exact match
return sqltype.VarChar

View file

@ -2,5 +2,5 @@
// Version is the current release version.
func Version() string {
return "1.2.9"
return "1.2.10"
}

View file

@ -2,5 +2,5 @@
// Version is the current release version.
func Version() string {
return "1.2.9"
return "1.2.10"
}

View file

@ -1,6 +1,7 @@
package bunotel
import (
"github.com/uptrace/bun"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric"
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
@ -33,6 +34,14 @@ func WithFormattedQueries(format bool) Option {
}
}
// WithSpanNameFormatter takes a function that determines the span name
// for a given query event.
func WithSpanNameFormatter(f func(*bun.QueryEvent) string) Option {
return func(h *QueryHook) {
h.spanNameFormatter = f
}
}
// WithTracerProvider returns an Option to use the TracerProvider when
// creating a Tracer.
func WithTracerProvider(tp trace.TracerProvider) Option {

View file

@ -22,11 +22,12 @@
)
type QueryHook struct {
attrs []attribute.KeyValue
formatQueries bool
tracer trace.Tracer
meter metric.Meter
queryHistogram metric.Int64Histogram
attrs []attribute.KeyValue
formatQueries bool
tracer trace.Tracer
meter metric.Meter
queryHistogram metric.Int64Histogram
spanNameFormatter func(*bun.QueryEvent) string
}
var _ bun.QueryHook = (*QueryHook)(nil)
@ -86,7 +87,11 @@ func (h *QueryHook) AfterQuery(ctx context.Context, event *bun.QueryEvent) {
return
}
span.SetName(operation)
name := operation
if h.spanNameFormatter != nil {
name = h.spanNameFormatter(event)
}
span.SetName(name)
defer span.End()
query := h.eventQuery(event)
@ -106,9 +111,8 @@ func (h *QueryHook) AfterQuery(ctx context.Context, event *bun.QueryEvent) {
attrs = append(attrs, sys)
}
if event.Result != nil {
if n, _ := event.Result.RowsAffected(); n > 0 {
attrs = append(attrs, attribute.Int64("db.rows_affected", n))
}
rows, _ := event.Result.RowsAffected()
attrs = append(attrs, attribute.Int64("db.rows_affected", rows))
}
switch event.Err {

View file

@ -1,6 +1,6 @@
{
"name": "gobun",
"version": "1.2.9",
"version": "1.2.10",
"main": "index.js",
"repository": "git@github.com:uptrace/bun.git",
"author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",

View file

@ -1475,3 +1475,16 @@ func (q *orderLimitOffsetQuery) appendLimitOffset(fmter schema.Formatter, b []by
return b, nil
}
func IsReadOnlyQuery(query Query) bool {
sel, ok := query.(*SelectQuery)
if !ok {
return false
}
for _, el := range sel.with {
if !IsReadOnlyQuery(el.query) {
return false
}
}
return true
}

View file

@ -137,6 +137,9 @@ func (q *AddColumnQuery) Exec(ctx context.Context, dest ...interface{}) (sql.Res
return nil, feature.NewNotSupportError(feature.AlterColumnExists)
}
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
queryBytes, err := q.AppendQuery(q.db.fmter, q.db.makeQueryBytes())
if err != nil {
return nil, err

View file

@ -129,6 +129,9 @@ func (q *DropColumnQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byt
//------------------------------------------------------------------------------
func (q *DropColumnQuery) Exec(ctx context.Context, dest ...interface{}) (sql.Result, error) {
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
queryBytes, err := q.AppendQuery(q.db.fmter, q.db.makeQueryBytes())
if err != nil {
return nil, err

View file

@ -127,7 +127,7 @@ func (q *DeleteQuery) WhereAllWithDeleted() *DeleteQuery {
func (q *DeleteQuery) Order(orders ...string) *DeleteQuery {
if !q.hasFeature(feature.DeleteOrderLimit) {
q.err = feature.NewNotSupportError(feature.DeleteOrderLimit)
q.setErr(feature.NewNotSupportError(feature.DeleteOrderLimit))
return q
}
q.addOrder(orders...)
@ -136,7 +136,7 @@ func (q *DeleteQuery) Order(orders ...string) *DeleteQuery {
func (q *DeleteQuery) OrderExpr(query string, args ...interface{}) *DeleteQuery {
if !q.hasFeature(feature.DeleteOrderLimit) {
q.err = feature.NewNotSupportError(feature.DeleteOrderLimit)
q.setErr(feature.NewNotSupportError(feature.DeleteOrderLimit))
return q
}
q.addOrderExpr(query, args...)
@ -151,7 +151,7 @@ func (q *DeleteQuery) ForceDelete() *DeleteQuery {
// ------------------------------------------------------------------------------
func (q *DeleteQuery) Limit(n int) *DeleteQuery {
if !q.hasFeature(feature.DeleteOrderLimit) {
q.err = feature.NewNotSupportError(feature.DeleteOrderLimit)
q.setErr(feature.NewNotSupportError(feature.DeleteOrderLimit))
return q
}
q.setLimit(n)
@ -165,7 +165,7 @@ func (q *DeleteQuery) Limit(n int) *DeleteQuery {
// To suppress the auto-generated RETURNING clause, use `Returning("NULL")`.
func (q *DeleteQuery) Returning(query string, args ...interface{}) *DeleteQuery {
if !q.hasFeature(feature.DeleteReturning) {
q.err = feature.NewNotSupportError(feature.DeleteOrderLimit)
q.setErr(feature.NewNotSupportError(feature.DeleteOrderLimit))
return q
}
@ -321,6 +321,9 @@ func (q *DeleteQuery) scanOrExec(
return nil, err
}
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
// Generate the query before checking hasReturning.
queryBytes, err := q.AppendQuery(q.db.fmter, q.db.makeQueryBytes())
if err != nil {

View file

@ -248,6 +248,9 @@ func (q *CreateIndexQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []by
//------------------------------------------------------------------------------
func (q *CreateIndexQuery) Exec(ctx context.Context, dest ...interface{}) (sql.Result, error) {
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
queryBytes, err := q.AppendQuery(q.db.fmter, q.db.makeQueryBytes())
if err != nil {
return nil, err

View file

@ -115,6 +115,9 @@ func (q *DropIndexQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte
//------------------------------------------------------------------------------
func (q *DropIndexQuery) Exec(ctx context.Context, dest ...interface{}) (sql.Result, error) {
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
queryBytes, err := q.AppendQuery(q.db.fmter, q.db.makeQueryBytes())
if err != nil {
return nil, err

View file

@ -114,7 +114,7 @@ func (q *InsertQuery) ExcludeColumn(columns ...string) *InsertQuery {
// Value overwrites model value for the column.
func (q *InsertQuery) Value(column string, expr string, args ...interface{}) *InsertQuery {
if q.table == nil {
q.err = errNilModel
q.setErr(errNilModel)
return q
}
q.addValue(q.table, column, expr, args)
@ -586,6 +586,9 @@ func (q *InsertQuery) scanOrExec(
return nil, err
}
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
// Generate the query before checking hasReturning.
queryBytes, err := q.AppendQuery(q.db.fmter, q.db.makeQueryBytes())
if err != nil {

View file

@ -30,7 +30,7 @@ func NewMergeQuery(db *DB) *MergeQuery {
},
}
if q.db.dialect.Name() != dialect.MSSQL && q.db.dialect.Name() != dialect.PG {
q.err = errors.New("bun: merge not supported for current dialect")
q.setErr(errors.New("bun: merge not supported for current dialect"))
}
return q
}
@ -243,6 +243,9 @@ func (q *MergeQuery) scanOrExec(
return nil, err
}
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
// Generate the query before checking hasReturning.
queryBytes, err := q.AppendQuery(q.db.fmter, q.db.makeQueryBytes())
if err != nil {

View file

@ -67,6 +67,9 @@ func (q *RawQuery) scanOrExec(
}
}
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
query := q.db.format(q.query, q.args)
var res sql.Result

View file

@ -354,7 +354,7 @@ func (q *SelectQuery) JoinOnOr(cond string, args ...interface{}) *SelectQuery {
func (q *SelectQuery) joinOn(cond string, args []interface{}, sep string) *SelectQuery {
if len(q.joins) == 0 {
q.err = errors.New("bun: query has no joins")
q.setErr(errors.New("bun: query has no joins"))
return q
}
j := &q.joins[len(q.joins)-1]
@ -791,6 +791,9 @@ func (q *SelectQuery) Rows(ctx context.Context) (*sql.Rows, error) {
return nil, err
}
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
queryBytes, err := q.AppendQuery(q.db.fmter, q.db.makeQueryBytes())
if err != nil {
return nil, err
@ -812,6 +815,9 @@ func (q *SelectQuery) Exec(ctx context.Context, dest ...interface{}) (res sql.Re
return nil, err
}
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
queryBytes, err := q.AppendQuery(q.db.fmter, q.db.makeQueryBytes())
if err != nil {
return nil, err
@ -872,6 +878,9 @@ func (q *SelectQuery) scanResult(ctx context.Context, dest ...interface{}) (sql.
return nil, err
}
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
queryBytes, err := q.AppendQuery(q.db.fmter, q.db.makeQueryBytes())
if err != nil {
return nil, err
@ -924,6 +933,9 @@ func (q *SelectQuery) Count(ctx context.Context) (int, error) {
return 0, q.err
}
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
qq := countQuery{q}
queryBytes, err := qq.AppendQuery(q.db.fmter, nil)
@ -967,27 +979,27 @@ func (q *SelectQuery) scanAndCountConcurrently(
var mu sync.Mutex
var firstErr error
if q.limit >= 0 {
wg.Add(1)
go func() {
defer wg.Done()
countQuery := q.Clone()
if err := q.Scan(ctx, dest...); err != nil {
mu.Lock()
if firstErr == nil {
firstErr = err
}
mu.Unlock()
wg.Add(1)
go func() {
defer wg.Done()
if err := q.Scan(ctx, dest...); err != nil {
mu.Lock()
if firstErr == nil {
firstErr = err
}
}()
}
mu.Unlock()
}
}()
wg.Add(1)
go func() {
defer wg.Done()
var err error
count, err = q.Count(ctx)
count, err = countQuery.Count(ctx)
if err != nil {
mu.Lock()
if firstErr == nil {
@ -1028,6 +1040,9 @@ func (q *SelectQuery) Exists(ctx context.Context) (bool, error) {
}
func (q *SelectQuery) selectExists(ctx context.Context) (bool, error) {
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
qq := selectExistsQuery{q}
queryBytes, err := qq.AppendQuery(q.db.fmter, nil)
@ -1047,6 +1062,9 @@ func (q *SelectQuery) selectExists(ctx context.Context) (bool, error) {
}
func (q *SelectQuery) whereExists(ctx context.Context) (bool, error) {
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
qq := whereExistsQuery{q}
queryBytes, err := qq.AppendQuery(q.db.fmter, nil)
@ -1077,6 +1095,120 @@ func (q *SelectQuery) String() string {
return string(buf)
}
func (q *SelectQuery) Clone() *SelectQuery {
if q == nil {
return nil
}
cloneArgs := func(args []schema.QueryWithArgs) []schema.QueryWithArgs {
if len(args) == 0 {
return nil
}
clone := make([]schema.QueryWithArgs, len(args))
copy(clone, args)
return clone
}
cloneHints := func(hints *indexHints) *indexHints {
if hints == nil {
return nil
}
return &indexHints{
names: cloneArgs(hints.names),
forJoin: cloneArgs(hints.forJoin),
forOrderBy: cloneArgs(hints.forOrderBy),
forGroupBy: cloneArgs(hints.forGroupBy),
}
}
clone := &SelectQuery{
whereBaseQuery: whereBaseQuery{
baseQuery: baseQuery{
db: q.db,
table: q.table,
model: q.model,
tableModel: q.tableModel,
with: make([]withQuery, len(q.with)),
tables: cloneArgs(q.tables),
columns: cloneArgs(q.columns),
modelTableName: q.modelTableName,
},
where: make([]schema.QueryWithSep, len(q.where)),
},
idxHintsQuery: idxHintsQuery{
use: cloneHints(q.idxHintsQuery.use),
ignore: cloneHints(q.idxHintsQuery.ignore),
force: cloneHints(q.idxHintsQuery.force),
},
orderLimitOffsetQuery: orderLimitOffsetQuery{
order: cloneArgs(q.order),
limit: q.limit,
offset: q.offset,
},
distinctOn: cloneArgs(q.distinctOn),
joins: make([]joinQuery, len(q.joins)),
group: cloneArgs(q.group),
having: cloneArgs(q.having),
union: make([]union, len(q.union)),
comment: q.comment,
}
for i, w := range q.with {
clone.with[i] = withQuery{
name: w.name,
recursive: w.recursive,
query: w.query, // TODO: maybe a clone is needed
}
}
if !q.modelTableName.IsZero() {
clone.modelTableName = schema.SafeQuery(
q.modelTableName.Query,
append([]any(nil), q.modelTableName.Args...),
)
}
for i, w := range q.where {
clone.where[i] = schema.SafeQueryWithSep(
w.Query,
append([]any(nil), w.Args...),
w.Sep,
)
}
for i, j := range q.joins {
clone.joins[i] = joinQuery{
join: schema.SafeQuery(j.join.Query, append([]any(nil), j.join.Args...)),
on: make([]schema.QueryWithSep, len(j.on)),
}
for k, on := range j.on {
clone.joins[i].on[k] = schema.SafeQueryWithSep(
on.Query,
append([]any(nil), on.Args...),
on.Sep,
)
}
}
for i, u := range q.union {
clone.union[i] = union{
expr: u.expr,
query: u.query.Clone(),
}
}
if !q.selFor.IsZero() {
clone.selFor = schema.SafeQuery(
q.selFor.Query,
append([]any(nil), q.selFor.Args...),
)
}
return clone
}
//------------------------------------------------------------------------------
func (q *SelectQuery) QueryBuilder() QueryBuilder {

View file

@ -358,6 +358,9 @@ func (q *CreateTableQuery) Exec(ctx context.Context, dest ...interface{}) (sql.R
return nil, err
}
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
queryBytes, err := q.AppendQuery(q.db.fmter, q.db.makeQueryBytes())
if err != nil {
return nil, err

View file

@ -123,6 +123,9 @@ func (q *DropTableQuery) Exec(ctx context.Context, dest ...interface{}) (sql.Res
}
}
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
queryBytes, err := q.AppendQuery(q.db.fmter, q.db.makeQueryBytes())
if err != nil {
return nil, err

View file

@ -136,6 +136,9 @@ func (q *TruncateTableQuery) AppendQuery(
//------------------------------------------------------------------------------
func (q *TruncateTableQuery) Exec(ctx context.Context, dest ...interface{}) (sql.Result, error) {
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
queryBytes, err := q.AppendQuery(q.db.fmter, q.db.makeQueryBytes())
if err != nil {
return nil, err

View file

@ -123,7 +123,7 @@ func (q *UpdateQuery) SetColumn(column string, query string, args ...interface{}
// Value overwrites model value for the column.
func (q *UpdateQuery) Value(column string, query string, args ...interface{}) *UpdateQuery {
if q.table == nil {
q.err = errNilModel
q.setErr(errNilModel)
return q
}
q.addValue(q.table, column, query, args)
@ -154,7 +154,7 @@ func (q *UpdateQuery) JoinOnOr(cond string, args ...interface{}) *UpdateQuery {
func (q *UpdateQuery) joinOn(cond string, args []interface{}, sep string) *UpdateQuery {
if len(q.joins) == 0 {
q.err = errors.New("bun: query has no joins")
q.setErr(errors.New("bun: query has no joins"))
return q
}
j := &q.joins[len(q.joins)-1]
@ -206,7 +206,7 @@ func (q *UpdateQuery) WhereAllWithDeleted() *UpdateQuery {
// ------------------------------------------------------------------------------
func (q *UpdateQuery) Order(orders ...string) *UpdateQuery {
if !q.hasFeature(feature.UpdateOrderLimit) {
q.err = feature.NewNotSupportError(feature.UpdateOrderLimit)
q.setErr(feature.NewNotSupportError(feature.UpdateOrderLimit))
return q
}
q.addOrder(orders...)
@ -215,7 +215,7 @@ func (q *UpdateQuery) Order(orders ...string) *UpdateQuery {
func (q *UpdateQuery) OrderExpr(query string, args ...interface{}) *UpdateQuery {
if !q.hasFeature(feature.UpdateOrderLimit) {
q.err = feature.NewNotSupportError(feature.UpdateOrderLimit)
q.setErr(feature.NewNotSupportError(feature.UpdateOrderLimit))
return q
}
q.addOrderExpr(query, args...)
@ -224,7 +224,7 @@ func (q *UpdateQuery) OrderExpr(query string, args ...interface{}) *UpdateQuery
func (q *UpdateQuery) Limit(n int) *UpdateQuery {
if !q.hasFeature(feature.UpdateOrderLimit) {
q.err = feature.NewNotSupportError(feature.UpdateOrderLimit)
q.setErr(feature.NewNotSupportError(feature.UpdateOrderLimit))
return q
}
q.setLimit(n)
@ -556,6 +556,9 @@ func (q *UpdateQuery) scanOrExec(
return nil, err
}
// if a comment is propagated via the context, use it
setCommentFromContext(ctx, q)
// Generate the query before checking hasReturning.
queryBytes, err := q.AppendQuery(q.db.fmter, q.db.makeQueryBytes())
if err != nil {

View file

@ -52,7 +52,7 @@ func (q *ValuesQuery) Column(columns ...string) *ValuesQuery {
// Value overwrites model value for the column.
func (q *ValuesQuery) Value(column string, expr string, args ...interface{}) *ValuesQuery {
if q.table == nil {
q.err = errNilModel
q.setErr(errNilModel)
return q
}
q.addValue(q.table, column, expr, args)

View file

@ -10,6 +10,7 @@
)
type Field struct {
Table *Table // Contains this field
StructField reflect.StructField
IsPtr bool

View file

@ -13,12 +13,12 @@
)
type Relation struct {
Type int
Field *Field // Has the bun tag defining this relation.
// Base and Join can be explained with this query:
//
// SELECT * FROM base_table JOIN join_table
Type int
Field *Field
JoinTable *Table
BasePKs []*Field
JoinPKs []*Field
@ -34,10 +34,49 @@ type Relation struct {
M2MJoinPKs []*Field
}
// References returns true if the table to which the Relation belongs needs to declare a foreign key constraint to create the relation.
// For other relations, the constraint is created in either the referencing table (1:N, 'has-many' relations) or a mapping table (N:N, 'm2m' relations).
// References returns true if the table which defines this Relation
// needs to declare a foreign key constraint, as is the case
// for 'has-one' and 'belongs-to' relations. For other relations,
// the constraint is created either in the referencing table (1:N, 'has-many' relations)
// or the junction table (N:N, 'm2m' relations).
//
// Usage of `rel:` tag does not always imply creation of foreign keys (when WithForeignKeys() is not set)
// and can be used exclusively for joining tables at query time. For example:
//
// type User struct {
// ID int64 `bun:",pk"`
// Profile *Profile `bun:",rel:has-one,join:id=user_id"`
// }
//
// Creating a FK users.id -> profiles.user_id would be confusing and incorrect,
// so for such cases References() returns false. One notable exception to this rule
// is when a Relation is defined in a junction table, in which case it is perfectly
// fine for its primary keys to reference other tables. Consider:
//
// // UsersToGroups maps users to groups they follow.
// type UsersToGroups struct {
// UserID string `bun:"user_id,pk"` // Needs FK to users.id
// GroupID string `bun:"group_id,pk"` // Needs FK to groups.id
//
// User *User `bun:"rel:belongs-to,join:user_id=id"`
// Group *Group `bun:"rel:belongs-to,join:group_id=id"`
// }
//
// Here UsersToGroups has a composite primary key, composed of other primary keys.
// References reports whether the table that defines this Relation must
// declare the foreign key constraint itself ('has-one' / 'belongs-to').
// For other relation kinds the constraint lives in the referencing table
// (has-many) or the junction table (m2m), so this returns false.
//
// NOTE: the original text contained a leftover pre-update line
// `return r.Type == HasOneRelation || r.Type == BelongsToRelation`
// before this body, which made the code below unreachable; it has been
// removed so the v1.2.10 logic actually runs.
func (r *Relation) References() bool {
	// Classify the referencing columns: allPK is true when every base
	// column is part of the primary key, nonePK when none of them are.
	allPK := true
	nonePK := true
	for _, f := range r.BasePKs {
		allPK = allPK && f.IsPK
		nonePK = nonePK && !f.IsPK
	}

	// Erring on the side of caution, only create foreign keys from PK
	// columns when they form the composite PK of an m2m junction table.
	effectsM2M := r.Field.Table.IsM2MTable && allPK

	return (r.Type == HasOneRelation || r.Type == BelongsToRelation) &&
		(effectsM2M || nonePK)
}
func (r *Relation) String() string {

View file

@ -50,6 +50,7 @@ func (s Ident) AppendQuery(fmter Formatter, b []byte) ([]byte, error) {
//------------------------------------------------------------------------------
// NOTE: It should not be modified after creation.
type QueryWithArgs struct {
Query string
Args []interface{}

View file

@ -62,8 +62,9 @@ type Table struct {
FieldMap map[string]*Field
StructMap map[string]*structField
Relations map[string]*Relation
Unique map[string][]*Field
IsM2MTable bool // If true, this table is the "junction table" of an m2m relation.
Relations map[string]*Relation
Unique map[string][]*Field
SoftDeleteField *Field
UpdateSoftDeleteField func(fv reflect.Value, tm time.Time) error
@ -122,6 +123,7 @@ type embeddedField struct {
names := make(map[string]struct{})
embedded := make([]embeddedField, 0, 10)
ebdStructs := make(map[string]*structField, 0)
for i, n := 0, typ.NumField(); i < n; i++ {
sf := typ.Field(i)
@ -163,6 +165,17 @@ type embeddedField struct {
subfield: subfield,
})
}
if len(subtable.StructMap) > 0 {
for k, v := range subtable.StructMap {
// NOTE: conflict Struct name
if _, ok := ebdStructs[k]; !ok {
ebdStructs[k] = &structField{
Index: makeIndex(sf.Index, v.Index),
Table: subtable,
}
}
}
}
if tagstr != "" {
tag := tagparser.Parse(tagstr)
@ -197,6 +210,18 @@ type embeddedField struct {
subfield: subfield,
})
}
if len(subtable.StructMap) > 0 {
for k, v := range subtable.StructMap {
// NOTE: conflict Struct name
k = prefix + k
if _, ok := ebdStructs[k]; !ok {
ebdStructs[k] = &structField{
Index: makeIndex(sf.Index, v.Index),
Table: subtable,
}
}
}
}
continue
}
@ -252,6 +277,15 @@ type embeddedField struct {
}
}
if len(ebdStructs) > 0 && t.StructMap == nil {
t.StructMap = make(map[string]*structField)
}
for name, sfield := range ebdStructs {
if _, ok := t.StructMap[name]; !ok {
t.StructMap[name] = sfield
}
}
if len(embedded) > 0 {
// https://github.com/uptrace/bun/issues/1095
// < v1.2, all fields follow the order corresponding to the struct
@ -483,6 +517,7 @@ func (t *Table) newField(sf reflect.StructField, tag tagparser.Tag) *Field {
}
field := &Field{
Table: t,
StructField: sf,
IsPtr: sf.Type.Kind() == reflect.Ptr,
@ -862,6 +897,7 @@ func (t *Table) m2mRelation(field *Field) *Relation {
JoinTable: joinTable,
M2MTable: m2mTable,
}
m2mTable.markM2M()
if field.Tag.HasOption("join_on") {
rel.Condition = field.Tag.Options["join_on"]
@ -907,6 +943,10 @@ func (t *Table) m2mRelation(field *Field) *Relation {
return rel
}
// markM2M marks t as the junction table of a many-to-many relation by
// setting IsM2MTable (see the field's declaration); it is invoked on the
// m2m table while resolving an m2m relation in m2mRelation.
func (t *Table) markM2M() {
t.IsM2MTable = true
}
//------------------------------------------------------------------------------
func (t *Table) Dialect() Dialect { return t.dialect }

View file

@ -1,6 +1,7 @@
package bun
import (
"context"
"fmt"
"reflect"
"strings"
@ -86,3 +87,26 @@ func appendComment(b []byte, name string) []byte {
name = strings.ReplaceAll(name, `*/`, `*\/`)
return append(b, fmt.Sprintf("/* %s */ ", name)...)
}
// queryCommentCtxKey is a context key for setting a query comment on a context instead of calling the Comment("...") API directly
type queryCommentCtxKey struct{}
// WithComment returns a context that includes a comment that may be included in a query for debugging
//
// If a context with an attached query is used, a comment set by the Comment("...") API will be overwritten.
func WithComment(ctx context.Context, comment string) context.Context {
return context.WithValue(ctx, queryCommentCtxKey{}, comment)
}
// commenter describes the Comment interface implemented by all of the query types
type commenter[T any] interface {
Comment(string) T
}
// setCommentFromContext sets the comment on the given query from the supplied context if one is set using the Comment(...) method.
func setCommentFromContext[T any](ctx context.Context, q commenter[T]) {
s, _ := ctx.Value(queryCommentCtxKey{}).(string)
if s != "" {
q.Comment(s)
}
}

View file

@ -2,5 +2,5 @@
// Version is the current release version.
//
// NOTE: the diff-merged text contained both the old `return "1.2.9"` and
// the new `return "1.2.10"`; only the new value (this commit bumps the
// bun dependencies to v1.2.10) is kept.
func Version() string {
	return "1.2.10"
}

10
vendor/modules.txt vendored
View file

@ -754,7 +754,7 @@ github.com/prometheus/common/model
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
# github.com/puzpuzpuz/xsync/v3 v3.5.0
# github.com/puzpuzpuz/xsync/v3 v3.5.1
## explicit; go 1.18
github.com/puzpuzpuz/xsync/v3
# github.com/quasoft/memstore v0.0.0-20191010062613-2bce066d2b0b
@ -911,7 +911,7 @@ github.com/ugorji/go/codec
github.com/ulule/limiter/v3
github.com/ulule/limiter/v3/drivers/store/common
github.com/ulule/limiter/v3/drivers/store/memory
# github.com/uptrace/bun v1.2.9
# github.com/uptrace/bun v1.2.10
## explicit; go 1.22.0
github.com/uptrace/bun
github.com/uptrace/bun/dialect
@ -925,13 +925,13 @@ github.com/uptrace/bun/internal/tagparser
github.com/uptrace/bun/migrate
github.com/uptrace/bun/migrate/sqlschema
github.com/uptrace/bun/schema
# github.com/uptrace/bun/dialect/pgdialect v1.2.9
# github.com/uptrace/bun/dialect/pgdialect v1.2.10
## explicit; go 1.22.0
github.com/uptrace/bun/dialect/pgdialect
# github.com/uptrace/bun/dialect/sqlitedialect v1.2.9
# github.com/uptrace/bun/dialect/sqlitedialect v1.2.10
## explicit; go 1.22.0
github.com/uptrace/bun/dialect/sqlitedialect
# github.com/uptrace/bun/extra/bunotel v1.2.9
# github.com/uptrace/bun/extra/bunotel v1.2.10
## explicit; go 1.22.0
github.com/uptrace/bun/extra/bunotel
# github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2