bump uptrace/bun dependencies from 1.2.6 to 1.2.8 (#3645)

Authored by kim on 2025-01-14 14:23:28 +00:00; committed by GitHub
parent e77c7e16b6
commit b8ef9fc4bc
89 changed files with 907 additions and 4123 deletions

go.mod (19 lines changed)

@@ -76,20 +76,20 @@ require (
github.com/tetratelabs/wazero v1.8.2
github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80
github.com/ulule/limiter/v3 v3.11.2
-	github.com/uptrace/bun v1.2.6
-	github.com/uptrace/bun/dialect/pgdialect v1.2.6
-	github.com/uptrace/bun/dialect/sqlitedialect v1.2.6
-	github.com/uptrace/bun/extra/bunotel v1.2.6
+	github.com/uptrace/bun v1.2.8
+	github.com/uptrace/bun/dialect/pgdialect v1.2.8
+	github.com/uptrace/bun/dialect/sqlitedialect v1.2.8
+	github.com/uptrace/bun/extra/bunotel v1.2.8
github.com/wagslane/go-password-validator v0.3.0
github.com/yuin/goldmark v1.7.8
-	go.opentelemetry.io/otel v1.32.0
+	go.opentelemetry.io/otel v1.33.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0
go.opentelemetry.io/otel/exporters/prometheus v0.51.0
-	go.opentelemetry.io/otel/metric v1.32.0
+	go.opentelemetry.io/otel/metric v1.33.0
go.opentelemetry.io/otel/sdk v1.32.0
go.opentelemetry.io/otel/sdk/metric v1.32.0
-	go.opentelemetry.io/otel/trace v1.32.0
+	go.opentelemetry.io/otel/trace v1.33.0
go.uber.org/automaxprocs v1.6.0
golang.org/x/crypto v0.31.0
golang.org/x/image v0.23.0
@@ -111,9 +111,7 @@ require (
github.com/Masterminds/sprig/v3 v3.2.3 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aymerick/douceur v0.2.0 // indirect
-	github.com/bahlo/generic-list-go v0.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
-	github.com/buger/jsonparser v1.1.1 // indirect
github.com/bytedance/sonic v1.12.6 // indirect
github.com/bytedance/sonic/loader v0.2.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
@@ -226,7 +224,6 @@ require (
github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2 // indirect
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
-	github.com/wk8/go-ordered-map/v2 v2.1.9-0.20240816141633-0a40785b4f41 // indirect
go.mongodb.org/mongo-driver v1.14.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
@@ -235,7 +232,7 @@ require (
golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/sync v0.10.0 // indirect
-	golang.org/x/sys v0.28.0 // indirect
+	golang.org/x/sys v0.29.0 // indirect
golang.org/x/tools v0.28.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect

go.sum (generated; 26 lines changed)

@@ -97,14 +97,10 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
-github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
-github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/buckket/go-blurhash v1.1.0 h1:X5M6r0LIvwdvKiUtiNcRL2YlmOfMzYobI3VCKCZc9Do=
github.com/buckket/go-blurhash v1.1.0/go.mod h1:aT2iqo5W9vu9GpyoLErKfTHwgODsZp3bQfXjXJUxNb8=
-github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
-github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/bytedance/sonic v1.12.6 h1:/isNmCUF2x3Sh8RAp/4mh4ZGkcFAX/hLrzrK3AvpRzk=
github.com/bytedance/sonic v1.12.6/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
@@ -583,14 +579,14 @@ github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65E
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/ulule/limiter/v3 v3.11.2 h1:P4yOrxoEMJbOTfRJR2OzjL90oflzYPPmWg+dvwN2tHA=
github.com/ulule/limiter/v3 v3.11.2/go.mod h1:QG5GnFOCV+k7lrL5Y8kgEeeflPH3+Cviqlqa8SVSQxI=
-github.com/uptrace/bun v1.2.6 h1:lyGBQAhNiClchb97HA2cBnDeRxwTRLhSIgiFPXVisV8=
-github.com/uptrace/bun v1.2.6/go.mod h1:xMgnVFf+/5xsrFBU34HjDJmzZnXbVuNEt/Ih56I8qBU=
-github.com/uptrace/bun/dialect/pgdialect v1.2.6 h1:iNd1YLx619K+sZK+dRcWPzluurXYK1QwIkp9FEfNB/8=
-github.com/uptrace/bun/dialect/pgdialect v1.2.6/go.mod h1:OL7d3qZLxKYP8kxNhMg3IheN1pDR3UScGjoUP+ivxJQ=
-github.com/uptrace/bun/dialect/sqlitedialect v1.2.6 h1:p8vA39kR9Ypw0so+gUhFhd8NOufx3MzvoxJeUpwieQU=
-github.com/uptrace/bun/dialect/sqlitedialect v1.2.6/go.mod h1:sdGy8eCv9WVGDrPhagE9i7FASeyj3BFkHzkRMF/qK3w=
-github.com/uptrace/bun/extra/bunotel v1.2.6 h1:6m90acv9hsDuTYRo3oiKCWMatGPmi+feKAx8Y/GPj9A=
-github.com/uptrace/bun/extra/bunotel v1.2.6/go.mod h1:QGqnFNJ2H88juh7DmgdPJZVN9bSTpj7UaGllSO9JDKk=
+github.com/uptrace/bun v1.2.8 h1:HEiLvy9wc7ehU5S02+O6NdV5BLz48lL4REPhTkMX3Dg=
+github.com/uptrace/bun v1.2.8/go.mod h1:JBq0uBKsKqNT0Ccce1IAFZY337Wkf08c6F6qlmfOHE8=
+github.com/uptrace/bun/dialect/pgdialect v1.2.8 h1:9n3qVh6yc+u7F3lpXzsWrAFJG1yLHUC2thjCCVEDpM8=
+github.com/uptrace/bun/dialect/pgdialect v1.2.8/go.mod h1:plksD43MjAlPGYLD9/SzsLUpGH5poXE9IB1+ka/sEzE=
+github.com/uptrace/bun/dialect/sqlitedialect v1.2.8 h1:Huqw7YhLFTbocbSv8NETYYXqKtwLa6XsciCWtjzWSWU=
+github.com/uptrace/bun/dialect/sqlitedialect v1.2.8/go.mod h1:ni7h2uwIc5zPhxgmCMTEbefONc4XsVr/ATfz1Q7d3CE=
+github.com/uptrace/bun/extra/bunotel v1.2.8 h1:mu98xQ2EcmkeNGT+YjVtMludtZNHfhfHqhrS77mk4YM=
+github.com/uptrace/bun/extra/bunotel v1.2.8/go.mod h1:NSjzSfYdDg0WSiY54pFp4ykGoGUmbc/xYQ7AsdyslHQ=
github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2 h1:ZjUj9BLYf9PEqBn8W/OapxhPjVRdC6CsXTdULHsyk5c=
github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2/go.mod h1:O8bHQfyinKwTXKkiKNGmLQS7vRsqRxIQTFZpYpHK3IQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
@@ -605,8 +601,6 @@ github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAh
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
github.com/wagslane/go-password-validator v0.3.0 h1:vfxOPzGHkz5S146HDpavl0cw1DSVP061Ry2PX0/ON6I=
github.com/wagslane/go-password-validator v0.3.0/go.mod h1:TI1XJ6T5fRdRnHqHt14pvy1tNVnrwe7m3/f1f2fDphQ=
-github.com/wk8/go-ordered-map/v2 v2.1.9-0.20240816141633-0a40785b4f41 h1:rnB8ZLMeAr3VcqjfRkAm27qb8y6zFKNfuHvy1Gfe7KI=
-github.com/wk8/go-ordered-map/v2 v2.1.9-0.20240816141633-0a40785b4f41/go.mod h1:DbzwytT4g/odXquuOCqroKvtxxldI4nb3nuesHF/Exo=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
@@ -803,8 +797,8 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
-golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
+golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=

vendor/github.com/bahlo/generic-list-go/LICENSE (vendored, deleted)

@@ -1,27 +0,0 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/bahlo/generic-list-go/README.md (vendored, deleted)

@@ -1,5 +0,0 @@
# generic-list-go [![CI](https://github.com/bahlo/generic-list-go/actions/workflows/ci.yml/badge.svg)](https://github.com/bahlo/generic-list-go/actions/workflows/ci.yml)
Go [container/list](https://pkg.go.dev/container/list) but with generics.
The code is based on `container/list` in `go1.18beta2`.
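Since this commit drops the vendored package, a minimal usage sketch for context (API exactly as in the list.go diff below; the import alias and demo wrapper are assumptions):

```go
import (
	"fmt"

	list "github.com/bahlo/generic-list-go"
)

func demo() {
	l := list.New[int]()
	l.PushBack(1)
	l.PushBack(2)
	for e := l.Front(); e != nil; e = e.Next() {
		fmt.Println(e.Value) // prints 1, then 2
	}
}
```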

vendor/github.com/bahlo/generic-list-go/list.go (vendored, deleted)

@@ -1,235 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package list implements a doubly linked list.
//
// To iterate over a list (where l is a *List):
// for e := l.Front(); e != nil; e = e.Next() {
// // do something with e.Value
// }
//
package list
// Element is an element of a linked list.
type Element[T any] struct {
// Next and previous pointers in the doubly-linked list of elements.
// To simplify the implementation, internally a list l is implemented
// as a ring, such that &l.root is both the next element of the last
// list element (l.Back()) and the previous element of the first list
// element (l.Front()).
next, prev *Element[T]
// The list to which this element belongs.
list *List[T]
// The value stored with this element.
Value T
}
// Next returns the next list element or nil.
func (e *Element[T]) Next() *Element[T] {
if p := e.next; e.list != nil && p != &e.list.root {
return p
}
return nil
}
// Prev returns the previous list element or nil.
func (e *Element[T]) Prev() *Element[T] {
if p := e.prev; e.list != nil && p != &e.list.root {
return p
}
return nil
}
// List represents a doubly linked list.
// The zero value for List is an empty list ready to use.
type List[T any] struct {
root Element[T] // sentinel list element, only &root, root.prev, and root.next are used
len int // current list length excluding (this) sentinel element
}
// Init initializes or clears list l.
func (l *List[T]) Init() *List[T] {
l.root.next = &l.root
l.root.prev = &l.root
l.len = 0
return l
}
// New returns an initialized list.
func New[T any]() *List[T] { return new(List[T]).Init() }
// Len returns the number of elements of list l.
// The complexity is O(1).
func (l *List[T]) Len() int { return l.len }
// Front returns the first element of list l or nil if the list is empty.
func (l *List[T]) Front() *Element[T] {
if l.len == 0 {
return nil
}
return l.root.next
}
// Back returns the last element of list l or nil if the list is empty.
func (l *List[T]) Back() *Element[T] {
if l.len == 0 {
return nil
}
return l.root.prev
}
// lazyInit lazily initializes a zero List value.
func (l *List[T]) lazyInit() {
if l.root.next == nil {
l.Init()
}
}
// insert inserts e after at, increments l.len, and returns e.
func (l *List[T]) insert(e, at *Element[T]) *Element[T] {
e.prev = at
e.next = at.next
e.prev.next = e
e.next.prev = e
e.list = l
l.len++
return e
}
// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
func (l *List[T]) insertValue(v T, at *Element[T]) *Element[T] {
return l.insert(&Element[T]{Value: v}, at)
}
// remove removes e from its list, decrements l.len
func (l *List[T]) remove(e *Element[T]) {
e.prev.next = e.next
e.next.prev = e.prev
e.next = nil // avoid memory leaks
e.prev = nil // avoid memory leaks
e.list = nil
l.len--
}
// move moves e to next to at.
func (l *List[T]) move(e, at *Element[T]) {
if e == at {
return
}
e.prev.next = e.next
e.next.prev = e.prev
e.prev = at
e.next = at.next
e.prev.next = e
e.next.prev = e
}
// Remove removes e from l if e is an element of list l.
// It returns the element value e.Value.
// The element must not be nil.
func (l *List[T]) Remove(e *Element[T]) T {
if e.list == l {
// if e.list == l, l must have been initialized when e was inserted
// in l or l == nil (e is a zero Element) and l.remove will crash
l.remove(e)
}
return e.Value
}
// PushFront inserts a new element e with value v at the front of list l and returns e.
func (l *List[T]) PushFront(v T) *Element[T] {
l.lazyInit()
return l.insertValue(v, &l.root)
}
// PushBack inserts a new element e with value v at the back of list l and returns e.
func (l *List[T]) PushBack(v T) *Element[T] {
l.lazyInit()
return l.insertValue(v, l.root.prev)
}
// InsertBefore inserts a new element e with value v immediately before mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
func (l *List[T]) InsertBefore(v T, mark *Element[T]) *Element[T] {
if mark.list != l {
return nil
}
// see comment in List.Remove about initialization of l
return l.insertValue(v, mark.prev)
}
// InsertAfter inserts a new element e with value v immediately after mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
func (l *List[T]) InsertAfter(v T, mark *Element[T]) *Element[T] {
if mark.list != l {
return nil
}
// see comment in List.Remove about initialization of l
return l.insertValue(v, mark)
}
// MoveToFront moves element e to the front of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
func (l *List[T]) MoveToFront(e *Element[T]) {
if e.list != l || l.root.next == e {
return
}
// see comment in List.Remove about initialization of l
l.move(e, &l.root)
}
// MoveToBack moves element e to the back of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
func (l *List[T]) MoveToBack(e *Element[T]) {
if e.list != l || l.root.prev == e {
return
}
// see comment in List.Remove about initialization of l
l.move(e, l.root.prev)
}
// MoveBefore moves element e to its new position before mark.
// If e or mark is not an element of l, or e == mark, the list is not modified.
// The element and mark must not be nil.
func (l *List[T]) MoveBefore(e, mark *Element[T]) {
if e.list != l || e == mark || mark.list != l {
return
}
l.move(e, mark.prev)
}
// MoveAfter moves element e to its new position after mark.
// If e or mark is not an element of l, or e == mark, the list is not modified.
// The element and mark must not be nil.
func (l *List[T]) MoveAfter(e, mark *Element[T]) {
if e.list != l || e == mark || mark.list != l {
return
}
l.move(e, mark)
}
// PushBackList inserts a copy of another list at the back of list l.
// The lists l and other may be the same. They must not be nil.
func (l *List[T]) PushBackList(other *List[T]) {
l.lazyInit()
for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
l.insertValue(e.Value, l.root.prev)
}
}
// PushFrontList inserts a copy of another list at the front of list l.
// The lists l and other may be the same. They must not be nil.
func (l *List[T]) PushFrontList(other *List[T]) {
l.lazyInit()
for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
l.insertValue(e.Value, &l.root)
}
}

vendor/github.com/buger/jsonparser/.gitignore (vendored, deleted)

@@ -1,12 +0,0 @@
*.test
*.out
*.mprof
.idea
vendor/github.com/buger/goterm/
prof.cpu
prof.mem

vendor/github.com/buger/jsonparser/.travis.yml (vendored, deleted)

@@ -1,11 +0,0 @@
language: go
arch:
- amd64
- ppc64le
go:
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
script: go test -v ./.

vendor/github.com/buger/jsonparser/Dockerfile (vendored, deleted)

@@ -1,12 +0,0 @@
FROM golang:1.6
RUN go get github.com/Jeffail/gabs
RUN go get github.com/bitly/go-simplejson
RUN go get github.com/pquerna/ffjson
RUN go get github.com/antonholmquist/jason
RUN go get github.com/mreiferson/go-ujson
RUN go get -tags=unsafe -u github.com/ugorji/go/codec
RUN go get github.com/mailru/easyjson
WORKDIR /go/src/github.com/buger/jsonparser
ADD . /go/src/github.com/buger/jsonparser

vendor/github.com/buger/jsonparser/LICENSE (vendored, deleted)

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2016 Leonid Bugaev
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/buger/jsonparser/Makefile (vendored, deleted)

@@ -1,36 +0,0 @@
SOURCE = parser.go
CONTAINER = jsonparser
SOURCE_PATH = /go/src/github.com/buger/jsonparser
BENCHMARK = JsonParser
BENCHTIME = 5s
TEST = .
DRUN = docker run -v `pwd`:$(SOURCE_PATH) -i -t $(CONTAINER)
build:
docker build -t $(CONTAINER) .
race:
$(DRUN) --env GORACE="halt_on_error=1" go test ./. $(ARGS) -v -race -timeout 15s
bench:
$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -benchtime $(BENCHTIME) -v
bench_local:
$(DRUN) go test $(LDFLAGS) -test.benchmem -bench . $(ARGS) -benchtime $(BENCHTIME) -v
profile:
$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -memprofile mem.mprof -v
$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -cpuprofile cpu.out -v
$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -c
test:
$(DRUN) go test $(LDFLAGS) ./ -run $(TEST) -timeout 10s $(ARGS) -v
fmt:
$(DRUN) go fmt ./...
vet:
$(DRUN) go vet ./.
bash:
$(DRUN) /bin/bash

vendor/github.com/buger/jsonparser/README.md (vendored, deleted)

@@ -1,365 +0,0 @@
[![Go Report Card](https://goreportcard.com/badge/github.com/buger/jsonparser)](https://goreportcard.com/report/github.com/buger/jsonparser) ![License](https://img.shields.io/dub/l/vibe-d.svg)
# Alternative JSON parser for Go (up to 10x faster than the standard library)
It does not require you to know the structure of the payload (e.g. create structs), and allows accessing fields by providing a path to them. It is up to **10 times faster** than the standard `encoding/json` package (depending on payload size and usage) and **allocates no memory**. See benchmarks below.
## Rationale
Originally I made this for a project that relies on a lot of 3rd party APIs that can be unpredictable and complex.
I love simplicity and prefer to avoid external dependencies. `encoding/json` requires you to know your data structures exactly, and if you use `map[string]interface{}` instead, it is very slow and hard to manage.
I investigated what's on the market and found that most libraries are just wrappers around `encoding/json`; there are a few options with their own parsers (`ffjson`, `easyjson`), but they still require you to create data structures.
The goal of this project is to push the JSON parser to its performance limits without sacrificing compliance or developer experience.
## Example
For the given JSON our goal is to extract the user's full name, number of github followers and avatar.
```go
import "github.com/buger/jsonparser"
...
data := []byte(`{
"person": {
"name": {
"first": "Leonid",
"last": "Bugaev",
"fullName": "Leonid Bugaev"
},
"github": {
"handle": "buger",
"followers": 109
},
"avatars": [
{ "url": "https://avatars1.githubusercontent.com/u/14009?v=3&s=460", "type": "thumbnail" }
]
},
"company": {
"name": "Acme"
}
}`)
// You can specify key path by providing arguments to Get function
jsonparser.Get(data, "person", "name", "fullName")
// There are `GetInt` and `GetBoolean` helpers if you know the key's data type
jsonparser.GetInt(data, "person", "github", "followers")
// When you get an object, it returns a []byte slice pointing into the data containing it
// For `company` it will be `{"name": "Acme"}`
jsonparser.Get(data, "company")
// If the key doesn't exist it returns an error
var size int64
if value, err := jsonparser.GetInt(data, "company", "size"); err == nil {
size = value
}
// You can use `ArrayEach` helper to iterate items [item1, item2 .... itemN]
jsonparser.ArrayEach(data, func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
fmt.Println(jsonparser.Get(value, "url"))
}, "person", "avatars")
// Or you can access fields by index!
jsonparser.GetString(data, "person", "avatars", "[0]", "url")
// You can use `ObjectEach` helper to iterate objects { "key1":object1, "key2":object2, .... "keyN":objectN }
jsonparser.ObjectEach(data, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error {
fmt.Printf("Key: '%s'\n Value: '%s'\n Type: %s\n", string(key), string(value), dataType)
return nil
}, "person", "name")
// The most efficient way to extract multiple keys is `EachKey`
paths := [][]string{
[]string{"person", "name", "fullName"},
[]string{"person", "avatars", "[0]", "url"},
[]string{"company", "url"},
}
jsonparser.EachKey(data, func(idx int, value []byte, vt jsonparser.ValueType, err error){
switch idx {
case 0: // []string{"person", "name", "fullName"}
...
case 1: // []string{"person", "avatars", "[0]", "url"}
...
case 2: // []string{"company", "url"},
...
}
}, paths...)
// For more information see docs below
```
## Need to speed up your app?
I'm available for consulting and can help you push your app performance to the limits. Ping me at: leonsbox@gmail.com.
## Reference
Library API is really simple. You just need the `Get` method to perform any operation. The rest is just helpers around it.
You also can view API at [godoc.org](https://godoc.org/github.com/buger/jsonparser)
### **`Get`**
```go
func Get(data []byte, keys ...string) (value []byte, dataType jsonparser.ValueType, offset int, err error)
```
Receives the data and a key path to extract a value from.
Returns:
* `value` - Pointer into the original data containing the key's value, or an empty slice if nothing was found or an error occurred
* `dataType` - Can be: `NotExist`, `String`, `Number`, `Object`, `Array`, `Boolean` or `Null`
* `offset` - Offset from the provided data where the key's value ends. Used mostly internally, for example by the `ArrayEach` helper.
* `err` - Returned if the key is not found or on any other parsing issue. If the key is not found it also sets `dataType` to `NotExist`
Accepts multiple keys to specify the path to a JSON value (for querying nested structures).
If no keys are provided it will try to extract the closest JSON value (simple ones or object/array), which is useful for reading streams or arrays; see the `ArrayEach` implementation.
Note that keys can be array indexes: `jsonparser.GetInt("person", "avatars", "[0]", "url")`, pretty cool, yeah?
### **`GetString`**
```go
func GetString(data []byte, keys ...string) (val string, err error)
```
Returns strings, properly handling escaped and Unicode characters. Note that this will cause additional memory allocations.
### **`GetUnsafeString`**
If you need a string in your app and are ready to sacrifice support for escaped symbols in favor of speed, use this helper. It returns a string mapped to the existing byte slice memory, without any allocation:
```go
s, _ := jsonparser.GetUnsafeString(data, "person", "name", "title")
switch s {
case "CEO":
	...
case "Engineer":
	...
}
```
Note that `unsafe` here means that your string exists only until the GC frees the underlying byte slice; in most cases you can use the string only in the current context and should not pass it anywhere externally, through channels or any other way.
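A hedged sketch of retaining such a value safely (`strings.Clone` is standard library, Go 1.18+; what you do with the copy afterwards is up to you):

```go
s, _ := jsonparser.GetUnsafeString(data, "person", "name", "title")
owned := strings.Clone(s) // explicit copy; safe to store or send on a channel
```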
### **`GetBoolean`**, **`GetInt`** and **`GetFloat`**
```go
func GetBoolean(data []byte, keys ...string) (val bool, err error)
func GetFloat(data []byte, keys ...string) (val float64, err error)
func GetInt(data []byte, keys ...string) (val int64, err error)
```
If you know the key type, you can use the helpers above.
If the key's data type does not match, it returns an error.
### **`ArrayEach`**
```go
func ArrayEach(data []byte, cb func(value []byte, dataType jsonparser.ValueType, offset int, err error), keys ...string)
```
Needed for iterating arrays, accepts a callback function with the same return arguments as `Get`.
### **`ObjectEach`**
```go
func ObjectEach(data []byte, callback func(key []byte, value []byte, dataType ValueType, offset int) error, keys ...string) (err error)
```
Needed for iterating over objects; accepts a callback function. Example:
```go
handler := func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error {
	// do stuff here
	return nil
}
jsonparser.ObjectEach(myJson, handler)
```
### **`EachKey`**
```go
func EachKey(data []byte, cb func(idx int, value []byte, dataType jsonparser.ValueType, err error), paths ...[]string)
```
When you need to read multiple keys and are not afraid of a low-level API, `EachKey` is your friend. It reads the payload only once and invokes the callback as each path is found. By contrast, each call to `Get` has to process the payload again from scratch. Depending on the payload, `EachKey` can be several times faster than `Get`. Paths can use nested keys as well!
```go
paths := [][]string{
[]string{"uuid"},
[]string{"tz"},
[]string{"ua"},
[]string{"st"},
}
var data SmallPayload
jsonparser.EachKey(smallFixture, func(idx int, value []byte, vt jsonparser.ValueType, err error){
switch idx {
case 0:
data.Uuid, _ = jsonparser.ParseString(value)
case 1:
v, _ := jsonparser.ParseInt(value)
data.Tz = int(v)
case 2:
data.Ua, _ = jsonparser.ParseString(value)
case 3:
v, _ := jsonparser.ParseInt(value)
data.St = int(v)
}
}, paths...)
```
### **`Set`**
```go
func Set(data []byte, setValue []byte, keys ...string) (value []byte, err error)
```
Receives existing data structure, key path to set, and value to set at that key. *This functionality is experimental.*
Returns:
* `value` - Pointer to original data structure with updated or added key value.
* `err` - Returned on any parsing issue.
Accepts multiple keys to specify path to JSON value (in case of updating or creating nested structures).
Note that keys can be an array indexes: `jsonparser.Set(data, []byte("http://github.com"), "person", "avatars", "[0]", "url")`
### **`Delete`**
```go
func Delete(data []byte, keys ...string) (value []byte)
```
Receives existing data structure, and key path to delete. *This functionality is experimental.*
Returns:
* `value` - Pointer to original data structure with key path deleted if it can be found. If there is no key path, then the whole data structure is deleted.
Accepts multiple keys to specify path to JSON value (in case of updating or creating nested structures).
Note that keys can be an array indexes: `jsonparser.Delete(data, "person", "avatars", "[0]", "url")`
## What makes it so fast?
* It does not rely on `encoding/json`, reflection or `interface{}`; the only real package dependency is `bytes`.
* Operates on the JSON payload at the byte level, giving you pointers into the original data structure: no memory allocation.
* No automatic type conversions; by default everything is a []byte, but it tells you the value type, so you can convert it yourself (a few helpers are included).
* Does not parse the full record, only the keys you specify
## Benchmarks
There are 3 benchmark types, trying to simulate real-life usage for small, medium and large JSON payloads.
For each metric, the lower the value, the better. Time/op is in nanoseconds. Values better than standard encoding/json are marked in bold.
Benchmarks run on standard Linode 1024 box.
Compared libraries:
* https://golang.org/pkg/encoding/json
* https://github.com/Jeffail/gabs
* https://github.com/a8m/djson
* https://github.com/bitly/go-simplejson
* https://github.com/antonholmquist/jason
* https://github.com/mreiferson/go-ujson
* https://github.com/ugorji/go/codec
* https://github.com/pquerna/ffjson
* https://github.com/mailru/easyjson
* https://github.com/buger/jsonparser
#### TLDR
If you want to skip the next sections, we have 2 winners: `jsonparser` and `easyjson`.
`jsonparser` is up to 10 times faster than the standard `encoding/json` package (depending on payload size and usage), and almost infinitely (literally) better in memory consumption because it operates on the data at the byte level and provides direct slice pointers.
`easyjson` wins on CPU in the medium tests, and frankly I'm impressed with this package: these are remarkable results considering that it is almost a drop-in replacement for `encoding/json` (it requires some code generation).
It's hard to fully compare `jsonparser` with `easyjson` (or `ffjson`): they are true parsers and fully process the record, unlike `jsonparser`, which parses only the keys you specify.
If you are searching for a replacement for `encoding/json` while keeping structs, `easyjson` is an amazing choice. If you want to process dynamic JSON, have memory constraints, or want more control over your data, you should try `jsonparser`.
`jsonparser` performance heavily depends on usage, and it works best when you do not need to process the full record, only some keys. The more calls you need to make, the slower it gets; in contrast, `easyjson` (or `ffjson`, `encoding/json`) parses the record only once, and then you can make as many calls as you want.
With great power comes great responsibility! :)
#### Small payload
Each test processes 190 bytes of http log as a JSON record.
It should read multiple fields.
https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_small_payload_test.go
Library | time/op | bytes/op | allocs/op
------ | ------- | -------- | -------
encoding/json struct | 7879 | 880 | 18
encoding/json interface{} | 8946 | 1521 | 38
Jeffail/gabs | 10053 | 1649 | 46
bitly/go-simplejson | 10128 | 2241 | 36
antonholmquist/jason | 27152 | 7237 | 101
github.com/ugorji/go/codec | 8806 | 2176 | 31
mreiferson/go-ujson | **7008** | **1409** | 37
a8m/djson | 3862 | 1249 | 30
pquerna/ffjson | **3769** | **624** | **15**
mailru/easyjson | **2002** | **192** | **9**
buger/jsonparser | **1367** | **0** | **0**
buger/jsonparser (EachKey API) | **809** | **0** | **0**
Winners are ffjson, easyjson and jsonparser, where jsonparser is up to 9.8x faster than encoding/json and 4.6x faster than ffjson, and slightly faster than easyjson.
If you look at memory allocation, jsonparser has no rivals, as it makes no data copy and operates with raw []byte structures and pointers to it.
#### Medium payload
Each test processes a 2.4kb JSON record (based on Clearbit API).
It should read multiple nested fields and 1 array.
https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_medium_payload_test.go
| Library | time/op | bytes/op | allocs/op |
| ------- | ------- | -------- | --------- |
| encoding/json struct | 57749 | 1336 | 29 |
| encoding/json interface{} | 79297 | 10627 | 215 |
| Jeffail/gabs | 83807 | 11202 | 235 |
| bitly/go-simplejson | 88187 | 17187 | 220 |
| antonholmquist/jason | 94099 | 19013 | 247 |
| github.com/ugorji/go/codec | 114719 | 6712 | 152 |
| mreiferson/go-ujson | **56972** | 11547 | 270 |
| a8m/djson | 28525 | 10196 | 198 |
| pquerna/ffjson | **20298** | **856** | **20** |
| mailru/easyjson | **10512** | **336** | **12** |
| buger/jsonparser | **15955** | **0** | **0** |
| buger/jsonparser (EachKey API) | **8916** | **0** | **0** |
The difference between ffjson and jsonparser in CPU usage is smaller, while the memory consumption difference is growing. On the other hand `easyjson` shows remarkable performance for medium payload.
`gabs`, `go-simplejson` and `jason` are based on encoding/json and map[string]interface{} and are really only helpers for unstructured JSON; their performance correlates with `encoding/json interface{}`, so they will skip the next round.
`go-ujson`, while it has its own parser, shows the same performance as `encoding/json` and also skips the next round. The same goes for `ugorji/go/codec`, which showed unexpectedly bad performance on complex payloads.
#### Large payload
Each test processes a 24kb JSON record (based on Discourse API)
It should read 2 arrays, and for each item in array get a few fields.
Basically it means processing a full JSON file.
https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_large_payload_test.go
| Library | time/op | bytes/op | allocs/op |
| --- | --- | --- | --- |
| encoding/json struct | 748336 | 8272 | 307 |
| encoding/json interface{} | 1224271 | 215425 | 3395 |
| a8m/djson | 510082 | 213682 | 2845 |
| pquerna/ffjson | **312271** | **7792** | **298** |
| mailru/easyjson | **154186** | **6992** | **288** |
| buger/jsonparser | **85308** | **0** | **0** |
`jsonparser` is now the winner, but do not forget that it is a far more lightweight parser than `ffjson` or `easyjson`: they have to parse all the data, while `jsonparser` parses only what you need. `ffjson`, `easyjson` and `jsonparser` all have their own parsing code and do not depend on `encoding/json` or `interface{}`; that's one of the reasons why they are so fast. `easyjson` also uses a bit of the `unsafe` package to reduce memory consumption (in theory this can lead to unexpected GC issues, but I have not tested it enough).
Also, the last benchmark did not include an `EachKey` test, because in this particular case we need to read a lot of array values, and using `ArrayEach` is more efficient.
## Questions and support
All bug reports and suggestions should go through GitHub Issues.
## Contributing
1. Fork it
2. Create your feature branch (git checkout -b my-new-feature)
3. Commit your changes (git commit -am 'Added some feature')
4. Push to the branch (git push origin my-new-feature)
5. Create new Pull Request
## Development
All my development happens using Docker, and the repo includes some Make tasks to simplify development.
* `make build` - builds the docker image, usually needs to be called only once
* `make test` - run tests
* `make fmt` - run go fmt
* `make bench` - run benchmarks (if you need to run only a single benchmark, modify the `BENCHMARK` variable in the Makefile)
* `make profile` - runs benchmarks and generates 3 files - `cpu.out`, `mem.mprof` and the `benchmark.test` binary, which can be used with `go tool pprof`
* `make bash` - enter the container (I use it for running `go tool pprof` above)

vendor/github.com/buger/jsonparser/bytes.go (vendored, deleted)

@@ -1,47 +0,0 @@
package jsonparser
import (
bio "bytes"
)
// minInt64 is the absolute value of the smallest representable int64 ('-9223372036854775808'); the leading '-' is stripped before comparison
const minInt64 = `9223372036854775808`
// About 2x faster than strconv.ParseInt because it only supports base 10, which is enough for JSON
func parseInt(bytes []byte) (v int64, ok bool, overflow bool) {
if len(bytes) == 0 {
return 0, false, false
}
var neg bool = false
if bytes[0] == '-' {
neg = true
bytes = bytes[1:]
}
var b int64 = 0
for _, c := range bytes {
if c >= '0' && c <= '9' {
b = (10 * v) + int64(c-'0')
} else {
return 0, false, false
}
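// overflow check below: if b wrapped past the int64 maximum, it is now smaller than the previous value v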
if overflow = (b < v); overflow {
break
}
v = b
}
if overflow {
if neg && bio.Equal(bytes, []byte(minInt64)) {
return b, true, false
}
return 0, false, true
}
if neg {
return -v, true, false
} else {
return v, true, false
}
}

vendor/github.com/buger/jsonparser/bytes_safe.go (vendored, deleted)

@@ -1,25 +0,0 @@
// +build appengine appenginevm
package jsonparser
import (
"strconv"
)
// See fastbytes_unsafe.go for explanation on why *[]byte is used (signatures must be consistent with those in that file)
func equalStr(b *[]byte, s string) bool {
return string(*b) == s
}
func parseFloat(b *[]byte) (float64, error) {
return strconv.ParseFloat(string(*b), 64)
}
func bytesToString(b *[]byte) string {
return string(*b)
}
func StringToBytes(s string) []byte {
return []byte(s)
}

vendor/github.com/buger/jsonparser/bytes_unsafe.go (vendored, deleted)

@@ -1,44 +0,0 @@
// +build !appengine,!appenginevm
package jsonparser
import (
"reflect"
"strconv"
"unsafe"
"runtime"
)
//
// The reason for using *[]byte rather than []byte in parameters is an optimization. As of Go 1.6,
// the compiler cannot perfectly inline the function when using a non-pointer slice. That is,
// the non-pointer []byte parameter version is slower than if its function body is manually
// inlined, whereas the pointer []byte version is equally fast to the manually inlined
// version. Instruction count in assembly taken from "go tool compile" confirms this difference.
//
// TODO: Remove hack after Go 1.7 release
//
func equalStr(b *[]byte, s string) bool {
return *(*string)(unsafe.Pointer(b)) == s
}
func parseFloat(b *[]byte) (float64, error) {
return strconv.ParseFloat(*(*string)(unsafe.Pointer(b)), 64)
}
// A hack until issue golang/go#2632 is fixed.
// See: https://github.com/golang/go/issues/2632
func bytesToString(b *[]byte) string {
return *(*string)(unsafe.Pointer(b))
}
func StringToBytes(s string) []byte {
b := make([]byte, 0, 0)
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
bh.Data = sh.Data
bh.Cap = sh.Len
bh.Len = sh.Len
runtime.KeepAlive(s)
return b
}

vendor/github.com/buger/jsonparser/escape.go (vendored, deleted)

@@ -1,173 +0,0 @@
package jsonparser
import (
"bytes"
"unicode/utf8"
)
// JSON Unicode stuff: see https://tools.ietf.org/html/rfc7159#section-7
const supplementalPlanesOffset = 0x10000
const highSurrogateOffset = 0xD800
const lowSurrogateOffset = 0xDC00
const basicMultilingualPlaneReservedOffset = 0xDFFF
const basicMultilingualPlaneOffset = 0xFFFF
func combineUTF16Surrogates(high, low rune) rune {
return supplementalPlanesOffset + (high-highSurrogateOffset)<<10 + (low - lowSurrogateOffset)
}
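// e.g. the pair \uD83D \uDE00: 0x10000 + (0x3D << 10) + 0x200 = 0x1F600 (😀)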
const badHex = -1
func h2I(c byte) int {
switch {
case c >= '0' && c <= '9':
return int(c - '0')
case c >= 'A' && c <= 'F':
return int(c - 'A' + 10)
case c >= 'a' && c <= 'f':
return int(c - 'a' + 10)
}
return badHex
}
// decodeSingleUnicodeEscape decodes a single \uXXXX escape sequence. The prefix \u is assumed to be present and
// is not checked.
// In JSON, these escapes can either come alone or as part of "UTF16 surrogate pairs" that must be handled together.
// This function only handles one; decodeUnicodeEscape handles this more complex case.
func decodeSingleUnicodeEscape(in []byte) (rune, bool) {
// We need at least 6 characters total
if len(in) < 6 {
return utf8.RuneError, false
}
// Convert hex to decimal
h1, h2, h3, h4 := h2I(in[2]), h2I(in[3]), h2I(in[4]), h2I(in[5])
if h1 == badHex || h2 == badHex || h3 == badHex || h4 == badHex {
return utf8.RuneError, false
}
// Compose the hex digits
return rune(h1<<12 + h2<<8 + h3<<4 + h4), true
}
// isUTF16EncodedRune checks if a rune is in the range for non-BMP characters,
// which is used to describe UTF16 chars.
// Source: https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
func isUTF16EncodedRune(r rune) bool {
return highSurrogateOffset <= r && r <= basicMultilingualPlaneReservedOffset
}
func decodeUnicodeEscape(in []byte) (rune, int) {
if r, ok := decodeSingleUnicodeEscape(in); !ok {
// Invalid Unicode escape
return utf8.RuneError, -1
} else if r <= basicMultilingualPlaneOffset && !isUTF16EncodedRune(r) {
// Valid Unicode escape in Basic Multilingual Plane
return r, 6
} else if r2, ok := decodeSingleUnicodeEscape(in[6:]); !ok { // Note: previous decodeSingleUnicodeEscape success guarantees at least 6 bytes remain
// UTF16 "high surrogate" without the mandatory valid following Unicode escape for the "low surrogate"
return utf8.RuneError, -1
} else if r2 < lowSurrogateOffset {
// Invalid UTF16 "low surrogate"
return utf8.RuneError, -1
} else {
// Valid UTF16 surrogate pair
return combineUTF16Surrogates(r, r2), 12
}
}
// backslashCharEscapeTable: when '\X' is found for some byte X, it is to be replaced with backslashCharEscapeTable[X]
var backslashCharEscapeTable = [...]byte{
'"': '"',
'\\': '\\',
'/': '/',
'b': '\b',
'f': '\f',
'n': '\n',
'r': '\r',
't': '\t',
}
// unescapeToUTF8 unescapes the single escape sequence starting at 'in' into 'out' and returns
// how many characters were consumed from 'in' and emitted into 'out'.
// If a valid escape sequence does not appear as a prefix of 'in', (-1, -1) is returned to signal the error.
func unescapeToUTF8(in, out []byte) (inLen int, outLen int) {
if len(in) < 2 || in[0] != '\\' {
// Invalid escape due to insufficient characters for any escape or no initial backslash
return -1, -1
}
// https://tools.ietf.org/html/rfc7159#section-7
switch e := in[1]; e {
case '"', '\\', '/', 'b', 'f', 'n', 'r', 't':
// Valid basic 2-character escapes (use lookup table)
out[0] = backslashCharEscapeTable[e]
return 2, 1
case 'u':
// Unicode escape
if r, inLen := decodeUnicodeEscape(in); inLen == -1 {
// Invalid Unicode escape
return -1, -1
} else {
// Valid Unicode escape; re-encode as UTF8
outLen := utf8.EncodeRune(out, r)
return inLen, outLen
}
}
return -1, -1
}
// unescape unescapes the string contained in 'in' and returns it as a slice.
// If 'in' contains no escaped characters:
// Returns 'in'.
// Else, if 'out' is of sufficient capacity (guaranteed if cap(out) >= len(in)):
// 'out' is used to build the unescaped string and is returned with no extra allocation
// Else:
// A new slice is allocated and returned.
func Unescape(in, out []byte) ([]byte, error) {
firstBackslash := bytes.IndexByte(in, '\\')
if firstBackslash == -1 {
return in, nil
}
// Get a buffer of sufficient size (allocate if needed)
if cap(out) < len(in) {
out = make([]byte, len(in))
} else {
out = out[0:len(in)]
}
// Copy the first sequence of unescaped bytes to the output and obtain a buffer pointer (subslice)
copy(out, in[:firstBackslash])
in = in[firstBackslash:]
buf := out[firstBackslash:]
for len(in) > 0 {
// Unescape the next escaped character
inLen, bufLen := unescapeToUTF8(in, buf)
if inLen == -1 {
return nil, MalformedStringEscapeError
}
in = in[inLen:]
buf = buf[bufLen:]
// Copy everything up until the next backslash
nextBackslash := bytes.IndexByte(in, '\\')
if nextBackslash == -1 {
copy(buf, in)
buf = buf[len(in):]
break
} else {
copy(buf, in[:nextBackslash])
buf = buf[nextBackslash:]
in = in[nextBackslash:]
}
}
// Trim the out buffer to the amount that was actually emitted
return out[:len(out)-len(buf)], nil
}

vendor/github.com/buger/jsonparser/fuzz.go (vendored, deleted)

@@ -1,117 +0,0 @@
package jsonparser
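// go-fuzz convention: return 1 when the input was parsed successfully (so the fuzzer
// prioritizes it in the corpus), 0 when it was rejected.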
func FuzzParseString(data []byte) int {
r, err := ParseString(data)
if err != nil || r == "" {
return 0
}
return 1
}
func FuzzEachKey(data []byte) int {
paths := [][]string{
{"name"},
{"order"},
{"nested", "a"},
{"nested", "b"},
{"nested2", "a"},
{"nested", "nested3", "b"},
{"arr", "[1]", "b"},
{"arrInt", "[3]"},
{"arrInt", "[5]"},
{"nested"},
{"arr", "["},
{"a\n", "b\n"},
}
EachKey(data, func(idx int, value []byte, vt ValueType, err error) {}, paths...)
return 1
}
func FuzzDelete(data []byte) int {
Delete(data, "test")
return 1
}
func FuzzSet(data []byte) int {
_, err := Set(data, []byte(`"new value"`), "test")
if err != nil {
return 0
}
return 1
}
func FuzzObjectEach(data []byte) int {
_ = ObjectEach(data, func(key, value []byte, valueType ValueType, off int) error {
return nil
})
return 1
}
func FuzzParseFloat(data []byte) int {
_, err := ParseFloat(data)
if err != nil {
return 0
}
return 1
}
func FuzzParseInt(data []byte) int {
_, err := ParseInt(data)
if err != nil {
return 0
}
return 1
}
func FuzzParseBool(data []byte) int {
_, err := ParseBoolean(data)
if err != nil {
return 0
}
return 1
}
func FuzzTokenStart(data []byte) int {
_ = tokenStart(data)
return 1
}
func FuzzGetString(data []byte) int {
_, err := GetString(data, "test")
if err != nil {
return 0
}
return 1
}
func FuzzGetFloat(data []byte) int {
_, err := GetFloat(data, "test")
if err != nil {
return 0
}
return 1
}
func FuzzGetInt(data []byte) int {
_, err := GetInt(data, "test")
if err != nil {
return 0
}
return 1
}
func FuzzGetBoolean(data []byte) int {
_, err := GetBoolean(data, "test")
if err != nil {
return 0
}
return 1
}
func FuzzGetUnsafeString(data []byte) int {
_, err := GetUnsafeString(data, "test")
if err != nil {
return 0
}
return 1
}

vendor/github.com/buger/jsonparser/oss-fuzz-build.sh (vendored, deleted)

@@ -1,47 +0,0 @@
#!/bin/bash -eu
git clone https://github.com/dvyukov/go-fuzz-corpus
zip corpus.zip go-fuzz-corpus/json/corpus/*
cp corpus.zip $OUT/fuzzparsestring_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzParseString fuzzparsestring
cp corpus.zip $OUT/fuzzeachkey_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzEachKey fuzzeachkey
cp corpus.zip $OUT/fuzzdelete_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzDelete fuzzdelete
cp corpus.zip $OUT/fuzzset_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzSet fuzzset
cp corpus.zip $OUT/fuzzobjecteach_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzObjectEach fuzzobjecteach
cp corpus.zip $OUT/fuzzparsefloat_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzParseFloat fuzzparsefloat
cp corpus.zip $OUT/fuzzparseint_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzParseInt fuzzparseint
cp corpus.zip $OUT/fuzzparsebool_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzParseBool fuzzparsebool
cp corpus.zip $OUT/fuzztokenstart_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzTokenStart fuzztokenstart
cp corpus.zip $OUT/fuzzgetstring_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzGetString fuzzgetstring
cp corpus.zip $OUT/fuzzgetfloat_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzGetFloat fuzzgetfloat
cp corpus.zip $OUT/fuzzgetint_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzGetInt fuzzgetint
cp corpus.zip $OUT/fuzzgetboolean_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzGetBoolean fuzzgetboolean
cp corpus.zip $OUT/fuzzgetunsafestring_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzGetUnsafeString fuzzgetunsafestring

vendor/github.com/buger/jsonparser/parser.go (vendored, deleted): file diff suppressed because it is too large

vendor/github.com/uptrace/bun/CHANGELOG.md (vendored)

@@ -1,3 +1,44 @@
## [1.2.8](https://github.com/uptrace/bun/compare/v1.2.7...v1.2.8) (2025-01-06)
### Bug Fixes
* comment string zero bytes filtering ([34dfd68](https://github.com/uptrace/bun/commit/34dfd684e371c24b9f59e9b13ef57660931f0bde))
* get m2m table's structKey with driver.Valuer ([f107314](https://github.com/uptrace/bun/commit/f1073147dc73d01dcf8a6ee9252d354ff06a1062)), closes [#1100](https://github.com/uptrace/bun/issues/1100)
* return error when use dest with has-many/many-to-many ([8296774](https://github.com/uptrace/bun/commit/829677486b502e6d5d2ae37814488ae9f2c7386e)), closes [#606](https://github.com/uptrace/bun/issues/606)
* support scan float32 to float32/float64 ([a52e733](https://github.com/uptrace/bun/commit/a52e7339a93f84468878dcaffc42536faa44efae)), closes [#1087](https://github.com/uptrace/bun/issues/1087)
### Features
* add RelationWithOpts method to SelectQuery ([dd3ef52](https://github.com/uptrace/bun/commit/dd3ef522c8a9c656958b73ee5d546854fb7c6edf))
* enhance debugging by adding query comments in headers ([1376d18](https://github.com/uptrace/bun/commit/1376d1870bfe3d89e3630203787f1e87c503d5df))
* sort fields by struct ([5edb672](https://github.com/uptrace/bun/commit/5edb672e320be9b210f06d25c4f4b9e761c1c526)), closes [#1095](https://github.com/uptrace/bun/issues/1095)
## [1.2.7](https://github.com/uptrace/bun/compare/v1.2.6...v1.2.7) (2025-01-01)
### Bug Fixes
* do not create new migrations if nothing to migrate ([5cc961d](https://github.com/uptrace/bun/commit/5cc961d6cc461ad3534728fc4d3cae12bf8b736e))
* has many relation with driver.Valuer ([cb8c42c](https://github.com/uptrace/bun/commit/cb8c42cd3f65d95865c76a594abad815eea1df3c))
* improve range type to support driver.Valuer and sql.Scanner ([856e12b](https://github.com/uptrace/bun/commit/856e12b0d37275a6aa247370f6a8231fd89ca3e7))
* pass correct 'transactional' parameter ([ebdef1b](https://github.com/uptrace/bun/commit/ebdef1b0e9d33a5ca475ab4c2ec2fb44d11d4595))
* **pgdialect:** remove unsigned integer conversion ([ab3c679](https://github.com/uptrace/bun/commit/ab3c679d529dd20d44e789dc6f1d89f9510bde0b)), closes [uptrace/bun#624](https://github.com/uptrace/bun/issues/624)
* remove unused param on table.go and tables.go: canAddr ([d563e2d](https://github.com/uptrace/bun/commit/d563e2dbe95caeb0e00ad1b3e82283431747fe7b))
* replace the link to docs repo in CONTRIBUTING.md ([e120096](https://github.com/uptrace/bun/commit/e12009662ae1ddefcc1337cc5e32e73d77c7def0))
* trim surrounding '' in string literal in DEFAULT clause ([a0dff72](https://github.com/uptrace/bun/commit/a0dff72b6ab0ca24d00c96c923046200dd6112ed))
### Features
* add an ordered map to remove unnecessary dependencies ([9fea143](https://github.com/uptrace/bun/commit/9fea1437d8344d836670e802fd12d3476e8cad86))
* support disable dialect's feature ([5343bd7](https://github.com/uptrace/bun/commit/5343bd7fc4ceda866a7d607388ebb7a89f7f5823))
## [1.2.6](https://github.com/uptrace/bun/compare/v1.2.5...v1.2.6) (2024-11-20)
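The 1.2.7 entry "support disable dialect's feature" is the `DialectOption`/`WithoutFeature` API that appears in the pgdialect hunk further down. A hedged usage sketch (only the names shown in this diff are real; the surrounding setup is assumed):

```go
import (
	"database/sql"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/dialect/feature"
	"github.com/uptrace/bun/dialect/pgdialect"
)

func newBunDB(sqldb *sql.DB) *bun.DB {
	// WithoutFeature strips a flag from the dialect's default feature set.
	return bun.NewDB(sqldb, pgdialect.New(pgdialect.WithoutFeature(feature.DeleteReturning)))
}
```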

vendor/github.com/uptrace/bun/CONTRIBUTING.md (vendored)

@@ -31,4 +31,4 @@ TAG=v1.0.0 ./scripts/tag.sh
## Documentation
-To contribute to the docs visit https://github.com/go-bun/bun-docs
+To contribute to the docs visit https://github.com/uptrace/bun-docs

vendor/github.com/uptrace/bun/db.go (generated, vendored; 10 lines changed)

@@ -36,7 +36,6 @@ type DB struct {
*sql.DB
dialect schema.Dialect
-	features feature.Feature
queryHooks []QueryHook
@@ -52,7 +51,6 @@ func NewDB(sqldb *sql.DB, dialect schema.Dialect, opts ...DBOption) *DB {
db := &DB{
DB: sqldb,
dialect: dialect,
-		features: dialect.Features(),
fmter: schema.NewFormatter(dialect),
}
@@ -231,7 +229,7 @@ func (db *DB) UpdateFQN(alias, column string) Ident {
// HasFeature uses feature package to report whether the underlying DBMS supports this feature.
func (db *DB) HasFeature(feat feature.Feature) bool {
-	return db.fmter.HasFeature(feat)
+	return db.dialect.Features().Has(feat)
}
//------------------------------------------------------------------------------
@@ -513,7 +511,7 @@ func (tx Tx) commitTX() error {
}
func (tx Tx) commitSP() error {
-	if tx.Dialect().Features().Has(feature.MSSavepoint) {
+	if tx.db.HasFeature(feature.MSSavepoint) {
return nil
}
query := "RELEASE SAVEPOINT " + tx.name
@@ -537,7 +535,7 @@ func (tx Tx) rollbackTX() error {
func (tx Tx) rollbackSP() error {
query := "ROLLBACK TO SAVEPOINT " + tx.name
-	if tx.Dialect().Features().Has(feature.MSSavepoint) {
+	if tx.db.HasFeature(feature.MSSavepoint) {
query = "ROLLBACK TRANSACTION " + tx.name
}
_, err := tx.ExecContext(tx.ctx, query)
@@ -601,7 +599,7 @@ func (tx Tx) BeginTx(ctx context.Context, _ *sql.TxOptions) (Tx, error) {
qName := "SP_" + hex.EncodeToString(sp)
query := "SAVEPOINT " + qName
-	if tx.Dialect().Features().Has(feature.MSSavepoint) {
+	if tx.db.HasFeature(feature.MSSavepoint) {
query = "SAVE TRANSACTION " + qName
}
_, err = tx.ExecContext(ctx, query)
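The upshot of these db.go hunks: `DB.HasFeature` now reads the dialect's feature set directly (the cached `features` field is gone), and the savepoint helpers route through `tx.db.HasFeature`. A minimal caller-side sketch, assuming an already-opened `*bun.DB` named `db` and the `feature` package imported as above:

```go
if db.HasFeature(feature.MSSavepoint) {
	// dialect emits "SAVE TRANSACTION ..." instead of "SAVEPOINT ..."
}
```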

vendor/github.com/uptrace/bun/dialect/pgdialect/append.go (vendored)

@@ -11,22 +11,22 @@
)
var (
-	driverValuerType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+	driverValuerType = reflect.TypeFor[driver.Valuer]()

-	stringType = reflect.TypeOf((*string)(nil)).Elem()
-	sliceStringType = reflect.TypeOf([]string(nil))
+	stringType = reflect.TypeFor[string]()
+	sliceStringType = reflect.TypeFor[[]string]()

-	intType = reflect.TypeOf((*int)(nil)).Elem()
-	sliceIntType = reflect.TypeOf([]int(nil))
+	intType = reflect.TypeFor[int]()
+	sliceIntType = reflect.TypeFor[[]int]()

-	int64Type = reflect.TypeOf((*int64)(nil)).Elem()
-	sliceInt64Type = reflect.TypeOf([]int64(nil))
+	int64Type = reflect.TypeFor[int64]()
+	sliceInt64Type = reflect.TypeFor[[]int64]()

-	float64Type = reflect.TypeOf((*float64)(nil)).Elem()
-	sliceFloat64Type = reflect.TypeOf([]float64(nil))
+	float64Type = reflect.TypeFor[float64]()
+	sliceFloat64Type = reflect.TypeFor[[]float64]()

-	timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
-	sliceTimeType = reflect.TypeOf([]time.Time(nil))
+	timeType = reflect.TypeFor[time.Time]()
+	sliceTimeType = reflect.TypeFor[[]time.Time]()
)
func appendTime(buf []byte, tm time.Time) []byte {
@@ -67,9 +67,9 @@ func appendMapStringString(b []byte, m map[string]string) []byte {
b = append(b, '\'')
for key, value := range m {
-		b = arrayAppendString(b, key)
+		b = appendStringElem(b, key)
b = append(b, '=', '>')
-		b = arrayAppendString(b, value)
+		b = appendStringElem(b, value)
b = append(b, ',')
}
if len(m) > 0 {
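The `reflect.TypeFor` swaps in this file are a pure modernization: `reflect.TypeFor[T]()` (Go 1.22+) returns the same `reflect.Type` as the older pattern, including for interface types. A small illustrative sketch (snippet only; imports assumed):

```go
older := reflect.TypeOf((*driver.Valuer)(nil)).Elem()
newer := reflect.TypeFor[driver.Valuer]()
fmt.Println(older == newer) // true
```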

vendor/github.com/uptrace/bun/dialect/pgdialect/array.go (vendored)

@@ -3,13 +3,11 @@
import (
"database/sql"
"database/sql/driver"
"encoding/hex"
"fmt"
"math"
"reflect"
"strconv"
"time"
"unicode/utf8"
"github.com/uptrace/bun/dialect"
"github.com/uptrace/bun/internal"
@@ -146,44 +144,21 @@ func (d *Dialect) arrayElemAppender(typ reflect.Type) schema.AppenderFunc {
}
switch typ.Kind() {
case reflect.String:
-		return arrayAppendStringValue
+		return appendStringElemValue
case reflect.Slice:
if typ.Elem().Kind() == reflect.Uint8 {
-			return arrayAppendBytesValue
+			return appendBytesElemValue
}
}
return schema.Appender(d, typ)
}
-func arrayAppend(fmter schema.Formatter, b []byte, v interface{}) []byte {
-	switch v := v.(type) {
-	case int64:
-		return strconv.AppendInt(b, v, 10)
-	case float64:
-		return arrayAppendFloat64(b, v)
-	case bool:
-		return dialect.AppendBool(b, v)
-	case []byte:
-		return arrayAppendBytes(b, v)
-	case string:
-		return arrayAppendString(b, v)
-	case time.Time:
-		b = append(b, '"')
-		b = appendTime(b, v)
-		b = append(b, '"')
-		return b
-	default:
-		err := fmt.Errorf("pgdialect: can't append %T", v)
-		return dialect.AppendError(b, err)
-	}
-}
-
-func arrayAppendStringValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
-	return arrayAppendString(b, v.String())
+func appendStringElemValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
+	return appendStringElem(b, v.String())
}

-func arrayAppendBytesValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
-	return arrayAppendBytes(b, v.Bytes())
+func appendBytesElemValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
+	return appendBytesElem(b, v.Bytes())
}
func arrayAppendDriverValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
@@ -191,7 +166,7 @@ func arrayAppendDriverValue(fmter schema.Formatter, b []byte, v reflect.Value) [
if err != nil {
return dialect.AppendError(b, err)
}
-	return arrayAppend(fmter, b, iface)
+	return appendElem(b, iface)
}
func appendStringSliceValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
@@ -208,7 +183,7 @@ func appendStringSlice(b []byte, ss []string) []byte {
b = append(b, '{')
for _, s := range ss {
-		b = arrayAppendString(b, s)
+		b = appendStringElem(b, s)
b = append(b, ',')
}
if len(ss) > 0 {
@@ -496,7 +471,7 @@ func decodeIntSlice(src interface{}) ([]int, error) {
continue
}
-		n, err := strconv.Atoi(bytesToString(elem))
+		n, err := strconv.Atoi(internal.String(elem))
if err != nil {
return nil, err
}
@@ -545,7 +520,7 @@ func decodeInt64Slice(src interface{}) ([]int64, error) {
continue
}
-		n, err := strconv.ParseInt(bytesToString(elem), 10, 64)
+		n, err := strconv.ParseInt(internal.String(elem), 10, 64)
if err != nil {
return nil, err
}
@@ -594,7 +569,7 @@ func scanFloat64Slice(src interface{}) ([]float64, error) {
continue
}
-		n, err := strconv.ParseFloat(bytesToString(elem), 64)
+		n, err := strconv.ParseFloat(internal.String(elem), 64)
if err != nil {
return nil, err
}
@@ -610,57 +585,10 @@ func toBytes(src interface{}) ([]byte, error) {
func toBytes(src interface{}) ([]byte, error) {
switch src := src.(type) {
case string:
-		return stringToBytes(src), nil
+		return internal.Bytes(src), nil
case []byte:
return src, nil
default:
return nil, fmt.Errorf("pgdialect: got %T, wanted []byte or string", src)
}
}
-//------------------------------------------------------------------------------
-
-func arrayAppendBytes(b []byte, bs []byte) []byte {
-	if bs == nil {
-		return dialect.AppendNull(b)
-	}
-	b = append(b, `"\\x`...)
-	s := len(b)
-	b = append(b, make([]byte, hex.EncodedLen(len(bs)))...)
-	hex.Encode(b[s:], bs)
-	b = append(b, '"')
-	return b
-}
-
-func arrayAppendString(b []byte, s string) []byte {
-	b = append(b, '"')
-	for _, r := range s {
-		switch r {
-		case 0:
-			// ignore
-		case '\'':
-			b = append(b, "''"...)
-		case '"':
-			b = append(b, '\\', '"')
-		case '\\':
-			b = append(b, '\\', '\\')
-		default:
-			if r < utf8.RuneSelf {
-				b = append(b, byte(r))
-				break
-			}
-			l := len(b)
-			if cap(b)-l < utf8.UTFMax {
-				b = append(b, make([]byte, utf8.UTFMax)...)
-			}
-			n := utf8.EncodeRune(b[l:l+utf8.UTFMax], r)
-			b = b[:l+n]
-		}
-	}
-	b = append(b, '"')
-	return b
-}

View file

@ -1 +0,0 @@
package pgdialect

View file

@ -3,7 +3,6 @@
import (
"database/sql"
"fmt"
"strconv"
"strings"
"github.com/uptrace/bun"
@ -34,7 +33,7 @@ type Dialect struct {
var _ sqlschema.InspectorDialect = (*Dialect)(nil)
var _ sqlschema.MigratorDialect = (*Dialect)(nil)
func New() *Dialect {
func New(opts ...DialectOption) *Dialect {
d := new(Dialect)
d.tables = schema.NewTables(d)
d.features = feature.CTE |
@ -55,9 +54,22 @@ func New() *Dialect {
feature.GeneratedIdentity |
feature.CompositeIn |
feature.DeleteReturning
for _, opt := range opts {
opt(d)
}
return d
}
type DialectOption func(d *Dialect)
func WithoutFeature(other feature.Feature) DialectOption {
return func(d *Dialect) {
d.features = d.features.Remove(other)
}
}
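
For context, New now accepts functional options, so callers can opt out of individual dialect features at construction time. A minimal usage sketch (the feature chosen here is only an example):

package main

import (
	"github.com/uptrace/bun/dialect/feature"
	"github.com/uptrace/bun/dialect/pgdialect"
)

func main() {
	// Build a Postgres dialect without DELETE ... RETURNING support.
	d := pgdialect.New(pgdialect.WithoutFeature(feature.DeleteReturning))
	_ = d
}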
func (d *Dialect) Init(*sql.DB) {}
func (d *Dialect) Name() dialect.Name {
@ -115,14 +127,6 @@ func (d *Dialect) IdentQuote() byte {
return '"'
}
func (d *Dialect) AppendUint32(b []byte, n uint32) []byte {
return strconv.AppendInt(b, int64(int32(n)), 10)
}
func (d *Dialect) AppendUint64(b []byte, n uint64) []byte {
return strconv.AppendInt(b, int64(n), 10)
}
func (d *Dialect) AppendSequence(b []byte, _ *schema.Table, _ *schema.Field) []byte {
return appendGeneratedAsIdentity(b)
}

View file

@ -0,0 +1,87 @@
package pgdialect
import (
"database/sql/driver"
"encoding/hex"
"fmt"
"strconv"
"time"
"unicode/utf8"
"github.com/uptrace/bun/dialect"
)
func appendElem(buf []byte, val interface{}) []byte {
switch val := val.(type) {
case int64:
return strconv.AppendInt(buf, val, 10)
case float64:
return arrayAppendFloat64(buf, val)
case bool:
return dialect.AppendBool(buf, val)
case []byte:
return appendBytesElem(buf, val)
case string:
return appendStringElem(buf, val)
case time.Time:
buf = append(buf, '"')
buf = appendTime(buf, val)
buf = append(buf, '"')
return buf
case driver.Valuer:
val2, err := val.Value()
if err != nil {
err := fmt.Errorf("pgdialect: can't append elem value: %w", err)
return dialect.AppendError(buf, err)
}
return appendElem(buf, val2)
default:
err := fmt.Errorf("pgdialect: can't append elem %T", val)
return dialect.AppendError(buf, err)
}
}
func appendBytesElem(b []byte, bs []byte) []byte {
if bs == nil {
return dialect.AppendNull(b)
}
b = append(b, `"\\x`...)
s := len(b)
b = append(b, make([]byte, hex.EncodedLen(len(bs)))...)
hex.Encode(b[s:], bs)
b = append(b, '"')
return b
}
func appendStringElem(b []byte, s string) []byte {
b = append(b, '"')
for _, r := range s {
switch r {
case 0:
// ignore
case '\'':
b = append(b, "''"...)
case '"':
b = append(b, '\\', '"')
case '\\':
b = append(b, '\\', '\\')
default:
if r < utf8.RuneSelf {
b = append(b, byte(r))
break
}
l := len(b)
if cap(b)-l < utf8.UTFMax {
b = append(b, make([]byte, utf8.UTFMax)...)
}
n := utf8.EncodeRune(b[l:l+utf8.UTFMax], r)
b = b[:l+n]
}
}
b = append(b, '"')
return b
}
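
As a reference for the escaping rules above, a hypothetical package-internal check (not upstream code; the inputs are made up):

package pgdialect

func exampleElemEscaping() {
	s := appendStringElem(nil, `a "quoted" \ value`)
	// s now holds: "a \"quoted\" \\ value" (double-quoted, escapes added)
	_ = s

	h := appendBytesElem(nil, []byte{0xCA, 0xFE})
	// h now holds: "\\xcafe" (hex-encoded bytea element)
	_ = h
}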

View file

@ -5,8 +5,8 @@
"strings"
"github.com/uptrace/bun"
"github.com/uptrace/bun/internal/ordered"
"github.com/uptrace/bun/migrate/sqlschema"
orderedmap "github.com/wk8/go-ordered-map/v2"
)
type (
@ -34,7 +34,7 @@ func newInspector(db *bun.DB, options ...sqlschema.InspectorOption) *Inspector {
func (in *Inspector) Inspect(ctx context.Context) (sqlschema.Database, error) {
dbSchema := Schema{
Tables: orderedmap.New[string, sqlschema.Table](),
Tables: ordered.NewMap[string, sqlschema.Table](),
ForeignKeys: make(map[sqlschema.ForeignKey]string),
}
@ -61,7 +61,7 @@ func (in *Inspector) Inspect(ctx context.Context) (sqlschema.Database, error) {
return dbSchema, err
}
colDefs := orderedmap.New[string, sqlschema.Column]()
colDefs := ordered.NewMap[string, sqlschema.Column]()
uniqueGroups := make(map[string][]string)
for _, c := range columns {
@ -72,7 +72,7 @@ func (in *Inspector) Inspect(ctx context.Context) (sqlschema.Database, error) {
def = strings.ToLower(def)
}
colDefs.Set(c.Name, &Column{
colDefs.Store(c.Name, &Column{
Name: c.Name,
SQLType: c.DataType,
VarcharLen: c.VarcharLen,
@ -103,7 +103,7 @@ func (in *Inspector) Inspect(ctx context.Context) (sqlschema.Database, error) {
}
}
dbSchema.Tables.Set(table.Name, &Table{
dbSchema.Tables.Store(table.Name, &Table{
Schema: table.Schema,
Name: table.Name,
Columns: colDefs,

View file

@ -0,0 +1,107 @@
package pgdialect
import (
"bytes"
"encoding/hex"
"github.com/uptrace/bun/internal/parser"
)
type pgparser struct {
parser.Parser
buf []byte
}
func newParser(b []byte) *pgparser {
p := new(pgparser)
p.Reset(b)
return p
}
func (p *pgparser) ReadLiteral(ch byte) []byte {
p.Unread()
lit, _ := p.ReadSep(',')
return lit
}
func (p *pgparser) ReadUnescapedSubstring(ch byte) ([]byte, error) {
return p.readSubstring(ch, false)
}
func (p *pgparser) ReadSubstring(ch byte) ([]byte, error) {
return p.readSubstring(ch, true)
}
func (p *pgparser) readSubstring(ch byte, escaped bool) ([]byte, error) {
ch, err := p.ReadByte()
if err != nil {
return nil, err
}
p.buf = p.buf[:0]
for {
if ch == '"' {
break
}
next, err := p.ReadByte()
if err != nil {
return nil, err
}
if ch == '\\' {
switch next {
case '\\', '"':
p.buf = append(p.buf, next)
ch, err = p.ReadByte()
if err != nil {
return nil, err
}
default:
p.buf = append(p.buf, '\\')
ch = next
}
continue
}
if escaped && ch == '\'' && next == '\'' {
p.buf = append(p.buf, next)
ch, err = p.ReadByte()
if err != nil {
return nil, err
}
continue
}
p.buf = append(p.buf, ch)
ch = next
}
if bytes.HasPrefix(p.buf, []byte("\\x")) && len(p.buf)%2 == 0 {
data := p.buf[2:]
buf := make([]byte, hex.DecodedLen(len(data)))
n, err := hex.Decode(buf, data)
if err != nil {
return nil, err
}
return buf[:n], nil
}
return p.buf, nil
}
func (p *pgparser) ReadRange(ch byte) ([]byte, error) {
p.buf = p.buf[:0]
p.buf = append(p.buf, ch)
for p.Valid() {
ch = p.Read()
p.buf = append(p.buf, ch)
if ch == ']' || ch == ')' {
break
}
}
return p.buf, nil
}
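
To illustrate the substring handling, a hypothetical internal sketch (the input literal is made up):

package pgdialect

func exampleReadSubstring() {
	p := newParser([]byte(`"\\x68690a"`))
	ch, _ := p.ReadByte() // consume the opening '"'
	elem, _ := p.ReadSubstring(ch)
	// elem == []byte("hi\n"): the element starts with \x and has an
	// even length, so it is hex-decoded.
	_ = elem
}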

View file

@ -1,15 +1,12 @@
package pgdialect
import (
"bytes"
"database/sql"
"encoding/hex"
"fmt"
"io"
"time"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/internal/parser"
"github.com/uptrace/bun/schema"
)
@ -41,7 +38,10 @@ func NewRange[T any](lower, upper T) Range[T] {
var _ sql.Scanner = (*Range[any])(nil)
func (r *Range[T]) Scan(anySrc any) (err error) {
src := anySrc.([]byte)
src, ok := anySrc.([]byte)
if !ok {
return fmt.Errorf("pgdialect: Range can't scan %T", anySrc)
}
if len(src) == 0 {
return io.ErrUnexpectedEOF
@ -90,18 +90,6 @@ func (r *Range[T]) AppendQuery(fmt schema.Formatter, buf []byte) ([]byte, error)
return buf, nil
}
func appendElem(buf []byte, val any) []byte {
switch val := val.(type) {
case time.Time:
buf = append(buf, '"')
buf = appendTime(buf, val)
buf = append(buf, '"')
return buf
default:
panic(fmt.Errorf("unsupported range type: %T", val))
}
}
func scanElem(ptr any, src []byte) ([]byte, error) {
switch ptr := ptr.(type) {
case *time.Time:
@ -117,6 +105,17 @@ func scanElem(ptr any, src []byte) ([]byte, error) {
*ptr = tm
return src, nil
case sql.Scanner:
src, str, err := readStringLiteral(src)
if err != nil {
return nil, err
}
if err := ptr.Scan(str); err != nil {
return nil, err
}
return src, nil
default:
panic(fmt.Errorf("unsupported range type: %T", ptr))
}
@ -137,104 +136,3 @@ func readStringLiteral(src []byte) ([]byte, []byte, error) {
src = p.Remaining()
return src, str, nil
}
//------------------------------------------------------------------------------
type pgparser struct {
parser.Parser
buf []byte
}
func newParser(b []byte) *pgparser {
p := new(pgparser)
p.Reset(b)
return p
}
func (p *pgparser) ReadLiteral(ch byte) []byte {
p.Unread()
lit, _ := p.ReadSep(',')
return lit
}
func (p *pgparser) ReadUnescapedSubstring(ch byte) ([]byte, error) {
return p.readSubstring(ch, false)
}
func (p *pgparser) ReadSubstring(ch byte) ([]byte, error) {
return p.readSubstring(ch, true)
}
func (p *pgparser) readSubstring(ch byte, escaped bool) ([]byte, error) {
ch, err := p.ReadByte()
if err != nil {
return nil, err
}
p.buf = p.buf[:0]
for {
if ch == '"' {
break
}
next, err := p.ReadByte()
if err != nil {
return nil, err
}
if ch == '\\' {
switch next {
case '\\', '"':
p.buf = append(p.buf, next)
ch, err = p.ReadByte()
if err != nil {
return nil, err
}
default:
p.buf = append(p.buf, '\\')
ch = next
}
continue
}
if escaped && ch == '\'' && next == '\'' {
p.buf = append(p.buf, next)
ch, err = p.ReadByte()
if err != nil {
return nil, err
}
continue
}
p.buf = append(p.buf, ch)
ch = next
}
if bytes.HasPrefix(p.buf, []byte("\\x")) && len(p.buf)%2 == 0 {
data := p.buf[2:]
buf := make([]byte, hex.DecodedLen(len(data)))
n, err := hex.Decode(buf, data)
if err != nil {
return nil, err
}
return buf[:n], nil
}
return p.buf, nil
}
func (p *pgparser) ReadRange(ch byte) ([]byte, error) {
p.buf = p.buf[:0]
p.buf = append(p.buf, ch)
for p.Valid() {
ch = p.Read()
p.buf = append(p.buf, ch)
if ch == ']' || ch == ')' {
break
}
}
return p.buf, nil
}

View file

@ -1,11 +0,0 @@
// +build appengine
package pgdialect
func bytesToString(b []byte) string {
return string(b)
}
func stringToBytes(s string) []byte {
return []byte(s)
}

View file

@ -1,11 +0,0 @@
package pgdialect
import (
"reflect"
"github.com/uptrace/bun/schema"
)
func scanner(typ reflect.Type) schema.ScannerFunc {
return schema.Scanner(typ)
}

View file

@ -44,10 +44,10 @@
)
var (
ipType = reflect.TypeOf((*net.IP)(nil)).Elem()
ipNetType = reflect.TypeOf((*net.IPNet)(nil)).Elem()
jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem()
nullStringType = reflect.TypeOf((*sql.NullString)(nil)).Elem()
ipType = reflect.TypeFor[net.IP]()
ipNetType = reflect.TypeFor[net.IPNet]()
jsonRawMessageType = reflect.TypeFor[json.RawMessage]()
nullStringType = reflect.TypeFor[sql.NullString]()
)
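
This and the later reflect changes swap the pointer-Elem idiom for reflect.TypeFor, available since Go 1.22; the two forms are equivalent:

package main

import (
	"fmt"
	"net"
	"reflect"
)

func main() {
	legacy := reflect.TypeOf((*net.IP)(nil)).Elem()
	modern := reflect.TypeFor[net.IP]()
	fmt.Println(legacy == modern) // true
}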
func (d *Dialect) DefaultVarcharLen() int {

View file

@ -1,18 +0,0 @@
// +build !appengine
package pgdialect
import "unsafe"
func bytesToString(b []byte) string {
return *(*string)(unsafe.Pointer(&b))
}
func stringToBytes(s string) []byte {
return *(*[]byte)(unsafe.Pointer(
&struct {
string
Cap int
}{s, len(s)},
))
}

View file

@ -2,5 +2,5 @@
// Version is the current release version.
func Version() string {
return "1.2.6"
return "1.2.8"
}

View file

@ -26,7 +26,7 @@ type Dialect struct {
features feature.Feature
}
func New() *Dialect {
func New(opts ...DialectOption) *Dialect {
d := new(Dialect)
d.tables = schema.NewTables(d)
d.features = feature.CTE |
@ -42,9 +42,22 @@ func New() *Dialect {
feature.AutoIncrement |
feature.CompositeIn |
feature.DeleteReturning
for _, opt := range opts {
opt(d)
}
return d
}
type DialectOption func(d *Dialect)
func WithoutFeature(other feature.Feature) DialectOption {
return func(d *Dialect) {
d.features = d.features.Remove(other)
}
}
func (d *Dialect) Init(*sql.DB) {}
func (d *Dialect) Name() dialect.Name {

View file

@ -1,11 +0,0 @@
package sqlitedialect
import (
"reflect"
"github.com/uptrace/bun/schema"
)
func scanner(typ reflect.Type) schema.ScannerFunc {
return schema.Scanner(typ)
}

View file

@ -2,5 +2,5 @@
// Version is the current release version.
func Version() string {
return "1.2.6"
return "1.2.8"
}

View file

@ -16,6 +16,7 @@
"github.com/uptrace/bun"
"github.com/uptrace/bun/dialect"
"github.com/uptrace/bun/internal"
"github.com/uptrace/bun/schema"
"github.com/uptrace/opentelemetry-go-extra/otelsql"
)
@ -169,7 +170,7 @@ func (h *QueryHook) eventQuery(event *bun.QueryEvent) string {
func unformattedQuery(event *bun.QueryEvent) string {
if event.IQuery != nil {
if b, err := event.IQuery.AppendQuery(schema.NewNopFormatter(), nil); err == nil {
return bytesToString(b)
return internal.String(b)
}
}
return string(event.QueryTemplate)

View file

@ -1,11 +0,0 @@
// +build appengine
package internal
func bytesToString(b []byte) string {
return string(b)
}
func stringToBytes(s string) []byte {
return []byte(s)
}

View file

@ -1,20 +0,0 @@
//go:build !appengine
// +build !appengine
package bunotel
import "unsafe"
func bytesToString(b []byte) string {
if len(b) == 0 {
return ""
}
return unsafe.String(&b[0], len(b))
}
func stringToBytes(s string) []byte {
if s == "" {
return []byte{}
}
return unsafe.Slice(unsafe.StringData(s), len(s))
}

View file

@ -2,7 +2,7 @@
import "reflect"
var ifaceType = reflect.TypeOf((*interface{})(nil)).Elem()
var ifaceType = reflect.TypeFor[interface{}]()
type MapKey struct {
iface interface{}

125
vendor/github.com/uptrace/bun/internal/ordered/map.go generated vendored Normal file
View file

@ -0,0 +1,125 @@
package ordered
// Pair represents a key-value pair in the ordered map.
type Pair[K comparable, V any] struct {
Key K
Value V
next, prev *Pair[K, V] // Pointers to the next and previous pairs in the linked list.
}
// Map represents an ordered map.
type Map[K comparable, V any] struct {
root *Pair[K, V] // Sentinel node for the circular doubly linked list.
zero V // Zero value for the value type.
pairs map[K]*Pair[K, V] // Map from keys to pairs.
}
// NewMap creates a new ordered map with optional initial data.
func NewMap[K comparable, V any](initialData ...Pair[K, V]) *Map[K, V] {
m := &Map[K, V]{}
m.Clear()
for _, pair := range initialData {
m.Store(pair.Key, pair.Value)
}
return m
}
// Clear removes all pairs from the map.
func (m *Map[K, V]) Clear() {
if m.root != nil {
m.root.next, m.root.prev = nil, nil // avoid memory leaks
}
for _, pair := range m.pairs {
pair.next, pair.prev = nil, nil // avoid memory leaks
}
m.root = &Pair[K, V]{}
m.root.next, m.root.prev = m.root, m.root
m.pairs = make(map[K]*Pair[K, V])
}
// Len returns the number of pairs in the map.
func (m *Map[K, V]) Len() int {
return len(m.pairs)
}
// Load returns the value associated with the key, and a boolean indicating if the key was found.
func (m *Map[K, V]) Load(key K) (V, bool) {
if pair, present := m.pairs[key]; present {
return pair.Value, true
}
return m.zero, false
}
// Value returns the value associated with the key, or the zero value if the key is not found.
func (m *Map[K, V]) Value(key K) V {
if pair, present := m.pairs[key]; present {
return pair.Value
}
return m.zero
}
// Store adds or updates a key-value pair in the map.
func (m *Map[K, V]) Store(key K, value V) {
if pair, present := m.pairs[key]; present {
pair.Value = value
return
}
pair := &Pair[K, V]{Key: key, Value: value}
pair.prev = m.root.prev
m.root.prev.next = pair
m.root.prev = pair
pair.next = m.root
m.pairs[key] = pair
}
// Delete removes a key-value pair from the map.
func (m *Map[K, V]) Delete(key K) {
if pair, present := m.pairs[key]; present {
pair.prev.next = pair.next
pair.next.prev = pair.prev
pair.next, pair.prev = nil, nil // avoid memory leaks
delete(m.pairs, key)
}
}
// Range calls the given function for each key-value pair in the map in order.
func (m *Map[K, V]) Range(yield func(key K, value V) bool) {
for pair := m.root.next; pair != m.root; pair = pair.next {
if !yield(pair.Key, pair.Value) {
break
}
}
}
// Keys returns a slice of all keys in the map in order.
func (m *Map[K, V]) Keys() []K {
keys := make([]K, 0, len(m.pairs))
m.Range(func(key K, _ V) bool {
keys = append(keys, key)
return true
})
return keys
}
// Values returns a slice of all values in the map in order.
func (m *Map[K, V]) Values() []V {
values := make([]V, 0, len(m.pairs))
m.Range(func(_ K, value V) bool {
values = append(values, value)
return true
})
return values
}
// Pairs returns a slice of all key-value pairs in the map in order.
func (m *Map[K, V]) Pairs() []Pair[K, V] {
pairs := make([]Pair[K, V], 0, len(m.pairs))
m.Range(func(key K, value V) bool {
pairs = append(pairs, Pair[K, V]{Key: key, Value: value})
return true
})
return pairs
}
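
The vendored ordered.Map preserves insertion order with a sentinel-rooted circular doubly linked list layered over a plain map. The package is internal to bun, so outside code cannot import it; the following usage sketch is purely illustrative:

package main

import (
	"fmt"

	"github.com/uptrace/bun/internal/ordered"
)

func main() {
	m := ordered.NewMap[string, int]()
	m.Store("b", 2)
	m.Store("a", 1)
	m.Store("b", 20) // updates the value but keeps the original position

	m.Range(func(k string, v int) bool {
		fmt.Println(k, v) // prints "b 20", then "a 1"
		return true
	})
}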

View file

@ -196,6 +196,9 @@ func (am *AutoMigrator) plan(ctx context.Context) (*changeset, error) {
func (am *AutoMigrator) Migrate(ctx context.Context, opts ...MigrationOption) (*MigrationGroup, error) {
migrations, _, err := am.createSQLMigrations(ctx, false)
if err != nil {
if err == errNothingToMigrate {
return new(MigrationGroup), nil
}
return nil, fmt.Errorf("auto migrate: %w", err)
}
@ -214,23 +217,37 @@ func (am *AutoMigrator) Migrate(ctx context.Context, opts ...MigrationOption) (*
// CreateSQLMigrations writes required changes to a new migration file.
// Use migrate.Migrator to apply the generated migrations.
func (am *AutoMigrator) CreateSQLMigrations(ctx context.Context) ([]*MigrationFile, error) {
_, files, err := am.createSQLMigrations(ctx, true)
_, files, err := am.createSQLMigrations(ctx, false)
if err == errNothingToMigrate {
return files, nil
}
return files, err
}
// CreateTxSQLMigrations writes required changes to a new migration file, making sure they will be executed
// in a transaction when applied. Use migrate.Migrator to apply the generated migrations.
func (am *AutoMigrator) CreateTxSQLMigrations(ctx context.Context) ([]*MigrationFile, error) {
_, files, err := am.createSQLMigrations(ctx, false)
_, files, err := am.createSQLMigrations(ctx, true)
if err == errNothingToMigrate {
return files, nil
}
return files, err
}
// errNothingToMigrate is a sentinel error which means the database is already in the desired state.
// It should not be returned to the user; return a nil error instead.
var errNothingToMigrate = errors.New("nothing to migrate")
func (am *AutoMigrator) createSQLMigrations(ctx context.Context, transactional bool) (*Migrations, []*MigrationFile, error) {
changes, err := am.plan(ctx)
if err != nil {
return nil, nil, fmt.Errorf("create sql migrations: %w", err)
}
if changes.Len() == 0 {
return nil, nil, errNothingToMigrate
}
name, _ := genMigrationName(am.schemaName + "_auto")
migrations := NewMigrations(am.migrationsOpts...)
migrations.Add(Migration{
@ -282,6 +299,10 @@ func (am *AutoMigrator) createSQL(_ context.Context, migrations *Migrations, fna
return mf, nil
}
func (c *changeset) Len() int {
return len(c.operations)
}
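
With errNothingToMigrate absorbed as shown above, an up-to-date schema now yields an empty migration group rather than an error. A sketch of caller-side handling (the model and setup are hypothetical):

package main

import (
	"context"
	"log"

	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"
)

// User is a placeholder model for this sketch.
type User struct {
	ID   int64 `bun:",pk,autoincrement"`
	Name string
}

func runAutoMigrate(ctx context.Context, db *bun.DB) error {
	m, err := migrate.NewAutoMigrator(db, migrate.WithModel((*User)(nil)))
	if err != nil {
		return err
	}
	group, err := m.Migrate(ctx)
	if err != nil {
		return err
	}
	if group.IsZero() {
		log.Println("schema already up to date")
	}
	return nil
}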
// Func creates a MigrationFunc that applies all operations in the changeset.
func (c *changeset) Func(m sqlschema.Migrator) MigrationFunc {
return func(ctx context.Context, db *bun.DB) error {

View file

@ -26,20 +26,21 @@ func (d *detector) detectChanges() *changeset {
targetTables := d.target.GetTables()
RenameCreate:
for wantName, wantTable := range targetTables.FromOldest() {
for _, wantPair := range targetTables.Pairs() {
wantName, wantTable := wantPair.Key, wantPair.Value
// A table with this name exists in the database. We assume that schema objects won't
// be renamed to an already existing name, nor do we support such cases.
// Simply check if the table definition has changed.
if haveTable, ok := currentTables.Get(wantName); ok {
if haveTable, ok := currentTables.Load(wantName); ok {
d.detectColumnChanges(haveTable, wantTable, true)
d.detectConstraintChanges(haveTable, wantTable)
continue
}
// Find all renamed tables. We assume that renamed tables have the same signature.
for haveName, haveTable := range currentTables.FromOldest() {
if _, exists := targetTables.Get(haveName); !exists && d.canRename(haveTable, wantTable) {
for _, havePair := range currentTables.Pairs() {
haveName, haveTable := havePair.Key, havePair.Value
if _, exists := targetTables.Load(haveName); !exists && d.canRename(haveTable, wantTable) {
d.changes.Add(&RenameTableOp{
TableName: haveTable.GetName(),
NewName: wantName,
@ -65,8 +66,9 @@ func (d *detector) detectChanges() *changeset {
}
// Drop any remaining "current" tables which do not have a model.
for name, table := range currentTables.FromOldest() {
if _, keep := targetTables.Get(name); !keep {
for _, tPair := range currentTables.Pairs() {
name, table := tPair.Key, tPair.Value
if _, keep := targetTables.Load(name); !keep {
d.changes.Add(&DropTableOp{
TableName: table.GetName(),
})
@ -103,12 +105,13 @@ func (d *detector) detectColumnChanges(current, target sqlschema.Table, checkTyp
targetColumns := target.GetColumns()
ChangeRename:
for tName, tCol := range targetColumns.FromOldest() {
for _, tPair := range targetColumns.Pairs() {
tName, tCol := tPair.Key, tPair.Value
// This column exists in the database, so it hasn't been renamed, dropped, or added.
// Still, we should not delete(columns, thisColumn), because later we will need to
// check that we do not try to rename a column to a name that already exists.
if cCol, ok := currentColumns.Get(tName); ok {
if cCol, ok := currentColumns.Load(tName); ok {
if checkType && !d.equalColumns(cCol, tCol) {
d.changes.Add(&ChangeColumnTypeOp{
TableName: target.GetName(),
@ -122,9 +125,10 @@ func (d *detector) detectColumnChanges(current, target sqlschema.Table, checkTyp
// Column tName does not exist in the database -- it's been either renamed or added.
// Find renamed columns first.
for cName, cCol := range currentColumns.FromOldest() {
for _, cPair := range currentColumns.Pairs() {
cName, cCol := cPair.Key, cPair.Value
// Cannot rename if a column with this name already exists or the types differ.
if _, exists := targetColumns.Get(cName); exists || !d.equalColumns(tCol, cCol) {
if _, exists := targetColumns.Load(cName); exists || !d.equalColumns(tCol, cCol) {
continue
}
d.changes.Add(&RenameColumnOp{
@ -149,8 +153,9 @@ func (d *detector) detectColumnChanges(current, target sqlschema.Table, checkTyp
}
// Drop columns which do not exist in the target schema and were not renamed.
for cName, cCol := range currentColumns.FromOldest() {
if _, keep := targetColumns.Get(cName); !keep {
for _, cPair := range currentColumns.Pairs() {
cName, cCol := cPair.Key, cPair.Value
if _, keep := targetColumns.Load(cName); !keep {
d.changes.Add(&DropColumnOp{
TableName: target.GetName(),
ColumnName: cName,
@ -325,7 +330,7 @@ func newSignature(t sqlschema.Table, eq CompareTypeFunc) signature {
// scan iterates over table's field and counts occurrences of each unique column definition.
func (s *signature) scan(t sqlschema.Table) {
for _, icol := range t.GetColumns().FromOldest() {
for _, icol := range t.GetColumns().Values() {
scanCol := icol.(*sqlschema.BaseColumn)
// This is slightly more expensive than if the columns could be compared directly
// and we always did s.underlying[col]++, but we get type-equivalence in return.

View file

@ -4,12 +4,12 @@
"slices"
"strings"
"github.com/uptrace/bun/internal/ordered"
"github.com/uptrace/bun/schema"
orderedmap "github.com/wk8/go-ordered-map/v2"
)
type Database interface {
GetTables() *orderedmap.OrderedMap[string, Table]
GetTables() *ordered.Map[string, Table]
GetForeignKeys() map[ForeignKey]string
}
@ -20,11 +20,11 @@ type Database interface {
// Dialects and only dialects can use it to implement the Database interface.
// Other packages must use the Database interface.
type BaseDatabase struct {
Tables *orderedmap.OrderedMap[string, Table]
Tables *ordered.Map[string, Table]
ForeignKeys map[ForeignKey]string
}
func (ds BaseDatabase) GetTables() *orderedmap.OrderedMap[string, Table] {
func (ds BaseDatabase) GetTables() *ordered.Map[string, Table] {
return ds.Tables
}

View file

@ -7,8 +7,8 @@
"strings"
"github.com/uptrace/bun"
"github.com/uptrace/bun/internal/ordered"
"github.com/uptrace/bun/schema"
orderedmap "github.com/wk8/go-ordered-map/v2"
)
type InspectorDialect interface {
@ -102,25 +102,25 @@ func (bmi *BunModelInspector) Inspect(ctx context.Context) (Database, error) {
BaseDatabase: BaseDatabase{
ForeignKeys: make(map[ForeignKey]string),
},
Tables: orderedmap.New[string, Table](),
Tables: ordered.NewMap[string, Table](),
}
for _, t := range bmi.tables.All() {
if t.Schema != bmi.SchemaName {
continue
}
columns := orderedmap.New[string, Column]()
columns := ordered.NewMap[string, Column]()
for _, f := range t.Fields {
sqlType, length, err := parseLen(f.CreateTableSQLType)
if err != nil {
return nil, fmt.Errorf("parse length in %q: %w", f.CreateTableSQLType, err)
}
columns.Set(f.Name, &BaseColumn{
columns.Store(f.Name, &BaseColumn{
Name: f.Name,
SQLType: strings.ToLower(sqlType), // TODO(dyma): maybe this is not necessary after Column.Eq()
VarcharLen: length,
DefaultValue: exprToLower(f.SQLDefault),
DefaultValue: exprOrLiteral(f.SQLDefault),
IsNullable: !f.NotNull,
IsAutoIncrement: f.AutoIncrement,
IsIdentity: f.Identity,
@ -162,7 +162,7 @@ func (bmi *BunModelInspector) Inspect(ctx context.Context) (Database, error) {
// produces
// schema.Table{ Schema: "favourite", Name: "favourite.books" }
tableName := strings.TrimPrefix(t.Name, t.Schema+".")
state.Tables.Set(tableName, &BunTable{
state.Tables.Store(tableName, &BunTable{
BaseTable: BaseTable{
Schema: t.Schema,
Name: tableName,
@ -211,12 +211,13 @@ func parseLen(typ string) (string, int, error) {
return typ[:paren], length, nil
}
// exprToLower converts string to lowercase, if it does not contain a string literal 'lit'.
// exprOrLiteral converts the expression to lowercase, unless it is a string literal 'lit',
// in which case it trims the surrounding quotes and preserves case.
// Use it to ensure that user-defined default values in the models are always comparable
// to those returned by the database inspector, regardless of the case convention in individual drivers.
func exprToLower(s string) string {
func exprOrLiteral(s string) string {
if strings.HasPrefix(s, "'") && strings.HasSuffix(s, "'") {
return s
return strings.Trim(s, "'")
}
return strings.ToLower(s)
}
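
A hypothetical illustration of the renamed helper's behavior:

package sqlschema

func exampleExprOrLiteral() {
	_ = exprOrLiteral("'Hello'")           // "Hello": quotes trimmed, case preserved
	_ = exprOrLiteral("CURRENT_TIMESTAMP") // "current_timestamp": expression lowered
}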
@ -225,10 +226,10 @@ func exprToLower(s string) string {
type BunModelSchema struct {
BaseDatabase
Tables *orderedmap.OrderedMap[string, Table]
Tables *ordered.Map[string, Table]
}
func (ms BunModelSchema) GetTables() *orderedmap.OrderedMap[string, Table] {
func (ms BunModelSchema) GetTables() *ordered.Map[string, Table] {
return ms.Tables
}

View file

@ -1,13 +1,13 @@
package sqlschema
import (
orderedmap "github.com/wk8/go-ordered-map/v2"
"github.com/uptrace/bun/internal/ordered"
)
type Table interface {
GetSchema() string
GetName() string
GetColumns() *orderedmap.OrderedMap[string, Column]
GetColumns() *ordered.Map[string, Column]
GetPrimaryKey() *PrimaryKey
GetUniqueConstraints() []Unique
}
@ -23,7 +23,7 @@ type BaseTable struct {
Name string
// Columns maps each column name to its column definition.
Columns *orderedmap.OrderedMap[string, Column]
Columns *ordered.Map[string, Column]
// PrimaryKey holds the primary key definition.
// A nil value means that no primary key is defined for the table.
@ -47,7 +47,7 @@ func (td *BaseTable) GetName() string {
return td.Name
}
func (td *BaseTable) GetColumns() *orderedmap.OrderedMap[string, Column] {
func (td *BaseTable) GetColumns() *ordered.Map[string, Column] {
return td.Columns
}

View file

@ -14,8 +14,8 @@
var errNilModel = errors.New("bun: Model(nil)")
var (
timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
bytesType = reflect.TypeOf((*[]byte)(nil)).Elem()
timeType = reflect.TypeFor[time.Time]()
bytesType = reflect.TypeFor[[]byte]()
)
type Model = schema.Model

View file

@ -99,7 +99,7 @@ func (m *mapSliceModel) appendValues(fmter schema.Formatter, b []byte) (_ []byte
slice := *m.dest
b = append(b, "VALUES "...)
if m.db.features.Has(feature.ValuesRow) {
if m.db.HasFeature(feature.ValuesRow) {
b = append(b, "ROW("...)
} else {
b = append(b, '(')
@ -118,7 +118,7 @@ func (m *mapSliceModel) appendValues(fmter schema.Formatter, b []byte) (_ []byte
for i, el := range slice {
if i > 0 {
b = append(b, "), "...)
if m.db.features.Has(feature.ValuesRow) {
if m.db.HasFeature(feature.ValuesRow) {
b = append(b, "ROW("...)
} else {
b = append(b, '(')

View file

@ -3,6 +3,7 @@
import (
"context"
"database/sql"
"database/sql/driver"
"fmt"
"reflect"
@ -152,7 +153,13 @@ func modelKey(key []interface{}, strct reflect.Value, fields []*schema.Field) []
// The value is then used as a map key.
func indirectFieldValue(field reflect.Value) interface{} {
if field.Kind() != reflect.Ptr {
return field.Interface()
i := field.Interface()
if valuer, ok := i.(driver.Valuer); ok {
if v, err := valuer.Value(); err == nil {
return v
}
}
return i
}
if field.IsNil() {
return nil

View file

@ -103,7 +103,7 @@ func (m *m2mModel) scanM2MColumn(column string, src interface{}) error {
if err := field.Scan(dest, src); err != nil {
return err
}
m.structKey = append(m.structKey, dest.Interface())
m.structKey = append(m.structKey, indirectFieldValue(dest))
break
}
}

View file

@ -1,6 +1,6 @@
{
"name": "gobun",
"version": "1.2.6",
"version": "1.2.8",
"main": "index.js",
"repository": "git@github.com:uptrace/bun.git",
"author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",

View file

@ -201,7 +201,7 @@ func (q *baseQuery) beforeAppendModel(ctx context.Context, query Query) error {
}
func (q *baseQuery) hasFeature(feature feature.Feature) bool {
return q.db.features.Has(feature)
return q.db.HasFeature(feature)
}
//------------------------------------------------------------------------------

View file

@ -13,6 +13,7 @@ type AddColumnQuery struct {
baseQuery
ifNotExists bool
comment string
}
var _ Query = (*AddColumnQuery)(nil)
@ -85,6 +86,14 @@ func (q *AddColumnQuery) IfNotExists() *AddColumnQuery {
//------------------------------------------------------------------------------
// Comment adds a comment to the query, wrapped by /* ... */.
func (q *AddColumnQuery) Comment(comment string) *AddColumnQuery {
q.comment = comment
return q
}
//------------------------------------------------------------------------------
func (q *AddColumnQuery) Operation() string {
return "ADD COLUMN"
}
@ -93,6 +102,9 @@ func (q *AddColumnQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte
if q.err != nil {
return nil, q.err
}
b = appendComment(b, q.comment)
if len(q.columns) != 1 {
return nil, fmt.Errorf("bun: AddColumnQuery requires exactly one column")
}

View file

@ -11,6 +11,8 @@
type DropColumnQuery struct {
baseQuery
comment string
}
var _ Query = (*DropColumnQuery)(nil)
@ -85,6 +87,14 @@ func (q *DropColumnQuery) ColumnExpr(query string, args ...interface{}) *DropCol
//------------------------------------------------------------------------------
// Comment adds a comment to the query, wrapped by /* ... */.
func (q *DropColumnQuery) Comment(comment string) *DropColumnQuery {
q.comment = comment
return q
}
//------------------------------------------------------------------------------
func (q *DropColumnQuery) Operation() string {
return "DROP COLUMN"
}
@ -93,6 +103,9 @@ func (q *DropColumnQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byt
if q.err != nil {
return nil, q.err
}
b = appendComment(b, q.comment)
if len(q.columns) != 1 {
return nil, fmt.Errorf("bun: DropColumnQuery requires exactly one column")
}

View file

@ -15,6 +15,8 @@ type DeleteQuery struct {
whereBaseQuery
orderLimitOffsetQuery
returningQuery
comment string
}
var _ Query = (*DeleteQuery)(nil)
@ -174,6 +176,14 @@ func (q *DeleteQuery) Returning(query string, args ...interface{}) *DeleteQuery
//------------------------------------------------------------------------------
// Comment adds a comment to the query, wrapped by /* ... */.
func (q *DeleteQuery) Comment(comment string) *DeleteQuery {
q.comment = comment
return q
}
//------------------------------------------------------------------------------
func (q *DeleteQuery) Operation() string {
return "DELETE"
}
@ -183,6 +193,8 @@ func (q *DeleteQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, e
return nil, q.err
}
b = appendComment(b, q.comment)
fmter = formatterWithModel(fmter, q)
if q.isSoftDelete() {
@ -201,7 +213,7 @@ func (q *DeleteQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, e
return upd.AppendQuery(fmter, b)
}
withAlias := q.db.features.Has(feature.DeleteTableAlias)
withAlias := q.db.HasFeature(feature.DeleteTableAlias)
b, err = q.appendWith(fmter, b)
if err != nil {

View file

@ -20,6 +20,7 @@ type CreateIndexQuery struct {
index schema.QueryWithArgs
using schema.QueryWithArgs
include []schema.QueryWithArgs
comment string
}
var _ Query = (*CreateIndexQuery)(nil)
@ -149,6 +150,14 @@ func (q *CreateIndexQuery) WhereOr(query string, args ...interface{}) *CreateInd
//------------------------------------------------------------------------------
// Comment adds a comment to the query, wrapped by /* ... */.
func (q *CreateIndexQuery) Comment(comment string) *CreateIndexQuery {
q.comment = comment
return q
}
//------------------------------------------------------------------------------
func (q *CreateIndexQuery) Operation() string {
return "CREATE INDEX"
}
@ -158,6 +167,8 @@ func (q *CreateIndexQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []by
return nil, q.err
}
b = appendComment(b, q.comment)
b = append(b, "CREATE "...)
if q.unique {

View file

@ -16,6 +16,7 @@ type DropIndexQuery struct {
ifExists bool
index schema.QueryWithArgs
comment string
}
var _ Query = (*DropIndexQuery)(nil)
@ -74,6 +75,14 @@ func (q *DropIndexQuery) Index(query string, args ...interface{}) *DropIndexQuer
//------------------------------------------------------------------------------
// Comment adds a comment to the query, wrapped by /* ... */.
func (q *DropIndexQuery) Comment(comment string) *DropIndexQuery {
q.comment = comment
return q
}
//------------------------------------------------------------------------------
func (q *DropIndexQuery) Operation() string {
return "DROP INDEX"
}
@ -83,6 +92,8 @@ func (q *DropIndexQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte
return nil, q.err
}
b = appendComment(b, q.comment)
b = append(b, "DROP INDEX "...)
if q.concurrently {

View file

@ -22,6 +22,7 @@ type InsertQuery struct {
ignore bool
replace bool
comment string
}
var _ Query = (*InsertQuery)(nil)
@ -164,6 +165,14 @@ func (q *InsertQuery) Replace() *InsertQuery {
//------------------------------------------------------------------------------
// Comment adds a comment to the query, wrapped by /* ... */.
func (q *InsertQuery) Comment(comment string) *InsertQuery {
q.comment = comment
return q
}
//------------------------------------------------------------------------------
func (q *InsertQuery) Operation() string {
return "INSERT"
}
@ -173,6 +182,8 @@ func (q *InsertQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, e
return nil, q.err
}
b = appendComment(b, q.comment)
fmter = formatterWithModel(fmter, q)
b, err = q.appendWith(fmter, b)
@ -190,7 +201,7 @@ func (q *InsertQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, e
}
b = append(b, "INTO "...)
if q.db.features.Has(feature.InsertTableAlias) && !q.on.IsZero() {
if q.db.HasFeature(feature.InsertTableAlias) && !q.on.IsZero() {
b, err = q.appendFirstTableWithAlias(fmter, b)
} else {
b, err = q.appendFirstTable(fmter, b)
@ -385,9 +396,9 @@ func (q *InsertQuery) appendSliceValues(
}
func (q *InsertQuery) getFields() ([]*schema.Field, error) {
hasIdentity := q.db.features.Has(feature.Identity)
hasIdentity := q.db.HasFeature(feature.Identity)
if len(q.columns) > 0 || q.db.features.Has(feature.DefaultPlaceholder) && !hasIdentity {
if len(q.columns) > 0 || q.db.HasFeature(feature.DefaultPlaceholder) && !hasIdentity {
return q.baseQuery.getFields()
}
@ -640,8 +651,8 @@ func (q *InsertQuery) afterInsertHook(ctx context.Context) error {
}
func (q *InsertQuery) tryLastInsertID(res sql.Result, dest []interface{}) error {
if q.db.features.Has(feature.Returning) ||
q.db.features.Has(feature.Output) ||
if q.db.HasFeature(feature.Returning) ||
q.db.HasFeature(feature.Output) ||
q.table == nil ||
len(q.table.PKs) != 1 ||
!q.table.PKs[0].AutoIncrement {

View file

@ -18,6 +18,7 @@ type MergeQuery struct {
using schema.QueryWithArgs
on schema.QueryWithArgs
when []schema.QueryAppender
comment string
}
var _ Query = (*MergeQuery)(nil)
@ -150,6 +151,14 @@ func (q *MergeQuery) When(expr string, args ...interface{}) *MergeQuery {
//------------------------------------------------------------------------------
// Comment adds a comment to the query, wrapped by /* ... */.
func (q *MergeQuery) Comment(comment string) *MergeQuery {
q.comment = comment
return q
}
//------------------------------------------------------------------------------
func (q *MergeQuery) Operation() string {
return "MERGE"
}
@ -159,6 +168,8 @@ func (q *MergeQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, er
return nil, q.err
}
b = appendComment(b, q.comment)
fmter = formatterWithModel(fmter, q)
b, err = q.appendWith(fmter, b)

View file

@ -12,6 +12,7 @@ type RawQuery struct {
query string
args []interface{}
comment string
}
// Deprecated: Use NewRaw instead. When added to IDB, it conflicts with the sql.Conn#Raw
@ -56,6 +57,12 @@ func (q *RawQuery) Scan(ctx context.Context, dest ...interface{}) error {
return err
}
// Comment adds a comment to the query, wrapped by /* ... */.
func (q *RawQuery) Comment(comment string) *RawQuery {
q.comment = comment
return q
}
func (q *RawQuery) scanOrExec(
ctx context.Context, dest []interface{}, hasDest bool,
) (sql.Result, error) {
@ -90,6 +97,8 @@ func (q *RawQuery) scanOrExec(
}
func (q *RawQuery) AppendQuery(fmter schema.Formatter, b []byte) ([]byte, error) {
b = appendComment(b, q.comment)
return fmter.AppendQuery(b, q.query, q.args...), nil
}

View file

@ -32,6 +32,7 @@ type SelectQuery struct {
selFor schema.QueryWithArgs
union []union
comment string
}
var _ Query = (*SelectQuery)(nil)
@ -381,6 +382,43 @@ func (q *SelectQuery) Relation(name string, apply ...func(*SelectQuery) *SelectQ
return q
}
q.applyToRelation(join, apply...)
return q
}
type RelationOpts struct {
// Apply applies additional options to the relation.
Apply func(*SelectQuery) *SelectQuery
// AdditionalJoinOnConditions adds additional conditions to the JOIN ON clause.
AdditionalJoinOnConditions []schema.QueryWithArgs
}
// RelationWithOpts adds a relation to the query with additional options.
func (q *SelectQuery) RelationWithOpts(name string, opts RelationOpts) *SelectQuery {
if q.tableModel == nil {
q.setErr(errNilModel)
return q
}
join := q.tableModel.join(name)
if join == nil {
q.setErr(fmt.Errorf("%s does not have relation=%q", q.table, name))
return q
}
if opts.Apply != nil {
q.applyToRelation(join, opts.Apply)
}
if len(opts.AdditionalJoinOnConditions) > 0 {
join.additionalJoinOnConditions = opts.AdditionalJoinOnConditions
}
return q
}
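
A sketch of how RelationWithOpts might be used; the models, relation name, and join condition below are assumptions for illustration only:

package main

import (
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/schema"
)

type Order struct {
	ID     int64 `bun:",pk,autoincrement"`
	UserID int64
	Total  int64
}

type User struct {
	ID     int64    `bun:",pk,autoincrement"`
	Orders []*Order `bun:"rel:has-many,join:id=user_id"`
}

func selectUsersWithOrders(db *bun.DB, users *[]User) *bun.SelectQuery {
	return db.NewSelect().
		Model(users).
		RelationWithOpts("Orders", bun.RelationOpts{
			Apply: func(q *bun.SelectQuery) *bun.SelectQuery {
				return q.Column("id", "total")
			},
			// Extra predicate appended to the relation's join/where clause.
			AdditionalJoinOnConditions: []schema.QueryWithArgs{
				{Query: "total > ?", Args: []interface{}{0}},
			},
		})
}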
func (q *SelectQuery) applyToRelation(join *relationJoin, apply ...func(*SelectQuery) *SelectQuery) {
var apply1, apply2 func(*SelectQuery) *SelectQuery
if len(join.Relation.Condition) > 0 {
@ -407,8 +445,6 @@ func (q *SelectQuery) Relation(name string, apply ...func(*SelectQuery) *SelectQ
return q
}
return q
}
func (q *SelectQuery) forEachInlineRelJoin(fn func(*relationJoin) error) error {
@ -460,11 +496,21 @@ func (q *SelectQuery) selectJoins(ctx context.Context, joins []relationJoin) err
//------------------------------------------------------------------------------
// Comment adds a comment to the query, wrapped by /* ... */.
func (q *SelectQuery) Comment(comment string) *SelectQuery {
q.comment = comment
return q
}
//------------------------------------------------------------------------------
func (q *SelectQuery) Operation() string {
return "SELECT"
}
func (q *SelectQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, err error) {
b = appendComment(b, q.comment)
return q.appendQuery(fmter, b, false)
}
@ -803,6 +849,14 @@ func (q *SelectQuery) scanResult(ctx context.Context, dest ...interface{}) (sql.
if err != nil {
return nil, err
}
if len(dest) > 0 && q.tableModel != nil && len(q.tableModel.getJoins()) > 0 {
for _, j := range q.tableModel.getJoins() {
switch j.Relation.Type {
case schema.HasManyRelation, schema.ManyToManyRelation:
return nil, fmt.Errorf("bun: use Model instead of the dest parameter in Scan when querying has-many or many-to-many relationships")
}
}
}
if q.table != nil {
if err := q.beforeSelectHook(ctx); err != nil {

View file

@ -32,6 +32,7 @@ type CreateTableQuery struct {
fks []schema.QueryWithArgs
partitionBy schema.QueryWithArgs
tablespace schema.QueryWithArgs
comment string
}
var _ Query = (*CreateTableQuery)(nil)
@ -131,6 +132,14 @@ func (q *CreateTableQuery) WithForeignKeys() *CreateTableQuery {
//------------------------------------------------------------------------------
// Comment adds a comment to the query, wrapped by /* ... */.
func (q *CreateTableQuery) Comment(comment string) *CreateTableQuery {
q.comment = comment
return q
}
// ------------------------------------------------------------------------------
func (q *CreateTableQuery) Operation() string {
return "CREATE TABLE"
}
@ -139,6 +148,9 @@ func (q *CreateTableQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []by
if q.err != nil {
return nil, q.err
}
b = appendComment(b, q.comment)
if q.table == nil {
return nil, errNilModel
}

View file

@ -13,6 +13,7 @@ type DropTableQuery struct {
cascadeQuery
ifExists bool
comment string
}
var _ Query = (*DropTableQuery)(nil)
@ -80,6 +81,14 @@ func (q *DropTableQuery) Restrict() *DropTableQuery {
//------------------------------------------------------------------------------
// Comment adds a comment to the query, wrapped by /* ... */.
func (q *DropTableQuery) Comment(comment string) *DropTableQuery {
q.comment = comment
return q
}
//------------------------------------------------------------------------------
func (q *DropTableQuery) Operation() string {
return "DROP TABLE"
}
@ -89,6 +98,8 @@ func (q *DropTableQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte
return nil, q.err
}
b = appendComment(b, q.comment)
b = append(b, "DROP TABLE "...)
if q.ifExists {
b = append(b, "IF EXISTS "...)

View file

@ -14,6 +14,7 @@ type TruncateTableQuery struct {
cascadeQuery
continueIdentity bool
comment string
}
var _ Query = (*TruncateTableQuery)(nil)
@ -81,6 +82,14 @@ func (q *TruncateTableQuery) Restrict() *TruncateTableQuery {
//------------------------------------------------------------------------------
// Comment adds a comment to the query, wrapped by /* ... */.
func (q *TruncateTableQuery) Comment(comment string) *TruncateTableQuery {
q.comment = comment
return q
}
//------------------------------------------------------------------------------
func (q *TruncateTableQuery) Operation() string {
return "TRUNCATE TABLE"
}
@ -92,6 +101,8 @@ func (q *TruncateTableQuery) AppendQuery(
return nil, q.err
}
b = appendComment(b, q.comment)
if !fmter.HasFeature(feature.TableTruncate) {
b = append(b, "DELETE FROM "...)
@ -110,7 +121,7 @@ func (q *TruncateTableQuery) AppendQuery(
return nil, err
}
if q.db.features.Has(feature.TableIdentity) {
if q.db.HasFeature(feature.TableIdentity) {
if q.continueIdentity {
b = append(b, " CONTINUE IDENTITY"...)
} else {

View file

@ -23,6 +23,7 @@ type UpdateQuery struct {
joins []joinQuery
omitZero bool
comment string
}
var _ Query = (*UpdateQuery)(nil)
@ -243,6 +244,14 @@ func (q *UpdateQuery) Returning(query string, args ...interface{}) *UpdateQuery
//------------------------------------------------------------------------------
// Comment adds a comment to the query, wrapped by /* ... */.
func (q *UpdateQuery) Comment(comment string) *UpdateQuery {
q.comment = comment
return q
}
//------------------------------------------------------------------------------
func (q *UpdateQuery) Operation() string {
return "UPDATE"
}
@ -252,6 +261,8 @@ func (q *UpdateQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, e
return nil, q.err
}
b = appendComment(b, q.comment)
fmter = formatterWithModel(fmter, q)
b, err = q.appendWith(fmter, b)

View file

@ -14,6 +14,7 @@ type ValuesQuery struct {
customValueQuery
withOrder bool
comment string
}
var (
@ -64,6 +65,12 @@ func (q *ValuesQuery) WithOrder() *ValuesQuery {
return q
}
// Comment adds a comment to the query, wrapped by /* ... */.
func (q *ValuesQuery) Comment(comment string) *ValuesQuery {
q.comment = comment
return q
}
func (q *ValuesQuery) AppendNamedArg(fmter schema.Formatter, b []byte, name string) ([]byte, bool) {
switch name {
case "Columns":
@ -121,6 +128,8 @@ func (q *ValuesQuery) AppendQuery(fmter schema.Formatter, b []byte) (_ []byte, e
return nil, errNilModel
}
b = appendComment(b, q.comment)
fmter = formatterWithModel(fmter, q)
if q.tableModel != nil {
@ -145,7 +154,7 @@ func (q *ValuesQuery) appendQuery(
fields []*schema.Field,
) (_ []byte, err error) {
b = append(b, "VALUES "...)
if q.db.features.Has(feature.ValuesRow) {
if q.db.HasFeature(feature.ValuesRow) {
b = append(b, "ROW("...)
} else {
b = append(b, '(')
@ -168,7 +177,7 @@ func (q *ValuesQuery) appendQuery(
for i := 0; i < sliceLen; i++ {
if i > 0 {
b = append(b, "), "...)
if q.db.features.Has(feature.ValuesRow) {
if q.db.HasFeature(feature.ValuesRow) {
b = append(b, "ROW("...)
} else {
b = append(b, '(')

View file

@ -16,6 +16,8 @@ type relationJoin struct {
JoinModel TableModel
Relation *schema.Relation
additionalJoinOnConditions []schema.QueryWithArgs
apply func(*SelectQuery) *SelectQuery
columns []schema.QueryWithArgs
}
@ -63,7 +65,7 @@ func (j *relationJoin) manyQuery(q *SelectQuery) *SelectQuery {
var where []byte
if q.db.dialect.Features().Has(feature.CompositeIn) {
if q.db.HasFeature(feature.CompositeIn) {
return j.manyQueryCompositeIn(where, q)
}
return j.manyQueryMulti(where, q)
@ -86,6 +88,11 @@ func (j *relationJoin) manyQueryCompositeIn(where []byte, q *SelectQuery) *Selec
j.Relation.BasePKs,
)
where = append(where, ")"...)
if len(j.additionalJoinOnConditions) > 0 {
where = append(where, " AND "...)
where = appendAdditionalJoinOnConditions(q.db.Formatter(), where, j.additionalJoinOnConditions)
}
q = q.Where(internal.String(where))
if j.Relation.PolymorphicField != nil {
@ -111,6 +118,10 @@ func (j *relationJoin) manyQueryMulti(where []byte, q *SelectQuery) *SelectQuery
q = q.Where(internal.String(where))
if len(j.additionalJoinOnConditions) > 0 {
q = q.Where(internal.String(appendAdditionalJoinOnConditions(q.db.Formatter(), []byte{}, j.additionalJoinOnConditions)))
}
if j.Relation.PolymorphicField != nil {
q = q.Where("? = ?", j.Relation.PolymorphicField.SQLName, j.Relation.PolymorphicValue)
}
@ -204,6 +215,12 @@ func (j *relationJoin) m2mQuery(q *SelectQuery) *SelectQuery {
join = append(join, ") IN ("...)
join = appendChildValues(fmter, join, j.BaseModel.rootValue(), index, j.Relation.BasePKs)
join = append(join, ")"...)
if len(j.additionalJoinOnConditions) > 0 {
join = append(join, " AND "...)
join = appendAdditionalJoinOnConditions(fmter, join, j.additionalJoinOnConditions)
}
q = q.Join(internal.String(join))
joinTable := j.JoinModel.Table()
@ -330,6 +347,11 @@ func (j *relationJoin) appendHasOneJoin(
b = j.appendSoftDelete(fmter, b, q.flags)
}
if len(j.additionalJoinOnConditions) > 0 {
b = append(b, " AND "...)
b = appendAdditionalJoinOnConditions(fmter, b, j.additionalJoinOnConditions)
}
return b, nil
}
@ -417,3 +439,15 @@ func appendMultiValues(
b = append(b, ')')
return b
}
func appendAdditionalJoinOnConditions(
fmter schema.Formatter, b []byte, conditions []schema.QueryWithArgs,
) []byte {
for i, cond := range conditions {
if i > 0 {
b = append(b, " AND "...)
}
b = fmter.AppendQuery(b, cond.Query, cond.Args...)
}
return b
}

View file

@ -24,7 +24,7 @@ type BeforeAppendModelHook interface {
BeforeAppendModel(ctx context.Context, query Query) error
}
var beforeAppendModelHookType = reflect.TypeOf((*BeforeAppendModelHook)(nil)).Elem()
var beforeAppendModelHookType = reflect.TypeFor[BeforeAppendModelHook]()
//------------------------------------------------------------------------------
@ -32,7 +32,7 @@ type BeforeScanRowHook interface {
BeforeScanRow(context.Context) error
}
var beforeScanRowHookType = reflect.TypeOf((*BeforeScanRowHook)(nil)).Elem()
var beforeScanRowHookType = reflect.TypeFor[BeforeScanRowHook]()
//------------------------------------------------------------------------------
@ -40,4 +40,4 @@ type AfterScanRowHook interface {
AfterScanRow(context.Context) error
}
var afterScanRowHookType = reflect.TypeOf((*AfterScanRowHook)(nil)).Elem()
var afterScanRowHookType = reflect.TypeFor[AfterScanRowHook]()

View file

@ -10,18 +10,18 @@
)
var (
bytesType = reflect.TypeOf((*[]byte)(nil)).Elem()
timePtrType = reflect.TypeOf((*time.Time)(nil))
timeType = timePtrType.Elem()
ipType = reflect.TypeOf((*net.IP)(nil)).Elem()
ipNetType = reflect.TypeOf((*net.IPNet)(nil)).Elem()
netipPrefixType = reflect.TypeOf((*netip.Prefix)(nil)).Elem()
netipAddrType = reflect.TypeOf((*netip.Addr)(nil)).Elem()
jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem()
bytesType = reflect.TypeFor[[]byte]()
timePtrType = reflect.TypeFor[*time.Time]()
timeType = reflect.TypeFor[time.Time]()
ipType = reflect.TypeFor[net.IP]()
ipNetType = reflect.TypeFor[net.IPNet]()
netipPrefixType = reflect.TypeFor[netip.Prefix]()
netipAddrType = reflect.TypeFor[netip.Addr]()
jsonRawMessageType = reflect.TypeFor[json.RawMessage]()
driverValuerType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
queryAppenderType = reflect.TypeOf((*QueryAppender)(nil)).Elem()
jsonMarshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem()
driverValuerType = reflect.TypeFor[driver.Valuer]()
queryAppenderType = reflect.TypeFor[QueryAppender]()
jsonMarshalerType = reflect.TypeFor[json.Marshaler]()
)
func indirectType(t reflect.Type) reflect.Type {

View file

@ -18,7 +18,7 @@
"github.com/uptrace/bun/internal"
)
var scannerType = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
var scannerType = reflect.TypeFor[sql.Scanner]()
type ScannerFunc func(dest reflect.Value, src interface{}) error
@ -38,8 +38,8 @@ func init() {
reflect.Uint32: scanUint64,
reflect.Uint64: scanUint64,
reflect.Uintptr: scanUint64,
reflect.Float32: scanFloat64,
reflect.Float64: scanFloat64,
reflect.Float32: scanFloat,
reflect.Float64: scanFloat,
reflect.Complex64: nil,
reflect.Complex128: nil,
reflect.Array: nil,
@ -214,11 +214,14 @@ func scanUint64(dest reflect.Value, src interface{}) error {
}
}
func scanFloat64(dest reflect.Value, src interface{}) error {
func scanFloat(dest reflect.Value, src interface{}) error {
switch src := src.(type) {
case nil:
dest.SetFloat(0)
return nil
case float32:
dest.SetFloat(float64(src))
return nil
case float64:
dest.SetFloat(src)
return nil

View file

@ -13,12 +13,12 @@
)
var (
bunNullTimeType = reflect.TypeOf((*NullTime)(nil)).Elem()
nullTimeType = reflect.TypeOf((*sql.NullTime)(nil)).Elem()
nullBoolType = reflect.TypeOf((*sql.NullBool)(nil)).Elem()
nullFloatType = reflect.TypeOf((*sql.NullFloat64)(nil)).Elem()
nullIntType = reflect.TypeOf((*sql.NullInt64)(nil)).Elem()
nullStringType = reflect.TypeOf((*sql.NullString)(nil)).Elem()
bunNullTimeType = reflect.TypeFor[NullTime]()
nullTimeType = reflect.TypeFor[sql.NullTime]()
nullBoolType = reflect.TypeFor[sql.NullBool]()
nullFloatType = reflect.TypeFor[sql.NullFloat64]()
nullIntType = reflect.TypeFor[sql.NullInt64]()
nullStringType = reflect.TypeFor[sql.NullString]()
)
var sqlTypes = []string{

View file

@ -4,6 +4,7 @@
"database/sql"
"fmt"
"reflect"
"sort"
"strings"
"time"
@ -22,7 +23,7 @@
)
var (
baseModelType = reflect.TypeOf((*BaseModel)(nil)).Elem()
baseModelType = reflect.TypeFor[BaseModel]()
tableNameInflector = inflection.Plural
)
@ -75,7 +76,7 @@ type structField struct {
Table *Table
}
func (table *Table) init(dialect Dialect, typ reflect.Type, canAddr bool) {
func (table *Table) init(dialect Dialect, typ reflect.Type) {
table.dialect = dialect
table.Type = typ
table.ZeroValue = reflect.New(table.Type).Elem()
@ -90,7 +91,7 @@ func (table *Table) init(dialect Dialect, typ reflect.Type, canAddr bool) {
table.Fields = make([]*Field, 0, typ.NumField())
table.FieldMap = make(map[string]*Field, typ.NumField())
table.processFields(typ, canAddr)
table.processFields(typ)
hooks := []struct {
typ reflect.Type
@ -110,7 +111,7 @@ func (table *Table) init(dialect Dialect, typ reflect.Type, canAddr bool) {
}
}
func (t *Table) processFields(typ reflect.Type, canAddr bool) {
func (t *Table) processFields(typ reflect.Type) {
type embeddedField struct {
prefix string
index []int
@ -250,6 +251,30 @@ type embeddedField struct {
t.addUnique(subfield, embfield.prefix, v)
}
}
if len(embedded) > 0 {
// https://github.com/uptrace/bun/issues/1095
// < v1.2: all fields follow the order of the struct declaration.
// >= v1.2, < v1.2.8: fields of nested structs were moved to the end.
// >= v1.2.8: the default behavior matches the original (pre-v1.2) order.
sortFieldsByStruct(t.allFields)
sortFieldsByStruct(t.Fields)
sortFieldsByStruct(t.PKs)
sortFieldsByStruct(t.DataFields)
}
}
func sortFieldsByStruct(fields []*Field) {
sort.Slice(fields, func(i, j int) bool {
left, right := fields[i], fields[j]
for k := 0; k < len(left.Index) && k < len(right.Index); k++ {
if left.Index[k] != right.Index[k] {
return left.Index[k] < right.Index[k]
}
}
// NOTE: should not reach
return true
})
}
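
A hypothetical model showing the effect of the restored ordering:

package main

import "time"

// With the index-path sort above, the embedded Timestamps columns land
// between ID and Title again (declaration order) instead of being pushed
// to the end as in v1.2 through v1.2.7.
type Timestamps struct {
	CreatedAt time.Time
	UpdatedAt time.Time
}

type Book struct {
	ID int64 `bun:",pk,autoincrement"`
	Timestamps
	Title string
}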
func (t *Table) addUnique(field *Field, prefix string, tagOptions []string) {

View file

@ -72,7 +72,7 @@ func (t *Tables) InProgress(typ reflect.Type) *Table {
table := new(Table)
t.inProgress[typ] = table
table.init(t.dialect, typ, false)
table.init(t.dialect, typ)
return table
}

View file

@ -5,7 +5,7 @@
"reflect"
)
var isZeroerType = reflect.TypeOf((*isZeroer)(nil)).Elem()
var isZeroerType = reflect.TypeFor[isZeroer]()
type isZeroer interface {
IsZero() bool

View file

@ -1,6 +1,10 @@
package bun
import "reflect"
import (
"fmt"
"reflect"
"strings"
)
func indirect(v reflect.Value) reflect.Value {
switch v.Kind() {
@ -66,3 +70,19 @@ func sliceElemType(v reflect.Value) reflect.Type {
}
return indirectType(elemType)
}
// appendComment writes the comment, if any, at the head of the query buffer.
func appendComment(b []byte, name string) []byte {
if name == "" {
return b
}
name = strings.Map(func(r rune) rune {
if r == '\x00' {
return -1
}
return r
}, name)
name = strings.ReplaceAll(name, `/*`, `/\*`)
name = strings.ReplaceAll(name, `*/`, `*\/`)
return append(b, fmt.Sprintf("/* %s */ ", name)...)
}
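
Tying the Comment plumbing together, a short end-to-end sketch (the table name and comment text are placeholders):

package main

import (
	"context"

	"github.com/uptrace/bun"
)

func tracedIDs(ctx context.Context, db *bun.DB) ([]int64, error) {
	var ids []int64
	// Emits roughly: /* trace=checkout-42 */ SELECT "id" FROM "users"
	err := db.NewSelect().
		Table("users").
		Column("id").
		Comment("trace=checkout-42").
		Scan(ctx, &ids)
	return ids, err
}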

View file

@ -2,5 +2,5 @@
// Version is the current release version.
func Version() string {
return "1.2.6"
return "1.2.8"
}

View file

@ -1 +0,0 @@
/vendor/

View file

@ -1,78 +0,0 @@
run:
tests: false
linters:
disable-all: true
enable:
- asciicheck
- bidichk
- bodyclose
- containedctx
- contextcheck
- decorder
# Disabling depguard as there is no guarded list of imports
# - depguard
- dogsled
- dupl
- durationcheck
- errcheck
- errchkjson
- errname
- errorlint
- exportloopref
- forbidigo
- funlen
# Don't need gci and goimports
# - gci
- gochecknoglobals
- gochecknoinits
- gocognit
- goconst
- gocritic
- gocyclo
- godox
- gofmt
- gofumpt
- goheader
- goimports
- mnd
- gomoddirectives
- gomodguard
- goprintffuncname
- gosec
- gosimple
- govet
- grouper
- importas
- ineffassign
- lll
- maintidx
- makezero
- misspell
- nakedret
- nilerr
- nilnil
- noctx
- nolintlint
- paralleltest
- prealloc
- predeclared
- promlinter
- revive
- rowserrcheck
- sqlclosecheck
- staticcheck
- stylecheck
- tagliatelle
- tenv
- testpackage
- thelper
- tparallel
# FIXME: doesn't support 1.23 yet
# - typecheck
- unconvert
- unparam
- unused
- varnamelen
- wastedassign
- whitespace

View file

@ -1,38 +0,0 @@
# Changelog
[comment]: # (Changes since last release go here)
## 2.1.8 - Jun 27th 2023
* Added support for YAML serialization/deserialization
## 2.1.7 - Apr 13th 2023
* Renamed test_utils.go to utils_test.go
## 2.1.6 - Feb 15th 2023
* Added `GetAndMoveToBack()` and `GetAndMoveToFront()` methods
## 2.1.5 - Dec 13th 2022
* Added `Value()` method
## 2.1.4 - Dec 12th 2022
* Fixed a bug with UTF-8 special characters in JSON keys
## 2.1.3 - Dec 11th 2022
* Added support for JSON marshalling/unmarshalling of wrapper of primitive types
## 2.1.2 - Dec 10th 2022
* Allowing to pass options to `New`, to give a capacity hint, or initial data
* Allowing to deserialize nested ordered maps from JSON without having to explicitly instantiate them
* Added the `AddPairs` method
## 2.1.1 - Dec 9th 2022
* Fixing a bug with JSON marshalling
## 2.1.0 - Dec 7th 2022
* Added support for JSON serialization/deserialization

View file

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,32 +0,0 @@
.DEFAULT_GOAL := all
.PHONY: all
all: test_with_fuzz lint
# the TEST_FLAGS env var can be set to e.g. run only specific tests
TEST_COMMAND = go test -v -count=1 -race -cover $(TEST_FLAGS)
.PHONY: test
test:
$(TEST_COMMAND)
.PHONY: bench
bench:
go test -bench=.
FUZZ_TIME ?= 10s
# see https://github.com/golang/go/issues/46312
# and https://stackoverflow.com/a/72673487/4867444
# if we end up having more fuzz tests
.PHONY: test_with_fuzz
test_with_fuzz:
$(TEST_COMMAND) -fuzz=FuzzRoundTripJSON -fuzztime=$(FUZZ_TIME)
$(TEST_COMMAND) -fuzz=FuzzRoundTripYAML -fuzztime=$(FUZZ_TIME)
.PHONY: fuzz
fuzz: test_with_fuzz
.PHONY: lint
lint:
golangci-lint run

View file

@ -1,207 +0,0 @@
[![Go Reference](https://pkg.go.dev/badge/github.com/wk8/go-ordered-map/v2.svg)](https://pkg.go.dev/github.com/wk8/go-ordered-map/v2)
[![Build Status](https://circleci.com/gh/wk8/go-ordered-map.svg?style=svg)](https://app.circleci.com/pipelines/github/wk8/go-ordered-map)
# Golang Ordered Maps
Same as regular maps, but also remembers the order in which keys were inserted, akin to [Python's `collections.OrderedDict`s](https://docs.python.org/3.7/library/collections.html#ordereddict-objects).
It offers the following features:
* optimal runtime performance (all operations are constant time)
* optimal memory usage (only one copy of values, no unnecessary memory allocation)
* allows iterating from either the newest or the oldest key, without memory copies; you can `break` out of the iteration, which runs in time linear to the number of keys iterated over rather than the total length of the ordered map
* supports any generic types for both keys and values. If you're running go < 1.18, you can use [version 1](https://github.com/wk8/go-ordered-map/tree/v1), which takes and returns `interface{}`s instead of using generics
* idiomatic API, akin to that of [`container/list`](https://golang.org/pkg/container/list)
* support for JSON and YAML marshalling
## Documentation
[The full documentation is available on pkg.go.dev](https://pkg.go.dev/github.com/wk8/go-ordered-map/v2).
## Installation
```bash
go get -u github.com/wk8/go-ordered-map/v2
```
Or use your favorite golang vendoring tool!
## Supported go versions
Go >= 1.23 is required to use version >= 2.2.0 of this library, as it uses generics and iterators.
If you're running go < 1.23, you can use [version 2.1.8](https://github.com/wk8/go-ordered-map/tree/v2.1.8) instead.
If you're running go < 1.18, you can use [version 1](https://github.com/wk8/go-ordered-map/tree/v1) instead.
## Example / usage
```go
package main
import (
"fmt"
"github.com/wk8/go-ordered-map/v2"
)
func main() {
om := orderedmap.New[string, string]()
om.Set("foo", "bar")
om.Set("bar", "baz")
om.Set("coucou", "toi")
fmt.Println(om.Get("foo")) // => "bar", true
fmt.Println(om.Get("i dont exist")) // => "", false
// iterating pairs from oldest to newest:
for pair := om.Oldest(); pair != nil; pair = pair.Next() {
fmt.Printf("%s => %s\n", pair.Key, pair.Value)
} // prints:
// foo => bar
// bar => baz
// coucou => toi
// iterating over the 2 newest pairs:
i := 0
for pair := om.Newest(); pair != nil; pair = pair.Prev() {
fmt.Printf("%s => %s\n", pair.Key, pair.Value)
i++
if i >= 2 {
break
}
} // prints:
// coucou => toi
// bar => baz
}
```
An `OrderedMap`'s keys must implement `comparable`, and its values can be anything, for example:
```go
type myStruct struct {
payload string
}
func main() {
om := orderedmap.New[int, *myStruct]()
om.Set(12, &myStruct{"foo"})
om.Set(1, &myStruct{"bar"})
value, present := om.Get(12)
if !present {
panic("should be there!")
}
fmt.Println(value.payload) // => foo
for pair := om.Oldest(); pair != nil; pair = pair.Next() {
fmt.Printf("%d => %s\n", pair.Key, pair.Value.payload)
} // prints:
// 12 => foo
// 1 => bar
}
```
It's also worth noting that you can provision ordered maps with a capacity hint, as you would by passing an optional hint to `make(map[K]V, capacity)`:
```go
om := orderedmap.New[int, *myStruct](28)
```
You can also pass in some initial data to store in the map:
```go
om := orderedmap.New[int, string](orderedmap.WithInitialData[int, string](
orderedmap.Pair[int, string]{
Key: 12,
Value: "foo",
},
orderedmap.Pair[int, string]{
Key: 28,
Value: "bar",
},
))
```
`OrderedMap`s also support JSON serialization/deserialization, and preserve insertion order:
```go
// serialization
data, err := json.Marshal(om)
...
// deserialization
om := orderedmap.New[string, string]() // or orderedmap.New[int, any](), or any type you expect
err := json.Unmarshal(data, &om)
...
```
Similarly, they support YAML serialization/deserialization using the yaml.v3 package, which also preserves order:
```go
// serialization
data, err := yaml.Marshal(om)
...
// deserialization
om := orderedmap.New[string, string]() // or orderedmap.New[int, any](), or any type you expect
err := yaml.Unmarshal(data, &om)
...
```
## Iterator support (go >= 1.23)
The `FromOldest`, `FromNewest`, `KeysFromOldest`, `KeysFromNewest`, `ValuesFromOldest` and `ValuesFromNewest` methods return iterators over the map's pairs, keys, or values, starting from the oldest or newest pair, as each name indicates.
For example:
```go
om := orderedmap.New[int, string]()
om.Set(1, "foo")
om.Set(2, "bar")
om.Set(3, "baz")
for k, v := range om.FromOldest() {
fmt.Printf("%d => %s\n", k, v)
}
// prints:
// 1 => foo
// 2 => bar
// 3 => baz
for k := range om.KeysFromNewest() {
fmt.Printf("%d\n", k)
}
// prints:
// 3
// 2
// 1
```
`From` is a convenience function that creates a new `OrderedMap` from an iterator over key-value pairs.
```go
om := orderedmap.New[int, string]()
om.Set(1, "foo")
om.Set(2, "bar")
om.Set(3, "baz")
om2 := orderedmap.From(om.FromOldest())
for k, v := range om2.FromOldest() {
fmt.Printf("%d => %s\n", k, v)
}
// prints:
// 1 => foo
// 2 => bar
// 3 => baz
```
## Alternatives
There are several other ordered-map Go implementations out there, but I believe that, at the time of writing, none of them offer the same functionality as this library; more specifically:
* [iancoleman/orderedmap](https://github.com/iancoleman/orderedmap) only accepts `string` keys, its `Delete` operations are linear
* [cevaris/ordered_map](https://github.com/cevaris/ordered_map) uses a channel for iterations, and leaks goroutines if the iteration is interrupted before fully traversing the map
* [mantyr/iterator](https://github.com/mantyr/iterator) also uses a channel for iterations, and its `Delete` operations are linear
* [samdolan/go-ordered-map](https://github.com/samdolan/go-ordered-map) adds unnecessary locking (users should add their own locking instead if they need it), its `Delete` and `Get` operations are linear, iterations trigger a linear memory allocation

View file

@ -1,182 +0,0 @@
package orderedmap
import (
"bytes"
"encoding"
"encoding/json"
"fmt"
"reflect"
"unicode/utf8"
"github.com/buger/jsonparser"
"github.com/mailru/easyjson/jwriter"
)
var (
_ json.Marshaler = &OrderedMap[int, any]{}
_ json.Unmarshaler = &OrderedMap[int, any]{}
)
// MarshalJSON implements the json.Marshaler interface.
func (om *OrderedMap[K, V]) MarshalJSON() ([]byte, error) { //nolint:funlen
if om == nil || om.list == nil {
return []byte("null"), nil
}
writer := jwriter.Writer{}
writer.RawByte('{')
for pair, firstIteration := om.Oldest(), true; pair != nil; pair = pair.Next() {
if firstIteration {
firstIteration = false
} else {
writer.RawByte(',')
}
switch key := any(pair.Key).(type) {
case string:
writer.String(key)
case encoding.TextMarshaler:
writer.RawByte('"')
writer.Raw(key.MarshalText())
writer.RawByte('"')
case int:
writer.IntStr(key)
case int8:
writer.Int8Str(key)
case int16:
writer.Int16Str(key)
case int32:
writer.Int32Str(key)
case int64:
writer.Int64Str(key)
case uint:
writer.UintStr(key)
case uint8:
writer.Uint8Str(key)
case uint16:
writer.Uint16Str(key)
case uint32:
writer.Uint32Str(key)
case uint64:
writer.Uint64Str(key)
default:
// this switch takes care of wrapper types around primitive types, such as
// type myType string
switch keyValue := reflect.ValueOf(key); keyValue.Type().Kind() {
case reflect.String:
writer.String(keyValue.String())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
writer.Int64Str(keyValue.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
writer.Uint64Str(keyValue.Uint())
default:
return nil, fmt.Errorf("unsupported key type: %T", key)
}
}
writer.RawByte(':')
// the error is checked at the end of the function
writer.Raw(json.Marshal(pair.Value))
}
writer.RawByte('}')
return dumpWriter(&writer)
}
func dumpWriter(writer *jwriter.Writer) ([]byte, error) {
if writer.Error != nil {
return nil, writer.Error
}
var buf bytes.Buffer
buf.Grow(writer.Size())
if _, err := writer.DumpTo(&buf); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// UnmarshalJSON implements the json.Unmarshaler interface.
func (om *OrderedMap[K, V]) UnmarshalJSON(data []byte) error {
if om.list == nil {
om.initialize(0)
}
return jsonparser.ObjectEach(
data,
func(keyData []byte, valueData []byte, dataType jsonparser.ValueType, offset int) error {
if dataType == jsonparser.String {
// jsonparser removes the enclosing quotes; we need to restore them to make a valid JSON
valueData = data[offset-len(valueData)-2 : offset]
}
var key K
var value V
switch typedKey := any(&key).(type) {
case *string:
s, err := decodeUTF8(keyData)
if err != nil {
return err
}
*typedKey = s
case encoding.TextUnmarshaler:
if err := typedKey.UnmarshalText(keyData); err != nil {
return err
}
case *int, *int8, *int16, *int32, *int64, *uint, *uint8, *uint16, *uint32, *uint64:
if err := json.Unmarshal(keyData, typedKey); err != nil {
return err
}
default:
// this switch takes care of wrapper types around primitive types, such as
// type myType string
switch reflect.TypeOf(key).Kind() {
case reflect.String:
s, err := decodeUTF8(keyData)
if err != nil {
return err
}
convertedKeyData := reflect.ValueOf(s).Convert(reflect.TypeOf(key))
reflect.ValueOf(&key).Elem().Set(convertedKeyData)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
if err := json.Unmarshal(keyData, &key); err != nil {
return err
}
default:
return fmt.Errorf("unsupported key type: %T", key)
}
}
if err := json.Unmarshal(valueData, &value); err != nil {
return err
}
om.Set(key, value)
return nil
})
}
func decodeUTF8(input []byte) (string, error) {
remaining, offset := input, 0
runes := make([]rune, 0, len(remaining))
for len(remaining) > 0 {
r, size := utf8.DecodeRune(remaining)
if r == utf8.RuneError && size <= 1 {
return "", fmt.Errorf("not a valid UTF-8 string (at position %d): %s", offset, string(input))
}
runes = append(runes, r)
remaining = remaining[size:]
offset += size
}
return string(runes), nil
}
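To see the wrapper-type fallback above in action, here is a minimal standalone sketch (assuming the `github.com/wk8/go-ordered-map/v2` module path; `userID` is a hypothetical wrapper type introduced only for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"

	orderedmap "github.com/wk8/go-ordered-map/v2"
)

// userID wraps a primitive type, so marshalling goes through the
// reflect-based default cases shown above.
type userID int

func main() {
	om := orderedmap.New[userID, string]()
	om.Set(12, "alice")
	om.Set(3, "bob")

	data, _ := json.Marshal(om)
	fmt.Println(string(data)) // {"12":"alice","3":"bob"} (insertion order preserved)

	round := orderedmap.New[userID, string]()
	_ = json.Unmarshal(data, &round)
	fmt.Println(round.Value(3)) // bob
}
```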

View file

@ -1,373 +0,0 @@
// Package orderedmap implements an ordered map, i.e. a map that also keeps track of
// the order in which keys were inserted.
//
// All operations are constant-time.
//
// Github repo: https://github.com/wk8/go-ordered-map
package orderedmap
import (
"fmt"
"iter"
list "github.com/bahlo/generic-list-go"
)
type Pair[K comparable, V any] struct {
Key K
Value V
element *list.Element[*Pair[K, V]]
}
type OrderedMap[K comparable, V any] struct {
pairs map[K]*Pair[K, V]
list *list.List[*Pair[K, V]]
}
type initConfig[K comparable, V any] struct {
capacity int
initialData []Pair[K, V]
}
type InitOption[K comparable, V any] func(config *initConfig[K, V])
// WithCapacity allows giving a capacity hint for the map, akin to the standard make(map[K]V, capacity).
func WithCapacity[K comparable, V any](capacity int) InitOption[K, V] {
return func(c *initConfig[K, V]) {
c.capacity = capacity
}
}
// WithInitialData allows passing in initial data for the map.
func WithInitialData[K comparable, V any](initialData ...Pair[K, V]) InitOption[K, V] {
return func(c *initConfig[K, V]) {
c.initialData = initialData
if c.capacity < len(initialData) {
c.capacity = len(initialData)
}
}
}
// New creates a new OrderedMap.
// options can either be one or several InitOption[K, V], or a single integer,
// which is then interpreted as a capacity hint, à la make(map[K]V, capacity).
func New[K comparable, V any](options ...any) *OrderedMap[K, V] {
orderedMap := &OrderedMap[K, V]{}
var config initConfig[K, V]
for _, untypedOption := range options {
switch option := untypedOption.(type) {
case int:
if len(options) != 1 {
invalidOption()
}
config.capacity = option
case InitOption[K, V]:
option(&config)
default:
invalidOption()
}
}
orderedMap.initialize(config.capacity)
orderedMap.AddPairs(config.initialData...)
return orderedMap
}
const invalidOptionMessage = `when using orderedmap.New[K,V]() with options, either provide one or several InitOption[K, V]; or a single integer which is then interpreted as a capacity hint, à la make(map[K]V, capacity).` //nolint:lll
func invalidOption() { panic(invalidOptionMessage) }
func (om *OrderedMap[K, V]) initialize(capacity int) {
om.pairs = make(map[K]*Pair[K, V], capacity)
om.list = list.New[*Pair[K, V]]()
}
// Get looks for the given key, and returns the value associated with it,
// or V's zero value if not found. The boolean it returns says whether the key is present in the map.
func (om *OrderedMap[K, V]) Get(key K) (val V, present bool) {
if pair, present := om.pairs[key]; present {
return pair.Value, true
}
return
}
// Load is an alias for Get, mostly to present an API similar to `sync.Map`'s.
func (om *OrderedMap[K, V]) Load(key K) (V, bool) {
return om.Get(key)
}
// Value returns the value associated with the given key or the zero value.
func (om *OrderedMap[K, V]) Value(key K) (val V) {
if pair, present := om.pairs[key]; present {
val = pair.Value
}
return
}
// GetPair looks for the given key, and returns the pair associated with it,
// or nil if not found. The Pair struct can then be used to iterate over the ordered map
// from that point, either forward or backward.
func (om *OrderedMap[K, V]) GetPair(key K) *Pair[K, V] {
return om.pairs[key]
}
// Set sets the key-value pair, and returns what `Get` would have returned
// on that key prior to the call to `Set`.
func (om *OrderedMap[K, V]) Set(key K, value V) (val V, present bool) {
if pair, present := om.pairs[key]; present {
oldValue := pair.Value
pair.Value = value
return oldValue, true
}
pair := &Pair[K, V]{
Key: key,
Value: value,
}
pair.element = om.list.PushBack(pair)
om.pairs[key] = pair
return
}
// AddPairs allows setting multiple pairs at a time. It's equivalent to calling
// Set on each pair sequentially.
func (om *OrderedMap[K, V]) AddPairs(pairs ...Pair[K, V]) {
for _, pair := range pairs {
om.Set(pair.Key, pair.Value)
}
}
// Store is an alias for Set, mostly to present an API similar to `sync.Map`'s.
func (om *OrderedMap[K, V]) Store(key K, value V) (V, bool) {
return om.Set(key, value)
}
// Delete removes the key-value pair, and returns what `Get` would have returned
// on that key prior to the call to `Delete`.
func (om *OrderedMap[K, V]) Delete(key K) (val V, present bool) {
if pair, present := om.pairs[key]; present {
om.list.Remove(pair.element)
delete(om.pairs, key)
return pair.Value, true
}
return
}
// Len returns the length of the ordered map.
func (om *OrderedMap[K, V]) Len() int {
if om == nil || om.pairs == nil {
return 0
}
return len(om.pairs)
}
// Oldest returns a pointer to the oldest pair. It's meant to be used to iterate on the ordered map's
// pairs from the oldest to the newest, e.g.:
// for pair := orderedMap.Oldest(); pair != nil; pair = pair.Next() { fmt.Printf("%v => %v\n", pair.Key, pair.Value) }
func (om *OrderedMap[K, V]) Oldest() *Pair[K, V] {
if om == nil || om.list == nil {
return nil
}
return listElementToPair(om.list.Front())
}
// Newest returns a pointer to the newest pair. It's meant to be used to iterate on the ordered map's
// pairs from the newest to the oldest, e.g.:
// for pair := orderedMap.Newest(); pair != nil; pair = pair.Prev() { fmt.Printf("%v => %v\n", pair.Key, pair.Value) }
func (om *OrderedMap[K, V]) Newest() *Pair[K, V] {
if om == nil || om.list == nil {
return nil
}
return listElementToPair(om.list.Back())
}
// Next returns a pointer to the next pair.
func (p *Pair[K, V]) Next() *Pair[K, V] {
return listElementToPair(p.element.Next())
}
// Prev returns a pointer to the previous pair.
func (p *Pair[K, V]) Prev() *Pair[K, V] {
return listElementToPair(p.element.Prev())
}
func listElementToPair[K comparable, V any](element *list.Element[*Pair[K, V]]) *Pair[K, V] {
if element == nil {
return nil
}
return element.Value
}
// KeyNotFoundError may be returned by functions in this package when they're called with keys that are not present
// in the map.
type KeyNotFoundError[K comparable] struct {
MissingKey K
}
func (e *KeyNotFoundError[K]) Error() string {
return fmt.Sprintf("missing key: %v", e.MissingKey)
}
// MoveAfter moves the value associated with key to its new position after the one associated with markKey.
// Returns an error iff key or markKey are not present in the map. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) MoveAfter(key, markKey K) error {
elements, err := om.getElements(key, markKey)
if err != nil {
return err
}
om.list.MoveAfter(elements[0], elements[1])
return nil
}
// MoveBefore moves the value associated with key to its new position before the one associated with markKey.
// Returns an error iff key or markKey are not present in the map. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) MoveBefore(key, markKey K) error {
elements, err := om.getElements(key, markKey)
if err != nil {
return err
}
om.list.MoveBefore(elements[0], elements[1])
return nil
}
func (om *OrderedMap[K, V]) getElements(keys ...K) ([]*list.Element[*Pair[K, V]], error) {
elements := make([]*list.Element[*Pair[K, V]], len(keys))
for i, k := range keys {
pair, present := om.pairs[k]
if !present {
return nil, &KeyNotFoundError[K]{k}
}
elements[i] = pair.element
}
return elements, nil
}
// MoveToBack moves the value associated with key to the back of the ordered map,
// i.e. makes it the newest pair in the map.
// Returns an error iff key is not present in the map. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) MoveToBack(key K) error {
_, err := om.GetAndMoveToBack(key)
return err
}
// MoveToFront moves the value associated with key to the front of the ordered map,
// i.e. makes it the oldest pair in the map.
// Returns an error iff key is not present in the map. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) MoveToFront(key K) error {
_, err := om.GetAndMoveToFront(key)
return err
}
// GetAndMoveToBack combines Get and MoveToBack in the same call. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) GetAndMoveToBack(key K) (val V, err error) {
if pair, present := om.pairs[key]; present {
val = pair.Value
om.list.MoveToBack(pair.element)
} else {
err = &KeyNotFoundError[K]{key}
}
return
}
// GetAndMoveToFront combines Get and MoveToFront in the same call. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) GetAndMoveToFront(key K) (val V, err error) {
if pair, present := om.pairs[key]; present {
val = pair.Value
om.list.MoveToFront(pair.element)
} else {
err = &KeyNotFoundError[K]{key}
}
return
}
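A quick standalone sketch of the reordering API above (same module path as in the README; resulting order shown in comments):

```go
package main

import (
	"fmt"

	orderedmap "github.com/wk8/go-ordered-map/v2"
)

func main() {
	om := orderedmap.New[string, int]()
	om.Set("a", 1)
	om.Set("b", 2)
	om.Set("c", 3)

	_ = om.MoveAfter("a", "b")       // order is now: b, a, c
	v, _ := om.GetAndMoveToBack("b") // order is now: a, c, b
	fmt.Println(v)                   // => 2

	for pair := om.Oldest(); pair != nil; pair = pair.Next() {
		fmt.Printf("%s=%d ", pair.Key, pair.Value)
	} // prints: a=1 c=3 b=2
}
```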
// FromOldest returns an iterator over all the key-value pairs in the map, starting from the oldest pair.
func (om *OrderedMap[K, V]) FromOldest() iter.Seq2[K, V] {
return func(yield func(K, V) bool) {
for pair := om.Oldest(); pair != nil; pair = pair.Next() {
if !yield(pair.Key, pair.Value) {
return
}
}
}
}
// FromNewest returns an iterator over all the key-value pairs in the map, starting from the newest pair.
func (om *OrderedMap[K, V]) FromNewest() iter.Seq2[K, V] {
return func(yield func(K, V) bool) {
for pair := om.Newest(); pair != nil; pair = pair.Prev() {
if !yield(pair.Key, pair.Value) {
return
}
}
}
}
// KeysFromOldest returns an iterator over all the keys in the map, starting from the oldest pair.
func (om *OrderedMap[K, V]) KeysFromOldest() iter.Seq[K] {
return func(yield func(K) bool) {
for pair := om.Oldest(); pair != nil; pair = pair.Next() {
if !yield(pair.Key) {
return
}
}
}
}
// KeysFromNewest returns an iterator over all the keys in the map, starting from the newest pair.
func (om *OrderedMap[K, V]) KeysFromNewest() iter.Seq[K] {
return func(yield func(K) bool) {
for pair := om.Newest(); pair != nil; pair = pair.Prev() {
if !yield(pair.Key) {
return
}
}
}
}
// ValuesFromOldest returns an iterator over all the values in the map, starting from the oldest pair.
func (om *OrderedMap[K, V]) ValuesFromOldest() iter.Seq[V] {
return func(yield func(V) bool) {
for pair := om.Oldest(); pair != nil; pair = pair.Next() {
if !yield(pair.Value) {
return
}
}
}
}
// ValuesFromNewest returns an iterator over all the values in the map, starting from the newest pair.
func (om *OrderedMap[K, V]) ValuesFromNewest() iter.Seq[V] {
return func(yield func(V) bool) {
for pair := om.Newest(); pair != nil; pair = pair.Prev() {
if !yield(pair.Value) {
return
}
}
}
}
// From creates a new OrderedMap from an iterator over key-value pairs.
func From[K comparable, V any](i iter.Seq2[K, V]) *OrderedMap[K, V] {
oMap := New[K, V]()
for k, v := range i {
oMap.Set(k, v)
}
return oMap
}

View file

@ -1,71 +0,0 @@
package orderedmap
import (
"fmt"
"gopkg.in/yaml.v3"
)
var (
_ yaml.Marshaler = &OrderedMap[int, any]{}
_ yaml.Unmarshaler = &OrderedMap[int, any]{}
)
// MarshalYAML implements the yaml.Marshaler interface.
func (om *OrderedMap[K, V]) MarshalYAML() (interface{}, error) {
if om == nil {
return []byte("null"), nil
}
node := yaml.Node{
Kind: yaml.MappingNode,
}
for pair := om.Oldest(); pair != nil; pair = pair.Next() {
key, value := pair.Key, pair.Value
keyNode := &yaml.Node{}
// serialize key to yaml, then deserialize it back into the node
// this is a hack to get the correct tag for the key
if err := keyNode.Encode(key); err != nil {
return nil, err
}
valueNode := &yaml.Node{}
if err := valueNode.Encode(value); err != nil {
return nil, err
}
node.Content = append(node.Content, keyNode, valueNode)
}
return &node, nil
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (om *OrderedMap[K, V]) UnmarshalYAML(value *yaml.Node) error {
if value.Kind != yaml.MappingNode {
return fmt.Errorf("pipeline must contain YAML mapping, has %v", value.Kind)
}
if om.list == nil {
om.initialize(0)
}
for index := 0; index < len(value.Content); index += 2 {
var key K
var val V
if err := value.Content[index].Decode(&key); err != nil {
return err
}
if err := value.Content[index+1].Decode(&val); err != nil {
return err
}
om.Set(key, val)
}
return nil
}
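For completeness, a minimal sketch of the YAML round-trip this enables (same assumed module path as above):

```go
package main

import (
	"fmt"

	orderedmap "github.com/wk8/go-ordered-map/v2"
	"gopkg.in/yaml.v3"
)

func main() {
	om := orderedmap.New[string, int]()
	om.Set("zulu", 1)
	om.Set("alpha", 2)

	out, err := yaml.Marshal(om)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// prints (insertion order, not alphabetical):
	// zulu: 1
	// alpha: 2
}
```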

View file

@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
return sendfile(outfd, infd, offset, count)
}
func Dup3(oldfd, newfd, flags int) error {
if oldfd == newfd || flags&^O_CLOEXEC != 0 {
return EINVAL
}
how := F_DUP2FD
if flags&O_CLOEXEC != 0 {
how = F_DUP2FD_CLOEXEC
}
_, err := fcntl(oldfd, how, newfd)
return err
}
/*
* Exposed directly
*/
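A minimal usage sketch for the new Dup3 (assuming a platform where golang.org/x/sys/unix exports it, which includes the implementation added here): duplicate stdout onto a fixed descriptor with close-on-exec set.

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Duplicate stdout onto fd 10 with O_CLOEXEC, so the copy won't leak
	// into child processes. Note that Dup3, unlike Dup2, returns EINVAL
	// when oldfd == newfd; that is exactly the check in the implementation above.
	if err := unix.Dup3(int(os.Stdout.Fd()), 10, unix.O_CLOEXEC); err != nil {
		fmt.Fprintln(os.Stderr, "dup3:", err)
		return
	}
	dup := os.NewFile(10, "stdout-copy")
	defer dup.Close()
	fmt.Fprintln(dup, "written via the duplicated descriptor")
}
```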

View file

@ -43,8 +43,8 @@ type DLL struct {
// LoadDLL loads DLL file into memory.
//
// Warning: using LoadDLL without an absolute path name is subject to
// DLL preloading attacks. To safely load a system DLL, use LazyDLL
// with System set to true, or use LoadLibraryEx directly.
// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL],
// or use [LoadLibraryEx] directly.
func LoadDLL(name string) (dll *DLL, err error) {
namep, err := UTF16PtrFromString(name)
if err != nil {
@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc {
}
// NewLazyDLL creates new LazyDLL associated with DLL file.
//
// Warning: using NewLazyDLL without an absolute path name is subject to
// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL].
func NewLazyDLL(name string) *LazyDLL {
return &LazyDLL{Name: name}
}
@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) {
}
return &DLL{Name: name, Handle: h}, nil
}
type errString string
func (s errString) Error() string { return string(s) }
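The reworded warnings above are the point of this doc change; a minimal sketch of the safe pattern they recommend (using golang.org/x/sys/windows, which this vendor tree ships):

```go
//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// NewLazySystemDLL only searches the Windows system directory, which
	// avoids the DLL-preloading attack that LoadDLL/NewLazyDLL with a
	// relative name is subject to.
	kernel32 := windows.NewLazySystemDLL("kernel32.dll")
	getTickCount64 := kernel32.NewProc("GetTickCount64")

	ticks, _, _ := getTickCount64.Call() // milliseconds since boot
	fmt.Printf("system uptime: %d ms\n", ticks)
}
```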

34
vendor/modules.txt vendored
View file

@ -91,9 +91,6 @@ github.com/asaskevich/govalidator
## explicit
github.com/aymerick/douceur/css
github.com/aymerick/douceur/parser
# github.com/bahlo/generic-list-go v0.2.0
## explicit; go 1.18
github.com/bahlo/generic-list-go
# github.com/beorn7/perks v1.0.1
## explicit; go 1.11
github.com/beorn7/perks/quantile
@ -101,9 +98,6 @@ github.com/beorn7/perks/quantile
## explicit; go 1.14
github.com/buckket/go-blurhash
github.com/buckket/go-blurhash/base83
# github.com/buger/jsonparser v1.1.1
## explicit; go 1.13
github.com/buger/jsonparser
# github.com/bytedance/sonic v1.12.6
## explicit; go 1.17
github.com/bytedance/sonic
@ -938,27 +932,28 @@ github.com/ugorji/go/codec
github.com/ulule/limiter/v3
github.com/ulule/limiter/v3/drivers/store/common
github.com/ulule/limiter/v3/drivers/store/memory
# github.com/uptrace/bun v1.2.6
## explicit; go 1.23
# github.com/uptrace/bun v1.2.8
## explicit; go 1.22.0
github.com/uptrace/bun
github.com/uptrace/bun/dialect
github.com/uptrace/bun/dialect/feature
github.com/uptrace/bun/dialect/sqltype
github.com/uptrace/bun/extra/bunjson
github.com/uptrace/bun/internal
github.com/uptrace/bun/internal/ordered
github.com/uptrace/bun/internal/parser
github.com/uptrace/bun/internal/tagparser
github.com/uptrace/bun/migrate
github.com/uptrace/bun/migrate/sqlschema
github.com/uptrace/bun/schema
# github.com/uptrace/bun/dialect/pgdialect v1.2.6
## explicit; go 1.23
# github.com/uptrace/bun/dialect/pgdialect v1.2.8
## explicit; go 1.22.0
github.com/uptrace/bun/dialect/pgdialect
# github.com/uptrace/bun/dialect/sqlitedialect v1.2.6
## explicit; go 1.23
# github.com/uptrace/bun/dialect/sqlitedialect v1.2.8
## explicit; go 1.22.0
github.com/uptrace/bun/dialect/sqlitedialect
# github.com/uptrace/bun/extra/bunotel v1.2.6
## explicit; go 1.23
# github.com/uptrace/bun/extra/bunotel v1.2.8
## explicit; go 1.22.0
github.com/uptrace/bun/extra/bunotel
# github.com/uptrace/opentelemetry-go-extra/otelsql v0.3.2
## explicit; go 1.22
@ -975,9 +970,6 @@ github.com/vmihailenco/tagparser/v2/internal/parser
# github.com/wagslane/go-password-validator v0.3.0
## explicit; go 1.16
github.com/wagslane/go-password-validator
# github.com/wk8/go-ordered-map/v2 v2.1.9-0.20240816141633-0a40785b4f41
## explicit; go 1.23
github.com/wk8/go-ordered-map/v2
# github.com/yuin/goldmark v1.7.8
## explicit; go 1.19
github.com/yuin/goldmark
@ -998,7 +990,7 @@ go.mongodb.org/mongo-driver/bson/bsonrw
go.mongodb.org/mongo-driver/bson/bsontype
go.mongodb.org/mongo-driver/bson/primitive
go.mongodb.org/mongo-driver/x/bsonx/bsoncore
# go.opentelemetry.io/otel v1.32.0 => go.opentelemetry.io/otel v1.29.0
# go.opentelemetry.io/otel v1.33.0 => go.opentelemetry.io/otel v1.29.0
## explicit; go 1.21
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
@ -1039,7 +1031,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry
# go.opentelemetry.io/otel/exporters/prometheus v0.51.0
## explicit; go 1.21
go.opentelemetry.io/otel/exporters/prometheus
# go.opentelemetry.io/otel/metric v1.32.0 => go.opentelemetry.io/otel/metric v1.29.0
# go.opentelemetry.io/otel/metric v1.33.0 => go.opentelemetry.io/otel/metric v1.29.0
## explicit; go 1.21
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
@ -1060,7 +1052,7 @@ go.opentelemetry.io/otel/sdk/metric/internal/aggregate
go.opentelemetry.io/otel/sdk/metric/internal/exemplar
go.opentelemetry.io/otel/sdk/metric/internal/x
go.opentelemetry.io/otel/sdk/metric/metricdata
# go.opentelemetry.io/otel/trace v1.32.0 => go.opentelemetry.io/otel/trace v1.29.0
# go.opentelemetry.io/otel/trace v1.33.0 => go.opentelemetry.io/otel/trace v1.29.0
## explicit; go 1.21
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
@ -1146,7 +1138,7 @@ golang.org/x/oauth2/internal
## explicit; go 1.18
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
# golang.org/x/sys v0.28.0
# golang.org/x/sys v0.29.0
## explicit; go 1.18
golang.org/x/sys/cpu
golang.org/x/sys/unix