mirror of
https://github.com/superseriousbusiness/gotosocial.git
synced 2024-10-31 22:40:01 +00:00
[feature] Read + Write tombstones for deleted Actors (#1005)
* [feature] Read + Write tombstones for deleted Actors * copyTombstone * update to use resultcache instead of old ttl cache Signed-off-by: kim <grufwub@gmail.com> * update go-cache library to fix result cache capacity / ordering bugs Signed-off-by: kim <grufwub@gmail.com> * bump go-cache/v3 to v3.1.6 to fix bugs Signed-off-by: kim <grufwub@gmail.com> * switch on status code * better explain ErrGone reasoning Signed-off-by: kim <grufwub@gmail.com> Co-authored-by: kim <grufwub@gmail.com>
This commit is contained in:
parent
948e90b95a
commit
edcee14d07
47 changed files with 3808 additions and 7 deletions
4
go.mod
4
go.mod
|
@ -6,6 +6,7 @@ require (
|
|||
codeberg.org/gruf/go-bytesize v1.0.0
|
||||
codeberg.org/gruf/go-byteutil v1.0.2
|
||||
codeberg.org/gruf/go-cache/v2 v2.1.4
|
||||
codeberg.org/gruf/go-cache/v3 v3.1.6
|
||||
codeberg.org/gruf/go-debug v1.2.0
|
||||
codeberg.org/gruf/go-errors/v2 v2.0.2
|
||||
codeberg.org/gruf/go-kv v1.5.2
|
||||
|
@ -65,9 +66,12 @@ require (
|
|||
codeberg.org/gruf/go-fastcopy v1.1.1 // indirect
|
||||
codeberg.org/gruf/go-fastpath v1.0.3 // indirect
|
||||
codeberg.org/gruf/go-hashenc v1.0.2 // indirect
|
||||
codeberg.org/gruf/go-mangler v1.1.1 // indirect
|
||||
codeberg.org/gruf/go-maps v1.0.3 // indirect
|
||||
codeberg.org/gruf/go-pools v1.1.0 // indirect
|
||||
codeberg.org/gruf/go-sched v1.1.1 // indirect
|
||||
github.com/aymerick/douceur v0.2.0 // indirect
|
||||
github.com/cespare/xxhash v1.1.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dsoprea/go-exif/v3 v3.0.0-20210625224831-a6301f85c82b // indirect
|
||||
github.com/dsoprea/go-iptc v0.0.0-20200610044640-bc9ca208b413 // indirect
|
||||
|
|
13
go.sum
13
go.sum
|
@ -71,6 +71,8 @@ codeberg.org/gruf/go-byteutil v1.0.2 h1:OesVyK5VKWeWdeDR00zRJ+Oy8hjXx1pBhn7WVvcZ
|
|||
codeberg.org/gruf/go-byteutil v1.0.2/go.mod h1:cWM3tgMCroSzqoBXUXMhvxTxYJp+TbCr6ioISRY5vSU=
|
||||
codeberg.org/gruf/go-cache/v2 v2.1.4 h1:r+6wJiTHZn0qqf+p1VtAjGOgXXJl7s8txhPIwoSMZtI=
|
||||
codeberg.org/gruf/go-cache/v2 v2.1.4/go.mod h1:j7teiz814lG0PfSfnUs+6HA+2/jcjTAR71Ou3Wbt2Xk=
|
||||
codeberg.org/gruf/go-cache/v3 v3.1.6 h1:LMpQoLRoGTH64WyLCew6wMVqC3Vzve09MCYbt5c0WR4=
|
||||
codeberg.org/gruf/go-cache/v3 v3.1.6/go.mod h1:h6im2UVGdrGtNt4IVKARVeoW4kAdok5ts7CbH15UWXs=
|
||||
codeberg.org/gruf/go-debug v1.2.0 h1:WBbTMnK1ArFKUmgv04aO2JiC/daTOB8zQGi521qb7OU=
|
||||
codeberg.org/gruf/go-debug v1.2.0/go.mod h1:N+vSy9uJBQgpQcJUqjctvqFz7tBHJf+S/PIjLILzpLg=
|
||||
codeberg.org/gruf/go-errors/v2 v2.0.0/go.mod h1:ZRhbdhvgoUA3Yw6e56kd9Ox984RrvbEFC2pOXyHDJP4=
|
||||
|
@ -87,6 +89,10 @@ codeberg.org/gruf/go-kv v1.5.2 h1:B0RkAXLUXYn3Za1NzTXOcUvAc+JUC2ZadTMkCUDa0mc=
|
|||
codeberg.org/gruf/go-kv v1.5.2/go.mod h1:al6ASW/2CbGqz2YcM8B00tvWnVi1bU1CH3HYs5tZxo4=
|
||||
codeberg.org/gruf/go-logger/v2 v2.2.1 h1:RP2u059EQKTBFV3cN8X6xDxNk2RkzqdgXGKflKqB7Oc=
|
||||
codeberg.org/gruf/go-logger/v2 v2.2.1/go.mod h1:m/vBfG5jNUmYXI8Hg9aVSk7Pn8YgEBITQB/B/CzdRss=
|
||||
codeberg.org/gruf/go-mangler v1.1.1 h1:Ci56Le8PKrfESTNYjIZu3AoqAf/O2mX8BTWC6EuN7HA=
|
||||
codeberg.org/gruf/go-mangler v1.1.1/go.mod h1:z6nL/uyp1AnEFPMD7YO3J/kQTY6fBPlIjwhqBMyPExo=
|
||||
codeberg.org/gruf/go-maps v1.0.3 h1:VDwhnnaVNUIy5O93CvkcE2IZXnMB1+IJjzfop9V12es=
|
||||
codeberg.org/gruf/go-maps v1.0.3/go.mod h1:D5LNDxlC9rsDuVQVM6JObaVGAdHB6g2dTdOdkh1aXWA=
|
||||
codeberg.org/gruf/go-mutexes v1.1.4 h1:HWaIZavPL92SBJxNOlIXAmAT5CB2hAs72/lBN31jnzM=
|
||||
codeberg.org/gruf/go-mutexes v1.1.4/go.mod h1:1j/6/MBeBQUedAtAtysLLnBKogfOZAxdym0E3wlaBD8=
|
||||
codeberg.org/gruf/go-pools v1.1.0 h1:LbYP24eQLl/YI1fSU2pafiwhGol1Z1zPjRrMsXpF88s=
|
||||
|
@ -102,6 +108,7 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
|
|||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
|
||||
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
|
||||
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU=
|
||||
github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY=
|
||||
|
@ -113,6 +120,7 @@ github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd3
|
|||
github.com/buckket/go-blurhash v1.1.0 h1:X5M6r0LIvwdvKiUtiNcRL2YlmOfMzYobI3VCKCZc9Do=
|
||||
github.com/buckket/go-blurhash v1.1.0/go.mod h1:aT2iqo5W9vu9GpyoLErKfTHwgODsZp3bQfXjXJUxNb8=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U=
|
||||
|
@ -129,6 +137,7 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
|
|||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cnf/structhash v0.0.0-20201127153200-e1b16c1ebc08 h1:ox2F0PSMlrAAiAdknSRMDrAr8mfxPCfSZolH+/qQnyQ=
|
||||
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
github.com/coreos/go-oidc/v3 v3.4.0 h1:xz7elHb/LDwm/ERpwHd+5nb7wFHL32rsr6bBOgaeu6g=
|
||||
|
@ -185,6 +194,7 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
|
|||
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/fxamacker/cbor v1.5.1 h1:XjQWBgdmQyqimslUh5r4tUGmoqzHmBFQOImkWGi2awg=
|
||||
github.com/gavv/httpexpect v2.0.0+incompatible h1:1X9kcRshkSKEjNJJxX9Y9mQ5BRfbxU5kORdjhlA1yX8=
|
||||
github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
|
@ -462,6 +472,7 @@ github.com/minio/minio-go/v7 v7.0.43 h1:14Q4lwblqTdlAmba05oq5xL0VBLHi06zS4yLnIkz
|
|||
github.com/minio/minio-go/v7 v7.0.43/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
|
||||
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
|
||||
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
|
@ -526,6 +537,7 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykE
|
|||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw=
|
||||
github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
|
||||
|
@ -612,6 +624,7 @@ github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAh
|
|||
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
|
||||
github.com/wagslane/go-password-validator v0.3.0 h1:vfxOPzGHkz5S146HDpavl0cw1DSVP061Ry2PX0/ON6I=
|
||||
github.com/wagslane/go-password-validator v0.3.0/go.mod h1:TI1XJ6T5fRdRnHqHt14pvy1tNVnrwe7m3/f1f2fDphQ=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
|
||||
|
|
|
@ -88,6 +88,7 @@ type DBService struct {
|
|||
db.Status
|
||||
db.Timeline
|
||||
db.User
|
||||
db.Tombstone
|
||||
conn *DBConn
|
||||
}
|
||||
|
||||
|
@ -181,12 +182,16 @@ func NewBunDBService(ctx context.Context) (db.DB, error) {
|
|||
status := &statusDB{conn: conn, cache: cache.NewStatusCache()}
|
||||
emoji := &emojiDB{conn: conn, cache: cache.NewEmojiCache()}
|
||||
timeline := &timelineDB{conn: conn}
|
||||
tombstone := &tombstoneDB{conn: conn}
|
||||
|
||||
// Setup DB cross-referencing
|
||||
accounts.status = status
|
||||
status.accounts = accounts
|
||||
timeline.status = status
|
||||
|
||||
// Initialize db structs
|
||||
tombstone.init()
|
||||
|
||||
ps := &DBService{
|
||||
Account: accounts,
|
||||
Admin: &adminDB{
|
||||
|
@ -228,7 +233,8 @@ func NewBunDBService(ctx context.Context) (db.DB, error) {
|
|||
conn: conn,
|
||||
cache: userCache,
|
||||
},
|
||||
conn: conn,
|
||||
Tombstone: tombstone,
|
||||
conn: conn,
|
||||
}
|
||||
|
||||
// we can confidently return this useable service now
|
||||
|
|
|
@ -0,0 +1,57 @@
|
|||
/*
|
||||
GoToSocial
|
||||
Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
gtsmodel "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
|
||||
"github.com/uptrace/bun"
|
||||
)
|
||||
|
||||
func init() {
|
||||
up := func(ctx context.Context, db *bun.DB) error {
|
||||
return db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error {
|
||||
if _, err := tx.NewCreateTable().Model(>smodel.Tombstone{}).IfNotExists().Exec(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := tx.
|
||||
NewCreateIndex().
|
||||
Model(>smodel.Tombstone{}).
|
||||
Index("tombstone_uri_idx").
|
||||
Column("uri").
|
||||
Exec(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
down := func(ctx context.Context, db *bun.DB) error {
|
||||
return db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error {
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := Migrations.Register(up, down); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
101
internal/db/bundb/tombstone.go
Normal file
101
internal/db/bundb/tombstone.go
Normal file
|
@ -0,0 +1,101 @@
|
|||
/*
|
||||
GoToSocial
|
||||
Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
package bundb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/superseriousbusiness/gotosocial/internal/db"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
|
||||
"github.com/uptrace/bun"
|
||||
|
||||
"codeberg.org/gruf/go-cache/v3/result"
|
||||
)
|
||||
|
||||
type tombstoneDB struct {
|
||||
conn *DBConn
|
||||
cache *result.Cache[*gtsmodel.Tombstone]
|
||||
}
|
||||
|
||||
func (t *tombstoneDB) init() {
|
||||
// Initialize tombstone result cache
|
||||
t.cache = result.NewSized([]string{
|
||||
"ID",
|
||||
"URI",
|
||||
}, func(t1 *gtsmodel.Tombstone) *gtsmodel.Tombstone {
|
||||
t2 := new(gtsmodel.Tombstone)
|
||||
*t2 = *t1
|
||||
return t2
|
||||
}, 1000)
|
||||
|
||||
// Set cache TTL and start sweep routine
|
||||
t.cache.SetTTL(time.Minute*5, false)
|
||||
t.cache.Start(time.Second * 10)
|
||||
}
|
||||
|
||||
func (t *tombstoneDB) GetTombstoneByURI(ctx context.Context, uri string) (*gtsmodel.Tombstone, db.Error) {
|
||||
return t.cache.Load("URI", func() (*gtsmodel.Tombstone, error) {
|
||||
var tomb gtsmodel.Tombstone
|
||||
|
||||
q := t.conn.
|
||||
NewSelect().
|
||||
Model(&tomb).
|
||||
Where("? = ?", bun.Ident("tombstone.uri"), uri)
|
||||
|
||||
if err := q.Scan(ctx); err != nil {
|
||||
return nil, t.conn.ProcessError(err)
|
||||
}
|
||||
|
||||
return &tomb, nil
|
||||
}, uri)
|
||||
}
|
||||
|
||||
func (t *tombstoneDB) TombstoneExistsWithURI(ctx context.Context, uri string) (bool, db.Error) {
|
||||
tomb, err := t.GetTombstoneByURI(ctx, uri)
|
||||
if err == db.ErrNoEntries {
|
||||
err = nil
|
||||
}
|
||||
return (tomb != nil), err
|
||||
}
|
||||
|
||||
func (t *tombstoneDB) PutTombstone(ctx context.Context, tombstone *gtsmodel.Tombstone) db.Error {
|
||||
return t.cache.Store(tombstone, func() error {
|
||||
_, err := t.conn.
|
||||
NewInsert().
|
||||
Model(tombstone).
|
||||
Exec(ctx)
|
||||
return t.conn.ProcessError(err)
|
||||
})
|
||||
}
|
||||
|
||||
func (t *tombstoneDB) DeleteTombstone(ctx context.Context, id string) db.Error {
|
||||
if _, err := t.conn.
|
||||
NewDelete().
|
||||
TableExpr("? AS ?", bun.Ident("tombstones"), bun.Ident("tombstone")).
|
||||
Where("? = ?", bun.Ident("tombstone.id"), id).
|
||||
Exec(ctx); err != nil {
|
||||
return t.conn.ProcessError(err)
|
||||
}
|
||||
|
||||
// Invalidate from cache by ID
|
||||
t.cache.Invalidate("ID", id)
|
||||
|
||||
return nil
|
||||
}
|
|
@ -45,6 +45,7 @@ type DB interface {
|
|||
Status
|
||||
Timeline
|
||||
User
|
||||
Tombstone
|
||||
|
||||
/*
|
||||
USEFUL CONVERSION FUNCTIONS
|
||||
|
|
40
internal/db/tombstone.go
Normal file
40
internal/db/tombstone.go
Normal file
|
@ -0,0 +1,40 @@
|
|||
/*
|
||||
GoToSocial
|
||||
Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
|
||||
)
|
||||
|
||||
// Tombstone contains functionality for storing + retrieving tombstones for remote AP Activities + Objects.
|
||||
type Tombstone interface {
|
||||
// GetTombstoneByURI attempts to fetch a tombstone by the given URI.
|
||||
GetTombstoneByURI(ctx context.Context, uri string) (*gtsmodel.Tombstone, Error)
|
||||
|
||||
// TombstoneExistsWithURI returns true if a tombstone with the given URI exists.
|
||||
TombstoneExistsWithURI(ctx context.Context, uri string) (bool, Error)
|
||||
|
||||
// PutTombstone creates a new tombstone in the database.
|
||||
PutTombstone(ctx context.Context, tombstone *gtsmodel.Tombstone) Error
|
||||
|
||||
// DeleteTombstone deletes a tombstone with the given ID.
|
||||
DeleteTombstone(ctx context.Context, id string) Error
|
||||
}
|
|
@ -37,6 +37,7 @@
|
|||
"github.com/superseriousbusiness/gotosocial/internal/gtserror"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/log"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/transport"
|
||||
)
|
||||
|
||||
/*
|
||||
|
@ -201,8 +202,21 @@ func (f *federator) AuthenticateFederatedRequest(ctx context.Context, requestedU
|
|||
// REMOTE ACCOUNT REQUEST WITHOUT KEY CACHED LOCALLY
|
||||
// the request is remote and we don't have the public key yet,
|
||||
// so we need to authenticate the request properly by dereferencing the remote key
|
||||
gone, err := f.CheckGone(ctx, requestingPublicKeyID)
|
||||
if err != nil {
|
||||
errWithCode := gtserror.NewErrorInternalError(fmt.Errorf("error checking for tombstone for %s: %s", requestingPublicKeyID, err))
|
||||
log.Debug(errWithCode)
|
||||
return nil, errWithCode
|
||||
}
|
||||
|
||||
if gone {
|
||||
errWithCode := gtserror.NewErrorGone(fmt.Errorf("account with public key %s is gone", requestingPublicKeyID))
|
||||
log.Debug(errWithCode)
|
||||
return nil, errWithCode
|
||||
}
|
||||
|
||||
log.Tracef("proceeding with dereference for uncached public key %s", requestingPublicKeyID)
|
||||
transport, err := f.transportController.NewTransportForUsername(ctx, requestedUsername)
|
||||
trans, err := f.transportController.NewTransportForUsername(ctx, requestedUsername)
|
||||
if err != nil {
|
||||
errWithCode := gtserror.NewErrorInternalError(fmt.Errorf("error creating transport for %s: %s", requestedUsername, err))
|
||||
log.Debug(errWithCode)
|
||||
|
@ -210,8 +224,21 @@ func (f *federator) AuthenticateFederatedRequest(ctx context.Context, requestedU
|
|||
}
|
||||
|
||||
// The actual http call to the remote server is made right here in the Dereference function.
|
||||
b, err := transport.Dereference(ctx, requestingPublicKeyID)
|
||||
b, err := trans.Dereference(ctx, requestingPublicKeyID)
|
||||
if err != nil {
|
||||
if errors.Is(err, transport.ErrGone) {
|
||||
// if we get a 410 error it means the account that owns this public key has been deleted;
|
||||
// we should add a tombstone to our database so that we can avoid trying to deref it in future
|
||||
if err := f.HandleGone(ctx, requestingPublicKeyID); err != nil {
|
||||
errWithCode := gtserror.NewErrorInternalError(fmt.Errorf("error marking account with public key %s as gone: %s", requestingPublicKeyID, err))
|
||||
log.Debug(errWithCode)
|
||||
return nil, errWithCode
|
||||
}
|
||||
errWithCode := gtserror.NewErrorGone(fmt.Errorf("account with public key %s is gone", requestingPublicKeyID))
|
||||
log.Debug(errWithCode)
|
||||
return nil, errWithCode
|
||||
}
|
||||
|
||||
errWithCode := gtserror.NewErrorUnauthorized(fmt.Errorf("error dereferencing public key %s: %s", requestingPublicKeyID, err))
|
||||
log.Debug(errWithCode)
|
||||
return nil, errWithCode
|
||||
|
|
|
@ -169,6 +169,13 @@ func (f *federator) AuthenticatePostInbox(ctx context.Context, w http.ResponseWr
|
|||
// if 400, 401, or 403, obey the interface by writing the header and bailing
|
||||
w.WriteHeader(errWithCode.Code())
|
||||
return ctx, false, nil
|
||||
case http.StatusGone:
|
||||
// if the requesting account has gone (http 410) then likely
|
||||
// inbox post was a delete, we can just write 202 and leave,
|
||||
// since we didn't know about the account anyway, so we can't
|
||||
// do any further processing
|
||||
w.WriteHeader(http.StatusAccepted)
|
||||
return ctx, false, nil
|
||||
default:
|
||||
// if not, there's been a proper error
|
||||
return ctx, false, err
|
||||
|
|
|
@ -182,6 +182,94 @@ func (suite *FederatingProtocolTestSuite) TestAuthenticatePostInbox() {
|
|||
suite.Equal(sendingAccount.Username, requestingAccount.Username)
|
||||
}
|
||||
|
||||
func (suite *FederatingProtocolTestSuite) TestAuthenticatePostGone() {
|
||||
// the activity we're gonna use
|
||||
activity := suite.testActivities["delete_https://somewhere.mysterious/users/rest_in_piss#main-key"]
|
||||
inboxAccount := suite.testAccounts["local_account_1"]
|
||||
|
||||
fedWorker := concurrency.NewWorkerPool[messages.FromFederator](-1, -1)
|
||||
|
||||
httpClient := testrig.NewMockHTTPClient(nil, "../../testrig/media")
|
||||
tc := testrig.NewTestTransportController(httpClient, suite.db, fedWorker)
|
||||
|
||||
// now setup module being tested, with the mock transport controller
|
||||
federator := federation.NewFederator(suite.db, testrig.NewTestFederatingDB(suite.db, fedWorker), tc, suite.tc, testrig.NewTestMediaManager(suite.db, suite.storage))
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "http://localhost:8080/users/the_mighty_zork/inbox", nil)
|
||||
// we need these headers for the request to be validated
|
||||
request.Header.Set("Signature", activity.SignatureHeader)
|
||||
request.Header.Set("Date", activity.DateHeader)
|
||||
request.Header.Set("Digest", activity.DigestHeader)
|
||||
|
||||
verifier, err := httpsig.NewVerifier(request)
|
||||
suite.NoError(err)
|
||||
|
||||
ctx := context.Background()
|
||||
// by the time AuthenticatePostInbox is called, PostInboxRequestBodyHook should have already been called,
|
||||
// which should have set the account and username onto the request. We can replicate that behavior here:
|
||||
ctxWithAccount := context.WithValue(ctx, ap.ContextReceivingAccount, inboxAccount)
|
||||
ctxWithVerifier := context.WithValue(ctxWithAccount, ap.ContextRequestingPublicKeyVerifier, verifier)
|
||||
ctxWithSignature := context.WithValue(ctxWithVerifier, ap.ContextRequestingPublicKeySignature, activity.SignatureHeader)
|
||||
|
||||
// we can pass this recorder as a writer and read it back after
|
||||
recorder := httptest.NewRecorder()
|
||||
|
||||
// trigger the function being tested, and return the new context it creates
|
||||
_, authed, err := federator.AuthenticatePostInbox(ctxWithSignature, recorder, request)
|
||||
suite.NoError(err)
|
||||
suite.False(authed)
|
||||
suite.Equal(http.StatusAccepted, recorder.Code)
|
||||
}
|
||||
|
||||
func (suite *FederatingProtocolTestSuite) TestAuthenticatePostGoneNoTombstoneYet() {
|
||||
// delete the relevant tombstone
|
||||
if err := suite.db.DeleteTombstone(context.Background(), suite.testTombstones["https://somewhere.mysterious/users/rest_in_piss#main-key"].ID); err != nil {
|
||||
suite.FailNow(err.Error())
|
||||
}
|
||||
|
||||
// the activity we're gonna use
|
||||
activity := suite.testActivities["delete_https://somewhere.mysterious/users/rest_in_piss#main-key"]
|
||||
inboxAccount := suite.testAccounts["local_account_1"]
|
||||
|
||||
fedWorker := concurrency.NewWorkerPool[messages.FromFederator](-1, -1)
|
||||
|
||||
httpClient := testrig.NewMockHTTPClient(nil, "../../testrig/media")
|
||||
tc := testrig.NewTestTransportController(httpClient, suite.db, fedWorker)
|
||||
|
||||
// now setup module being tested, with the mock transport controller
|
||||
federator := federation.NewFederator(suite.db, testrig.NewTestFederatingDB(suite.db, fedWorker), tc, suite.tc, testrig.NewTestMediaManager(suite.db, suite.storage))
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "http://localhost:8080/users/the_mighty_zork/inbox", nil)
|
||||
// we need these headers for the request to be validated
|
||||
request.Header.Set("Signature", activity.SignatureHeader)
|
||||
request.Header.Set("Date", activity.DateHeader)
|
||||
request.Header.Set("Digest", activity.DigestHeader)
|
||||
|
||||
verifier, err := httpsig.NewVerifier(request)
|
||||
suite.NoError(err)
|
||||
|
||||
ctx := context.Background()
|
||||
// by the time AuthenticatePostInbox is called, PostInboxRequestBodyHook should have already been called,
|
||||
// which should have set the account and username onto the request. We can replicate that behavior here:
|
||||
ctxWithAccount := context.WithValue(ctx, ap.ContextReceivingAccount, inboxAccount)
|
||||
ctxWithVerifier := context.WithValue(ctxWithAccount, ap.ContextRequestingPublicKeyVerifier, verifier)
|
||||
ctxWithSignature := context.WithValue(ctxWithVerifier, ap.ContextRequestingPublicKeySignature, activity.SignatureHeader)
|
||||
|
||||
// we can pass this recorder as a writer and read it back after
|
||||
recorder := httptest.NewRecorder()
|
||||
|
||||
// trigger the function being tested, and return the new context it creates
|
||||
_, authed, err := federator.AuthenticatePostInbox(ctxWithSignature, recorder, request)
|
||||
suite.NoError(err)
|
||||
suite.False(authed)
|
||||
suite.Equal(http.StatusAccepted, recorder.Code)
|
||||
|
||||
// there should be a tombstone in the db now for this account
|
||||
exists, err := suite.db.TombstoneExistsWithURI(ctx, "https://somewhere.mysterious/users/rest_in_piss#main-key")
|
||||
suite.NoError(err)
|
||||
suite.True(exists)
|
||||
}
|
||||
|
||||
func (suite *FederatingProtocolTestSuite) TestBlocked1() {
|
||||
fedWorker := concurrency.NewWorkerPool[messages.FromFederator](-1, -1)
|
||||
httpClient := testrig.NewMockHTTPClient(nil, "../../testrig/media")
|
||||
|
|
|
@ -36,6 +36,7 @@ type FederatorStandardTestSuite struct {
|
|||
testAccounts map[string]*gtsmodel.Account
|
||||
testStatuses map[string]*gtsmodel.Status
|
||||
testActivities map[string]testrig.ActivityWithSignature
|
||||
testTombstones map[string]*gtsmodel.Tombstone
|
||||
}
|
||||
|
||||
// SetupSuite sets some variables on the suite that we can use as consts (more or less) throughout
|
||||
|
@ -45,6 +46,7 @@ func (suite *FederatorStandardTestSuite) SetupSuite() {
|
|||
suite.tc = testrig.NewTestTypeConverter(suite.db)
|
||||
suite.testAccounts = testrig.NewTestAccounts()
|
||||
suite.testStatuses = testrig.NewTestStatuses()
|
||||
suite.testTombstones = testrig.NewTestTombstones()
|
||||
}
|
||||
|
||||
func (suite *FederatorStandardTestSuite) SetupTest() {
|
||||
|
|
34
internal/federation/gone.go
Normal file
34
internal/federation/gone.go
Normal file
|
@ -0,0 +1,34 @@
|
|||
package federation
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/id"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/log"
|
||||
)
|
||||
|
||||
// CheckGone checks if a tombstone exists in the database for AP Actor or Object with the given uri.
|
||||
func (f *federator) CheckGone(ctx context.Context, uri *url.URL) (bool, error) {
|
||||
return f.db.TombstoneExistsWithURI(ctx, uri.String())
|
||||
}
|
||||
|
||||
// HandleGone puts a tombstone in the database, which marks an AP Actor or Object with the given uri as gone.
|
||||
func (f *federator) HandleGone(ctx context.Context, uri *url.URL) error {
|
||||
tombstoneID, err := id.NewULID()
|
||||
if err != nil {
|
||||
err = fmt.Errorf("HandleGone: error generating id for new tombstone %s: %s", uri, err)
|
||||
log.Error(err)
|
||||
return err
|
||||
}
|
||||
|
||||
tombstone := >smodel.Tombstone{
|
||||
ID: tombstoneID,
|
||||
Domain: uri.Host,
|
||||
URI: uri.String(),
|
||||
}
|
||||
|
||||
return f.db.PutTombstone(ctx, tombstone)
|
||||
}
|
|
@ -161,3 +161,16 @@ func NewErrorUnprocessableEntity(original error, helpText ...string) WithCode {
|
|||
code: http.StatusUnprocessableEntity,
|
||||
}
|
||||
}
|
||||
|
||||
// NewErrorGone returns an ErrorWithCode 410 with the given original error and optional help text.
|
||||
func NewErrorGone(original error, helpText ...string) WithCode {
|
||||
safe := http.StatusText(http.StatusGone)
|
||||
if helpText != nil {
|
||||
safe = safe + ": " + strings.Join(helpText, ": ")
|
||||
}
|
||||
return withCode{
|
||||
original: original,
|
||||
safe: errors.New(safe),
|
||||
code: http.StatusGone,
|
||||
}
|
||||
}
|
||||
|
|
38
internal/gtsmodel/tombstone.go
Normal file
38
internal/gtsmodel/tombstone.go
Normal file
|
@ -0,0 +1,38 @@
|
|||
/*
|
||||
GoToSocial
|
||||
Copyright (C) 2021-2022 GoToSocial Authors admin@gotosocial.org
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
// Package gtsmodel contains types used *internally* by GoToSocial and added/removed/selected from the database.
|
||||
// These types should never be serialized and/or sent out via public APIs, as they contain sensitive information.
|
||||
// The annotation used on these structs is for handling them via the bun-db ORM.
|
||||
// See here for more info on bun model annotations: https://bun.uptrace.dev/guide/models.html
|
||||
package gtsmodel
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// Tombstone represents either a remote fediverse account, object, activity etc which has been deleted.
|
||||
// It's useful in cases where a remote account has been deleted, and we don't want to keep trying to process
|
||||
// subsequent activities from that account, or deletes which target it.
|
||||
type Tombstone struct {
|
||||
ID string `validate:"required,ulid" bun:"type:CHAR(26),pk,nullzero,notnull,unique"` // id of this item in the database
|
||||
CreatedAt time.Time `validate:"-" bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item created
|
||||
UpdatedAt time.Time `validate:"-" bun:"type:timestamptz,nullzero,notnull,default:current_timestamp"` // when was item last updated
|
||||
Domain string `validate:"omitempty,fqdn" bun:",nullzero,notnull"` // Domain of the Object/Actor.
|
||||
URI string `validate:"required,url" bun:",nullzero,notnull,unique"` // ActivityPub URI for this Object/Actor.
|
||||
}
|
|
@ -20,6 +20,7 @@
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
|
@ -30,6 +31,12 @@
|
|||
"github.com/superseriousbusiness/gotosocial/internal/uris"
|
||||
)
|
||||
|
||||
// ErrGone is returned from Dereference when the remote resource returns 410 GONE.
|
||||
// This is useful in cases where we're processing a delete of a resource that's already
|
||||
// been removed from the remote server, so we know we don't need to keep trying to
|
||||
// dereference it.
|
||||
var ErrGone = errors.New("remote resource returned HTTP code 410 GONE")
|
||||
|
||||
func (t *transport) Dereference(ctx context.Context, iri *url.URL) ([]byte, error) {
|
||||
// if the request is to us, we can shortcut for certain URIs rather than going through
|
||||
// the normal request flow, thereby saving time and energy
|
||||
|
@ -66,10 +73,12 @@ func (t *transport) Dereference(ctx context.Context, iri *url.URL) ([]byte, erro
|
|||
}
|
||||
defer rsp.Body.Close()
|
||||
|
||||
// Check for an expected status code
|
||||
if rsp.StatusCode != http.StatusOK {
|
||||
switch rsp.StatusCode {
|
||||
case http.StatusOK:
|
||||
return io.ReadAll(rsp.Body)
|
||||
case http.StatusGone:
|
||||
return nil, ErrGone
|
||||
default:
|
||||
return nil, fmt.Errorf("GET request to %s failed (%d): %s", iriStr, rsp.StatusCode, rsp.Status)
|
||||
}
|
||||
|
||||
return io.ReadAll(rsp.Body)
|
||||
}
|
||||
|
|
|
@ -55,6 +55,7 @@
|
|||
>smodel.RouterSession{},
|
||||
>smodel.Token{},
|
||||
>smodel.Client{},
|
||||
>smodel.Tombstone{},
|
||||
}
|
||||
|
||||
// NewTestDB returns a new initialized, empty database for testing.
|
||||
|
@ -240,6 +241,12 @@ func StandardDBSetup(db db.DB, accounts map[string]*gtsmodel.Account) {
|
|||
}
|
||||
}
|
||||
|
||||
for _, v := range NewTestTombstones() {
|
||||
if err := db.Put(ctx, v); err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := db.CreateInstanceAccount(ctx); err != nil {
|
||||
log.Panic(err)
|
||||
}
|
||||
|
|
|
@ -583,6 +583,18 @@ func NewTestAccounts() map[string]*gtsmodel.Account {
|
|||
return accounts
|
||||
}
|
||||
|
||||
func NewTestTombstones() map[string]*gtsmodel.Tombstone {
|
||||
return map[string]*gtsmodel.Tombstone{
|
||||
"https://somewhere.mysterious/users/rest_in_piss#main-key": {
|
||||
ID: "01GHBTVE9HQPPBDH2W5VH2DGN4",
|
||||
CreatedAt: TimeMustParse("2021-11-09T19:33:45Z"),
|
||||
UpdatedAt: TimeMustParse("2021-11-09T19:33:45Z"),
|
||||
Domain: "somewhere.mysterious",
|
||||
URI: "https://somewhere.mysterious/users/rest_in_piss#main-key",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewTestAttachments returns a map of attachments keyed according to which account
|
||||
// and status they belong to, and which attachment number of that status they are.
|
||||
func NewTestAttachments() map[string]*gtsmodel.MediaAttachment {
|
||||
|
@ -1835,6 +1847,16 @@ func NewTestActivities(accounts map[string]*gtsmodel.Account) map[string]Activit
|
|||
)
|
||||
announceForwarded2ZorkSig, announceForwarded2ZorkDigest, announceForwarded2ZorkDate := GetSignatureForActivity(announceForwarded2Zork, accounts["remote_account_1"].PublicKeyURI, accounts["remote_account_1"].PrivateKey, URLMustParse(accounts["local_account_1"].InboxURI))
|
||||
|
||||
deleteForRemoteAccount3 := newAPDelete(
|
||||
URLMustParse("https://somewhere.mysterious/users/rest_in_piss"),
|
||||
URLMustParse("https://somewhere.mysterious/users/rest_in_piss"),
|
||||
TimeMustParse("2022-07-13T12:13:12+02:00"),
|
||||
URLMustParse(accounts["local_account_1"].URI),
|
||||
)
|
||||
// it doesn't really matter what key we use to sign this, since we're not going to be able to verify if anyway
|
||||
keyToSignDelete := accounts["remote_account_1"].PrivateKey
|
||||
deleteForRemoteAccount3Sig, deleteForRemoteAccount3Digest, deleteForRemoteAccount3Date := GetSignatureForActivity(deleteForRemoteAccount3, "https://somewhere.mysterious/users/rest_in_piss#main-key", keyToSignDelete, URLMustParse(accounts["local_account_1"].InboxURI))
|
||||
|
||||
return map[string]ActivityWithSignature{
|
||||
"dm_for_zork": {
|
||||
Activity: createDmForZork,
|
||||
|
@ -1878,6 +1900,12 @@ func NewTestActivities(accounts map[string]*gtsmodel.Account) map[string]Activit
|
|||
DigestHeader: announceForwarded2ZorkDigest,
|
||||
DateHeader: announceForwarded2ZorkDate,
|
||||
},
|
||||
"delete_https://somewhere.mysterious/users/rest_in_piss#main-key": {
|
||||
Activity: deleteForRemoteAccount3,
|
||||
SignatureHeader: deleteForRemoteAccount3Sig,
|
||||
DigestHeader: deleteForRemoteAccount3Digest,
|
||||
DateHeader: deleteForRemoteAccount3Date,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3151,3 +3179,25 @@ func newAPAnnounce(announceID *url.URL, announceActor *url.URL, announcePublishe
|
|||
|
||||
return announce
|
||||
}
|
||||
|
||||
func newAPDelete(deleteTarget *url.URL, deleteActor *url.URL, deletePublished time.Time, deleteTo *url.URL) vocab.ActivityStreamsDelete {
|
||||
delete := streams.NewActivityStreamsDelete()
|
||||
|
||||
objectProp := streams.NewActivityStreamsObjectProperty()
|
||||
objectProp.AppendIRI(deleteTarget)
|
||||
delete.SetActivityStreamsObject(objectProp)
|
||||
|
||||
to := streams.NewActivityStreamsToProperty()
|
||||
to.AppendIRI(deleteTo)
|
||||
delete.SetActivityStreamsTo(to)
|
||||
|
||||
actor := streams.NewActivityStreamsActorProperty()
|
||||
actor.AppendIRI(deleteActor)
|
||||
delete.SetActivityStreamsActor(actor)
|
||||
|
||||
published := streams.NewActivityStreamsPublishedProperty()
|
||||
published.Set(deletePublished)
|
||||
delete.SetActivityStreamsPublished(published)
|
||||
|
||||
return delete
|
||||
}
|
||||
|
|
|
@ -33,6 +33,7 @@
|
|||
"github.com/superseriousbusiness/gotosocial/internal/concurrency"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/db"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/federation"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/log"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/messages"
|
||||
"github.com/superseriousbusiness/gotosocial/internal/transport"
|
||||
|
@ -65,6 +66,7 @@ type MockHTTPClient struct {
|
|||
testRemoteServices map[string]vocab.ActivityStreamsService
|
||||
testRemoteAttachments map[string]RemoteAttachmentFile
|
||||
testRemoteEmojis map[string]vocab.TootEmoji
|
||||
testTombstones map[string]*gtsmodel.Tombstone
|
||||
|
||||
SentMessages sync.Map
|
||||
}
|
||||
|
@ -92,6 +94,7 @@ func NewMockHTTPClient(do func(req *http.Request) (*http.Response, error), relat
|
|||
mockHTTPClient.testRemoteServices = NewTestFediServices()
|
||||
mockHTTPClient.testRemoteAttachments = NewTestFediAttachments(relativeMediaPath)
|
||||
mockHTTPClient.testRemoteEmojis = NewTestFediEmojis()
|
||||
mockHTTPClient.testTombstones = NewTestTombstones()
|
||||
|
||||
mockHTTPClient.do = func(req *http.Request) (*http.Response, error) {
|
||||
responseCode := http.StatusNotFound
|
||||
|
@ -193,6 +196,11 @@ func NewMockHTTPClient(do func(req *http.Request) (*http.Response, error), relat
|
|||
responseBytes = attachment.Data
|
||||
responseContentType = attachment.ContentType
|
||||
responseContentLength = len(attachment.Data)
|
||||
} else if _, ok := mockHTTPClient.testTombstones[req.URL.String()]; ok {
|
||||
responseCode = http.StatusGone
|
||||
responseBytes = []byte{}
|
||||
responseContentType = "text/html"
|
||||
responseContentLength = 0
|
||||
}
|
||||
|
||||
log.Debugf("returning response %s", string(responseBytes))
|
||||
|
|
9
vendor/codeberg.org/gruf/go-cache/v3/LICENSE
generated
vendored
Normal file
9
vendor/codeberg.org/gruf/go-cache/v3/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,9 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2022 gruf
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
341
vendor/codeberg.org/gruf/go-cache/v3/result/cache.go
generated
vendored
Normal file
341
vendor/codeberg.org/gruf/go-cache/v3/result/cache.go
generated
vendored
Normal file
|
@ -0,0 +1,341 @@
|
|||
package result
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"codeberg.org/gruf/go-cache/v3/ttl"
|
||||
)
|
||||
|
||||
// Cache is a TTL result cache for values of a single struct type, keyed by
// combinations of that struct's field values ("lookups"). Each result is
// stored once under an internal int64 primary key; every configured lookup
// maps its own generated key string to that primary key.
type Cache[Value any] struct {
	cache   ttl.Cache[int64, result[Value]] // underlying result cache
	lookups structKeys                      // pre-determined struct lookups
	copy    func(Value) Value               // copies a Value type
	next    int64                           // update key counter
}
|
||||
|
||||
// New returns a new initialized Cache, with given lookups and underlying value copy function.
// Equivalent to NewSized with a default capacity of 64.
func New[Value any](lookups []string, copy func(Value) Value) *Cache[Value] {
	return NewSized(lookups, copy, 64)
}

// NewSized returns a new initialized Cache, with given lookups, underlying value copy function and provided capacity.
// Each lookup names one or more exported struct fields, dot-separated. The generic Value parameter
// must be a struct type, or a (chain of) pointer(s) to one; anything else panics.
func NewSized[Value any](lookups []string, copy func(Value) Value, cap int) *Cache[Value] {
	var z Value

	// Determine generic type
	t := reflect.TypeOf(z)

	// Iteratively deref pointer type
	for t.Kind() == reflect.Pointer {
		t = t.Elem()
	}

	// Ensure that this is a struct type
	if t.Kind() != reflect.Struct {
		panic("generic parameter type must be struct (or ptr to)")
	}

	// Allocate new cache object
	c := &Cache[Value]{copy: copy}
	c.lookups = make([]keyFields, len(lookups))

	for i, lookup := range lookups {
		// Generate keyed field info for lookup;
		// pkeys maps generated key strings to the
		// internal int64 primary cache key.
		c.lookups[i].pkeys = make(map[string]int64, cap)
		c.lookups[i].lookup = lookup
		c.lookups[i].populate(t)
	}

	// Create and initialize underlying cache
	// (zero TTL here means ttl.Cache's default applies).
	c.cache.Init(0, cap, 0)
	c.SetEvictionCallback(nil)
	c.SetInvalidateCallback(nil)
	return c
}
|
||||
|
||||
// Start will start the cache background eviction routine with given sweep frequency. If already
// running or a freq <= 0 provided, this is a no-op. This will block until eviction routine started.
// Thin wrapper delegating to the underlying ttl.Cache.
func (c *Cache[Value]) Start(freq time.Duration) bool {
	return c.cache.Start(freq)
}

// Stop will stop cache background eviction routine. If not running this
// is a no-op. This will block until the eviction routine has stopped.
// Thin wrapper delegating to the underlying ttl.Cache.
func (c *Cache[Value]) Stop() bool {
	return c.cache.Stop()
}

// SetTTL sets the cache item TTL. Update can be specified to force updates of existing items
// in the cache, this will simply add the change in TTL to their current expiry time.
// Thin wrapper delegating to the underlying ttl.Cache.
func (c *Cache[Value]) SetTTL(ttl time.Duration, update bool) {
	c.cache.SetTTL(ttl, update)
}
|
||||
|
||||
// SetEvictionCallback sets the eviction callback to the provided hook.
|
||||
func (c *Cache[Value]) SetEvictionCallback(hook func(Value)) {
|
||||
if hook == nil {
|
||||
// Ensure non-nil hook.
|
||||
hook = func(Value) {}
|
||||
}
|
||||
c.cache.SetEvictionCallback(func(item *ttl.Entry[int64, result[Value]]) {
|
||||
for _, key := range item.Value.Keys {
|
||||
// Delete key->pkey lookup
|
||||
pkeys := key.fields.pkeys
|
||||
delete(pkeys, key.value)
|
||||
}
|
||||
|
||||
if item.Value.Error != nil {
|
||||
// Skip error hooks
|
||||
return
|
||||
}
|
||||
|
||||
// Call user hook.
|
||||
hook(item.Value.Value)
|
||||
})
|
||||
}
|
||||
|
||||
// SetInvalidateCallback sets the invalidate callback to the provided hook.
// The hook receives each non-error cached value explicitly invalidated;
// error results are skipped. A nil hook is replaced by a no-op.
func (c *Cache[Value]) SetInvalidateCallback(hook func(Value)) {
	if hook == nil {
		// Ensure non-nil hook.
		hook = func(Value) {}
	}
	c.cache.SetInvalidateCallback(func(item *ttl.Entry[int64, result[Value]]) {
		for _, key := range item.Value.Keys {
			if key.fields != nil {
				// Delete key->pkey lookup
				pkeys := key.fields.pkeys
				delete(pkeys, key.value)
			}
		}

		if item.Value.Error != nil {
			// Skip error hooks
			return
		}

		// Call user hook.
		hook(item.Value.Value)
	})
}
|
||||
|
||||
// Load returns the value cached under lookup + keyParts, calling the given
// load function on a cache miss. Fresh results -- including load errors --
// are stored in the cache; a cached error is returned as-is, while a cached
// value is returned as a copy (via the cache's copy function). A key
// conflict with an existing non-error entry returns a ConflictError.
func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts ...any) (Value, error) {
	var (
		zero Value
		res  result[Value]
	)

	// Get lookup map by name.
	kfields := c.getFields(lookup)
	lmap := kfields.pkeys

	// Generate cache key string.
	ckey := genkey(keyParts...)

	// Acquire cache lock
	c.cache.Lock()

	// Look for primary key
	pkey, ok := lmap[ckey]

	if ok {
		// Fetch the result for primary key
		entry, _ := c.cache.Cache.Get(pkey)
		res = entry.Value
	}

	// Done with lock. Note the lock is deliberately
	// dropped before calling the (possibly slow) load
	// function below, so concurrent Loads can proceed.
	c.cache.Unlock()

	if !ok {
		// Generate new result from fresh load.
		res.Value, res.Error = load()

		if res.Error != nil {
			// This load returned an error, only
			// store this item under provided key.
			res.Keys = []cacheKey{{
				value:  ckey,
				fields: kfields,
			}}
		} else {
			// This was a successful load, generate keys.
			res.Keys = c.lookups.generate(res.Value)
		}

		// Acquire cache lock.
		c.cache.Lock()
		defer c.cache.Unlock()

		// Attempt to cache this result.
		if key, ok := c.storeResult(res); !ok {
			return zero, ConflictError{key}
		}
	}

	// Catch and return error
	if res.Error != nil {
		return zero, res.Error
	}

	// Return a copy of value from cache
	return c.copy(res.Value), nil
}
|
||||
|
||||
// Store writes the given value through the provided store function, then
// caches a copy of it under all configured lookups. If the store function
// fails its error is returned and nothing is cached. A key conflict with
// an existing non-error cache entry returns a ConflictError.
func (c *Cache[Value]) Store(value Value, store func() error) error {
	// Attempt to store this value.
	if err := store(); err != nil {
		return err
	}

	// Prepare cached result.
	result := result[Value]{
		Keys:  c.lookups.generate(value),
		Value: c.copy(value),
		Error: nil,
	}

	// Acquire cache lock.
	c.cache.Lock()
	defer c.cache.Unlock()

	// Attempt to cache result; a key conflicting with an
	// existing non-error entry aborts with a ConflictError.
	if key, ok := c.storeResult(result); !ok {
		return ConflictError{key}
	}

	return nil
}
|
||||
|
||||
// Has returns whether a non-error result is currently
// cached under the given lookup and key parts.
func (c *Cache[Value]) Has(lookup string, keyParts ...any) bool {
	var res result[Value]

	// Get lookup map by name.
	kfields := c.getFields(lookup)
	lmap := kfields.pkeys

	// Generate cache key string.
	ckey := genkey(keyParts...)

	// Acquire cache lock
	c.cache.Lock()

	// Look for primary key
	pkey, ok := lmap[ckey]

	if ok {
		// Fetch the result for primary key
		entry, _ := c.cache.Cache.Get(pkey)
		res = entry.Value
	}

	// Done with lock
	c.cache.Unlock()

	// Check for non-error result;
	// cached errors don't count as "has".
	return ok && (res.Error == nil)
}
|
||||
|
||||
// Invalidate removes the result stored under lookup + keyParts from the
// cache (if present), firing the invalidate callback for non-error results.
func (c *Cache[Value]) Invalidate(lookup string, keyParts ...any) {
	// Get lookup map by name.
	kfields := c.getFields(lookup)
	lmap := kfields.pkeys

	// Generate cache key string.
	ckey := genkey(keyParts...)

	// Look for primary key
	c.cache.Lock()
	pkey, ok := lmap[ckey]
	c.cache.Unlock()

	if !ok {
		return
	}

	// Invalidate by primary key.
	// NOTE(review): the lock is released between the lookup above and
	// this call, so a concurrent writer could reuse the pkey in between;
	// confirm ttl.Cache.Invalidate tolerates a stale primary key.
	c.cache.Invalidate(pkey)
}
|
||||
|
||||
// Clear empties the cache, calling the invalidate callback.
func (c *Cache[Value]) Clear() {
	c.cache.Clear()
}

// Len returns the current number of cached results.
func (c *Cache[Value]) Len() int {
	return c.cache.Cache.Len()
}

// Cap returns the maximum capacity of this cache.
func (c *Cache[Value]) Cap() int {
	return c.cache.Cache.Cap()
}
|
||||
|
||||
func (c *Cache[Value]) getFields(name string) *keyFields {
|
||||
for _, k := range c.lookups {
|
||||
// Find key fields with name
|
||||
if k.lookup == name {
|
||||
return &k
|
||||
}
|
||||
}
|
||||
panic("invalid lookup: " + name)
|
||||
}
|
||||
|
||||
// storeResult attempts to insert the given result under every one of its
// cache keys, must be called with the cache lock held. If any key already
// maps to an existing non-error entry the insert is aborted and
// (conflictingKey, false) is returned; otherwise the result is stored
// under a fresh int64 primary key and ("", true) is returned.
func (c *Cache[Value]) storeResult(res result[Value]) (string, bool) {
	for _, key := range res.Keys {
		pkeys := key.fields.pkeys

		// Look for cache primary key
		pkey, ok := pkeys[key.value]

		if ok {
			// Look for overlap with non error keys,
			// as an overlap for some but not all keys
			// could produce inconsistent results.
			entry, _ := c.cache.Cache.Get(pkey)
			if entry.Value.Error == nil {
				return key.value, false
			}
		}
	}

	// Get primary key (monotonically increasing counter)
	pkey := c.next
	c.next++

	// Store all primary key lookups
	for _, key := range res.Keys {
		pkeys := key.fields.pkeys
		pkeys[key.value] = pkey
	}

	// Store main entry under primary key, using evict hook if needed
	c.cache.Cache.SetWithHook(pkey, &ttl.Entry[int64, result[Value]]{
		Expiry: time.Now().Add(c.cache.TTL),
		Key:    pkey,
		Value:  res,
	}, func(_ int64, item *ttl.Entry[int64, result[Value]]) {
		c.cache.Evict(item)
	})

	return "", true
}
|
||||
|
||||
// result wraps a cached value -- or the error returned
// instead of one -- together with the lookup keys the
// entry is accessible under.
type result[Value any] struct {
	// Keys is the list of cache keys
	// this result is accessible under.
	Keys []cacheKey

	// Value is the cached value.
	Value Value

	// Error is the cached error; non-nil
	// when the load itself failed.
	Error error
}
|
22
vendor/codeberg.org/gruf/go-cache/v3/result/error.go
generated
vendored
Normal file
22
vendor/codeberg.org/gruf/go-cache/v3/result/error.go
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
|||
package result
|
||||
|
||||
import "errors"
|
||||
|
||||
// ErrUnkownLookup ...
|
||||
var ErrUnknownLookup = errors.New("unknown lookup identifier")
|
||||
|
||||
// IsConflictErr returns whether error is due to key conflict.
|
||||
func IsConflictErr(err error) bool {
|
||||
_, ok := err.(ConflictError)
|
||||
return ok
|
||||
}
|
||||
|
||||
// ConflictError is returned on cache key conflict.
|
||||
type ConflictError struct {
|
||||
Key string
|
||||
}
|
||||
|
||||
// Error returns the message for this key conflict error.
|
||||
func (c ConflictError) Error() string {
|
||||
return "cache conflict for key \"" + c.Key + "\""
|
||||
}
|
184
vendor/codeberg.org/gruf/go-cache/v3/result/key.go
generated
vendored
Normal file
184
vendor/codeberg.org/gruf/go-cache/v3/result/key.go
generated
vendored
Normal file
|
@ -0,0 +1,184 @@
|
|||
package result
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"codeberg.org/gruf/go-byteutil"
|
||||
"codeberg.org/gruf/go-mangler"
|
||||
)
|
||||
|
||||
// structKeys provides convenience methods for a list
// of struct field combinations used for cache keys.
type structKeys []keyFields

// get fetches the key-fields for given lookup (else, panics).
func (sk structKeys) get(lookup string) *keyFields {
	for i := range sk {
		if sk[i].lookup == lookup {
			// Pointer into the slice itself, not a copy.
			return &sk[i]
		}
	}
	panic("unknown lookup: \"" + lookup + "\"")
}
|
||||
|
||||
// generate will calculate the value string for each required
// cache key as laid-out by the receiving structKeys{}. The given
// value must be a struct (or non-nil pointer chain to one)
// matching the type the keys were populated from; a nil pointer panics.
func (sk structKeys) generate(a any) []cacheKey {
	// Get reflected value in order
	// to access the struct fields
	v := reflect.ValueOf(a)

	// Iteratively deref pointer value
	for v.Kind() == reflect.Pointer {
		if v.IsNil() {
			panic("nil ptr")
		}
		v = v.Elem()
	}

	// Preallocate expected slice of keys
	keys := make([]cacheKey, len(sk))

	// Acquire byte buffer
	buf := bufpool.Get().(*byteutil.Buffer)
	defer bufpool.Put(buf)

	for i := range sk {
		// Reset buffer
		buf.B = buf.B[:0]

		// Set the key-fields reference
		keys[i].fields = &sk[i]

		// Calculate cache-key value
		keys[i].populate(buf, v)
	}

	return keys
}
|
||||
|
||||
// cacheKey represents an actual cache key.
type cacheKey struct {
	// value is the actual string representing
	// this cache key for hashmap lookups.
	value string

	// fields is a read-only reference (i.e. we should
	// NOT be modifying it, only using it for reference)
	// to the struct fields encapsulated by this cache key.
	fields *keyFields
}

// populate will calculate the cache key's value string for given
// value's reflected information. Passed buffer is used for string building.
func (k *cacheKey) populate(buf *byteutil.Buffer, v reflect.Value) {
	// Append each mangled field value to buffer,
	// '.'-separated, in the key's field order.
	for _, idx := range k.fields.fields {
		fv := v.Field(idx)
		fi := fv.Interface()
		buf.B = mangler.Append(buf.B, fi)
		buf.B = append(buf.B, '.')
	}

	// Drop last '.'
	buf.Truncate(1)

	// Create string copy from buf
	k.value = string(buf.B)
}
|
||||
|
||||
// keyFields represents a list of struct fields
// encompassed in a single cache key, the string name
// of the lookup, and the lookup map to primary keys.
type keyFields struct {
	// lookup is the calculated (well, provided)
	// cache key lookup, consisting of dot sep'd
	// struct field names.
	lookup string

	// fields is a slice of runtime struct field
	// indices, of the fields encompassed by this key.
	fields []int

	// pkeys is a lookup of stored struct key values
	// to the primary cache lookup key (int64).
	pkeys map[string]int64
}

// populate will populate this keyFields{} object's .fields member by determining
// the field names from the given lookup, and querying given reflected type to get
// the runtime field indices for each of the fields. this speeds-up future value lookups.
// Panics if a named field does not exist on the type or is unexported.
func (kf *keyFields) populate(t reflect.Type) {
	// Split dot-separated lookup to get
	// the individual struct field names
	names := strings.Split(kf.lookup, ".")
	if len(names) == 0 {
		panic("no key fields specified")
	}

	// Pre-allocate slice of expected length
	kf.fields = make([]int, len(names))

	for i, name := range names {
		// Get field info for given name
		ft, ok := t.FieldByName(name)
		if !ok {
			panic("no field found for name: \"" + name + "\"")
		}

		// Check field is usable
		if !isExported(name) {
			panic("field must be exported")
		}

		// Set the runtime field index.
		// NOTE(review): Index[0] assumes the field is declared directly
		// on the struct; for a promoted (embedded) field len(ft.Index) > 1
		// and this would select the wrong field -- confirm lookups never
		// name promoted fields.
		kf.fields[i] = ft.Index[0]
	}
}
|
||||
|
||||
// genkey generates a single '.'-separated cache key
// string from the given mangled key part values.
func genkey(parts ...any) string {
	if len(parts) < 1 {
		// Panic to prevent annoying usecase
		// where user forgets to pass lookup
		// and instead only passes a key part,
		// e.g. cache.Get("key")
		// which then always returns false.
		panic("no key parts provided")
	}

	// Acquire buffer and reset
	buf := bufpool.Get().(*byteutil.Buffer)
	defer bufpool.Put(buf)
	buf.Reset()

	// Encode each key part
	for _, part := range parts {
		buf.B = mangler.Append(buf.B, part)
		buf.B = append(buf.B, '.')
	}

	// Drop last '.'
	buf.Truncate(1)

	// Return string copy
	return string(buf.B)
}
|
||||
|
||||
// isExported reports whether the given identifier name begins with an
// upper-case rune, i.e. whether it is exported in the Go sense.
func isExported(fnName string) bool {
	first, _ := utf8.DecodeRuneInString(fnName)
	return unicode.IsUpper(first)
}
|
||||
|
||||
// bufpool provides a memory pool of byte
// buffers used when encoding key types.
var bufpool = sync.Pool{
	New: func() any {
		// 512 bytes is ample scratch space for typical mangled keys.
		return &byteutil.Buffer{B: make([]byte, 0, 512)}
	},
}
|
20
vendor/codeberg.org/gruf/go-cache/v3/ttl/schedule.go
generated
vendored
Normal file
20
vendor/codeberg.org/gruf/go-cache/v3/ttl/schedule.go
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
package ttl
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"codeberg.org/gruf/go-sched"
|
||||
)
|
||||
|
||||
// scheduler is the global cache runtime scheduler
// for handling regular cache evictions.
var scheduler sched.Scheduler

// schedule submits the given sweep routine to the global scheduler at the
// given frequency, starting the global scheduler if not yet running, and
// returns the job's cancel function.
func schedule(sweep func(time.Time), freq time.Duration) func() {
	if !scheduler.Running() {
		// ensure running
		_ = scheduler.Start()
	}
	return scheduler.Schedule(sched.NewJob(sweep).Every(freq))
}
|
412
vendor/codeberg.org/gruf/go-cache/v3/ttl/ttl.go
generated
vendored
Normal file
412
vendor/codeberg.org/gruf/go-cache/v3/ttl/ttl.go
generated
vendored
Normal file
|
@ -0,0 +1,412 @@
|
|||
package ttl
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"codeberg.org/gruf/go-maps"
|
||||
)
|
||||
|
||||
// Entry represents an item in the cache, with its currently calculated Expiry time.
type Entry[Key comparable, Value any] struct {
	Key    Key
	Value  Value
	Expiry time.Time
}

// Cache is the underlying Cache implementation, providing both the base Cache
// interface and unsafe access to underlying map to allow flexibility in building
// your own. The embedded mutex must be held when touching exported fields directly.
type Cache[Key comparable, Value any] struct {
	// TTL is the cache item TTL.
	TTL time.Duration

	// Evict is the hook that is called when an item is evicted from the cache, includes manual delete.
	Evict func(*Entry[Key, Value])

	// Invalid is the hook that is called when an item's data in the cache is invalidated.
	Invalid func(*Entry[Key, Value])

	// Cache is the underlying hashmap used for this cache.
	Cache maps.LRUMap[Key, *Entry[Key, Value]]

	// stop is the eviction routine cancel func.
	stop func()

	// pool is a memory pool of entry objects.
	pool []*Entry[Key, Value]

	// Embedded mutex.
	sync.Mutex
}
|
||||
|
||||
// New returns a new initialized Cache with given initial length, maximum capacity and item TTL.
func New[K comparable, V any](len, cap int, ttl time.Duration) *Cache[K, V] {
	c := new(Cache[K, V])
	c.Init(len, cap, ttl)
	return c
}

// Init will initialize this cache with given initial length, maximum capacity and item TTL.
// A ttl <= 0 falls back to a default of 5 seconds.
func (c *Cache[K, V]) Init(len, cap int, ttl time.Duration) {
	if ttl <= 0 {
		// Default duration
		ttl = time.Second * 5
	}
	c.TTL = ttl
	// Install no-op hooks so Evict/Invalid are never nil.
	c.SetEvictionCallback(nil)
	c.SetInvalidateCallback(nil)
	c.Cache.Init(len, cap)
}
|
||||
|
||||
// Start: implements cache.Cache's Start(). It schedules the background
// sweep at the given frequency; returns false if freq <= 0 or already running.
func (c *Cache[K, V]) Start(freq time.Duration) (ok bool) {
	// Nothing to start
	if freq <= 0 {
		return false
	}

	// Safely start
	c.Lock()

	if ok = c.stop == nil; ok {
		// Not yet running, schedule us
		c.stop = schedule(c.Sweep, freq)
	}

	// Done with lock
	c.Unlock()

	return
}

// Stop: implements cache.Cache's Stop(). It cancels the background
// sweep routine; returns false if not currently running.
func (c *Cache[K, V]) Stop() (ok bool) {
	// Safely stop
	c.Lock()

	if ok = c.stop != nil; ok {
		// We're running, cancel evicts
		c.stop()
		c.stop = nil
	}

	// Done with lock
	c.Unlock()

	return
}
|
||||
|
||||
// Sweep attempts to evict expired items (with callback!) from cache.
func (c *Cache[K, V]) Sweep(now time.Time) {
	var after int

	// Sweep within lock
	c.Lock()
	defer c.Unlock()

	// Sentinel value
	after = -1

	// The cache will be ordered by expiry date, we iterate until we reach the index of
	// the youngest item that has expired, as all succeeding items will also be expired.
	c.Cache.RangeIf(0, c.Cache.Len(), func(i int, _ K, item *Entry[K, V]) bool {
		if now.After(item.Expiry) {
			after = i

			// All older than this (including) can be dropped
			return false
		}

		// Continue looping
		return true
	})

	if after == -1 {
		// No Truncation needed
		return
	}

	// Truncate items, calling eviction hook.
	// NOTE(review): this drops the trailing Len-after entries, relying on
	// expired items sorting to the end of the map range -- confirm against
	// maps.LRUMap ordering and the (not visible here) truncate helper.
	c.truncate(c.Cache.Len()-after, c.Evict)
}
|
||||
|
||||
// SetEvictionCallback: implements cache.Cache's SetEvictionCallback().
// A nil hook is replaced with a no-op so c.Evict is never nil.
func (c *Cache[K, V]) SetEvictionCallback(hook func(*Entry[K, V])) {
	// Ensure non-nil hook
	if hook == nil {
		hook = func(*Entry[K, V]) {}
	}

	// Update within lock
	c.Lock()
	defer c.Unlock()

	// Update hook
	c.Evict = hook
}

// SetInvalidateCallback: implements cache.Cache's SetInvalidateCallback().
// A nil hook is replaced with a no-op so c.Invalid is never nil.
func (c *Cache[K, V]) SetInvalidateCallback(hook func(*Entry[K, V])) {
	// Ensure non-nil hook
	if hook == nil {
		hook = func(*Entry[K, V]) {}
	}

	// Update within lock
	c.Lock()
	defer c.Unlock()

	// Update hook
	c.Invalid = hook
}
|
||||
|
||||
// SetTTL: implements cache.Cache's SetTTL().
|
||||
func (c *Cache[K, V]) SetTTL(ttl time.Duration, update bool) {
|
||||
if ttl < 0 {
|
||||
panic("ttl must be greater than zero")
|
||||
}
|
||||
|
||||
// Update within lock
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
// Set updated TTL
|
||||
diff := ttl - c.TTL
|
||||
c.TTL = ttl
|
||||
|
||||
if update {
|
||||
// Update existing cache entries with new expiry time
|
||||
c.Cache.Range(0, c.Cache.Len(), func(i int, key K, item *Entry[K, V]) {
|
||||
item.Expiry = item.Expiry.Add(diff)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Get: implements cache.Cache's Get().
|
||||
func (c *Cache[K, V]) Get(key K) (V, bool) {
|
||||
// Read within lock
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
// Check for item in cache
|
||||
item, ok := c.Cache.Get(key)
|
||||
if !ok {
|
||||
var value V
|
||||
return value, false
|
||||
}
|
||||
|
||||
// Update item expiry and return
|
||||
item.Expiry = time.Now().Add(c.TTL)
|
||||
return item.Value, true
|
||||
}
|
||||
|
||||
// Add: implements cache.Cache's Add().
// Stores value only if key is not already cached; returns true if
// the value was inserted, false if the key already existed.
func (c *Cache[K, V]) Add(key K, value V) bool {
	// Write within lock
	c.Lock()
	defer c.Unlock()

	// If already cached, return
	if c.Cache.Has(key) {
		return false
	}

	// Alloc new item (reuses pooled entries where possible)
	item := c.alloc()
	item.Key = key
	item.Value = value
	item.Expiry = time.Now().Add(c.TTL)

	var hook func(K, *Entry[K, V])

	if c.Evict != nil {
		// Pass evicted entry to user hook
		// (fires if this insert overflows capacity)
		hook = func(_ K, item *Entry[K, V]) {
			c.Evict(item)
		}
	}

	// Place new item in the map with hook
	c.Cache.SetWithHook(key, item, hook)

	return true
}
|
||||
|
||||
// Set: implements cache.Cache's Set().
// Unconditionally stores value under key: an existing entry is
// invalidated (hook fires) and overwritten in place, otherwise
// a new entry is allocated and inserted.
func (c *Cache[K, V]) Set(key K, value V) {
	// Write within lock
	c.Lock()
	defer c.Unlock()

	// Check if already exists
	item, ok := c.Cache.Get(key)

	if ok {
		if c.Invalid != nil {
			// Invalidate existing
			c.Invalid(item)
		}
	} else {
		// Allocate new item
		item = c.alloc()
		item.Key = key
		c.Cache.Set(key, item)
	}

	// Update the item value + expiry
	item.Expiry = time.Now().Add(c.TTL)
	item.Value = value
}
|
||||
|
||||
// CAS: implements cache.Cache's CAS().
|
||||
func (c *Cache[K, V]) CAS(key K, old V, new V, cmp func(V, V) bool) bool {
|
||||
// CAS within lock
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
// Check for item in cache
|
||||
item, ok := c.Cache.Get(key)
|
||||
if !ok || !cmp(item.Value, old) {
|
||||
return false
|
||||
}
|
||||
|
||||
if c.Invalid != nil {
|
||||
// Invalidate item
|
||||
c.Invalid(item)
|
||||
}
|
||||
|
||||
// Update item + Expiry
|
||||
item.Value = new
|
||||
item.Expiry = time.Now().Add(c.TTL)
|
||||
|
||||
return ok
|
||||
}
|
||||
|
||||
// Swap: implements cache.Cache's Swap().
|
||||
func (c *Cache[K, V]) Swap(key K, swp V) V {
|
||||
// Swap within lock
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
// Check for item in cache
|
||||
item, ok := c.Cache.Get(key)
|
||||
if !ok {
|
||||
var value V
|
||||
return value
|
||||
}
|
||||
|
||||
if c.Invalid != nil {
|
||||
// invalidate old
|
||||
c.Invalid(item)
|
||||
}
|
||||
|
||||
old := item.Value
|
||||
|
||||
// update item + Expiry
|
||||
item.Value = swp
|
||||
item.Expiry = time.Now().Add(c.TTL)
|
||||
|
||||
return old
|
||||
}
|
||||
|
||||
// Has: implements cache.Cache's Has().
|
||||
func (c *Cache[K, V]) Has(key K) bool {
|
||||
c.Lock()
|
||||
ok := c.Cache.Has(key)
|
||||
c.Unlock()
|
||||
return ok
|
||||
}
|
||||
|
||||
// Invalidate: implements cache.Cache's Invalidate().
// Removes the entry under key (if any), fires the invalidation
// hook, and recycles the entry. Reports whether an entry was removed.
func (c *Cache[K, V]) Invalidate(key K) bool {
	// Delete within lock
	c.Lock()
	defer c.Unlock()

	// Check if we have item with key
	item, ok := c.Cache.Get(key)
	if !ok {
		return false
	}

	// Remove from cache map
	_ = c.Cache.Delete(key)

	if c.Invalid != nil {
		// Invalidate item (hook runs before the
		// entry's fields are zeroed by free below)
		c.Invalid(item)
	}

	// Return item to pool
	c.free(item)

	return true
}
|
||||
|
||||
// Clear: implements cache.Cache's Clear().
// Removes every entry, passing each to the invalidation hook (if set).
func (c *Cache[K, V]) Clear() {
	c.Lock()
	defer c.Unlock()
	c.truncate(c.Cache.Len(), c.Invalid)
}
|
||||
|
||||
// Len: implements cache.Cache's Len().
|
||||
func (c *Cache[K, V]) Len() int {
|
||||
c.Lock()
|
||||
l := c.Cache.Len()
|
||||
c.Unlock()
|
||||
return l
|
||||
}
|
||||
|
||||
// Cap: implements cache.Cache's Cap().
|
||||
func (c *Cache[K, V]) Cap() int {
|
||||
c.Lock()
|
||||
l := c.Cache.Cap()
|
||||
c.Unlock()
|
||||
return l
|
||||
}
|
||||
|
||||
// truncate will call Cache.Truncate(sz), and if provided a hook will temporarily
// store deleted items before passing them to the hook. This is required in order
// to prevent cache writes during .Truncate().
func (c *Cache[K, V]) truncate(sz int, hook func(*Entry[K, V])) {
	if hook == nil {
		// No hook was provided, we can simply truncate and free items immediately.
		c.Cache.Truncate(sz, func(_ K, item *Entry[K, V]) { c.free(item) })
		return
	}

	// Store list of deleted items for later callbacks
	deleted := make([]*Entry[K, V], 0, sz)

	// Truncate and store list of deleted items
	// (no user code runs inside Truncate itself)
	c.Cache.Truncate(sz, func(_ K, item *Entry[K, V]) {
		deleted = append(deleted, item)
	})

	// Pass each deleted to hook, then free
	// (hook sees the entry before its fields are zeroed)
	for _, item := range deleted {
		hook(item)
		c.free(item)
	}
}
|
||||
|
||||
// alloc will acquire cache entry from pool, or allocate new.
|
||||
func (c *Cache[K, V]) alloc() *Entry[K, V] {
|
||||
if len(c.pool) == 0 {
|
||||
return &Entry[K, V]{}
|
||||
}
|
||||
idx := len(c.pool) - 1
|
||||
e := c.pool[idx]
|
||||
c.pool = c.pool[:idx]
|
||||
return e
|
||||
}
|
||||
|
||||
// free will reset entry fields and place back in pool.
|
||||
func (c *Cache[K, V]) free(e *Entry[K, V]) {
|
||||
var (
|
||||
zk K
|
||||
zv V
|
||||
)
|
||||
e.Key = zk
|
||||
e.Value = zv
|
||||
e.Expiry = time.Time{}
|
||||
c.pool = append(c.pool, e)
|
||||
}
|
9
vendor/codeberg.org/gruf/go-mangler/LICENSE
generated
vendored
Normal file
9
vendor/codeberg.org/gruf/go-mangler/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,9 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2022 gruf
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
40
vendor/codeberg.org/gruf/go-mangler/README.md
generated
vendored
Normal file
40
vendor/codeberg.org/gruf/go-mangler/README.md
generated
vendored
Normal file
|
@ -0,0 +1,40 @@
|
|||
# go-mangler
|
||||
|
||||
[Documentation](https://pkg.go.dev/codeberg.org/gruf/go-mangler).
|
||||
|
||||
To put it simply, this is a bit of an odd library. It aims to provide incredibly fast, unique string outputs for all default supported input data types during a given runtime instance.
|
||||
|
||||
It is useful, for example, for use as part of larger abstractions involving hashmaps. That was my particular usecase anyways...
|
||||
|
||||
This package does make liberal use of the "unsafe" package.
|
||||
|
||||
Benchmarks are below. Those with missing values panicked during our set of benchmarks, usually a case of not handling nil values elegantly. Please note that the most important thing to observe here is the relative difference between benchmark scores; the actual `ns/op`, `B/op`, `allocs/op` values account for running through over 80 possible test cases, including some not-ideal situations.
|
||||
|
||||
The choice of libraries in the benchmark are just a selection of libraries that could be used in a similar manner to this one, i.e. serializing in some manner.
|
||||
|
||||
```
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
pkg: codeberg.org/gruf/go-mangler
|
||||
cpu: 11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz
|
||||
BenchmarkMangle
|
||||
BenchmarkMangle-8 723278 1593 ns/op 1168 B/op 120 allocs/op
|
||||
BenchmarkMangleHash
|
||||
BenchmarkMangleHash-8 405380 2788 ns/op 4496 B/op 214 allocs/op
|
||||
BenchmarkJSON
|
||||
BenchmarkJSON-8 199360 6116 ns/op 4243 B/op 142 allocs/op
|
||||
BenchmarkBinary
|
||||
BenchmarkBinary-8 ------ ---- ns/op ---- B/op --- allocs/op
|
||||
BenchmarkFmt
|
||||
BenchmarkFmt-8 168500 7111 ns/op 2256 B/op 161 allocs/op
|
||||
BenchmarkKelindarBinary
|
||||
BenchmarkKelindarBinary-8 ------ ---- ns/op ---- B/op --- allocs/op
|
||||
BenchmarkFxmackerCbor
|
||||
BenchmarkFxmackerCbor-8 361416 3255 ns/op 1495 B/op 122 allocs/op
|
||||
BenchmarkMitchellhHashStructure
|
||||
BenchmarkMitchellhHashStructure-8 117672 10493 ns/op 8443 B/op 961 allocs/op
|
||||
BenchmarkCnfStructhash
|
||||
BenchmarkCnfStructhash-8 7078 161926 ns/op 288644 B/op 5843 allocs/op
|
||||
PASS
|
||||
ok codeberg.org/gruf/go-mangler 14.377s
|
||||
```
|
97
vendor/codeberg.org/gruf/go-mangler/helpers.go
generated
vendored
Normal file
97
vendor/codeberg.org/gruf/go-mangler/helpers.go
generated
vendored
Normal file
|
@ -0,0 +1,97 @@
|
|||
package mangler
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// deref_ptr_mangler wraps a Mangler so it can handle a value behind 'count'
// levels of pointer indirection. Each level appends '1' (non-nil, keep
// dereferencing) or '0' (nil, stop), keeping nil and non-nil pointers
// distinguishable in the mangled output.
func deref_ptr_mangler(mangle Mangler, count int) rMangler {
	return func(buf []byte, v reflect.Value) []byte {
		for i := 0; i < count; i++ {
			// Check for nil
			if v.IsNil() {
				buf = append(buf, '0')
				return buf
			}

			// Further deref ptr
			buf = append(buf, '1')
			v = v.Elem()
		}

		// Mangle fully deref'd ptr
		return mangle(buf, v.Interface())
	}
}
|
||||
|
||||
// deref_ptr_rmangler is the reflect-value counterpart of deref_ptr_mangler:
// it dereferences 'count' pointer levels ('1' per non-nil level, '0' on nil)
// and then passes the reflect.Value straight to the wrapped rMangler.
func deref_ptr_rmangler(mangle rMangler, count int) rMangler {
	return func(buf []byte, v reflect.Value) []byte {
		for i := 0; i < count; i++ {
			// Check for nil
			if v.IsNil() {
				buf = append(buf, '0')
				return buf
			}

			// Further deref ptr
			buf = append(buf, '1')
			v = v.Elem()
		}

		// Mangle fully deref'd ptr
		return mangle(buf, v)
	}
}
|
||||
|
||||
// iter_array_mangler returns an rMangler that mangles each element of an
// array/slice value with 'mangle' (via .Interface()), joining elements
// with ','. The trailing separator is trimmed when any element was written.
func iter_array_mangler(mangle Mangler) rMangler {
	return func(buf []byte, v reflect.Value) []byte {
		n := v.Len()
		for i := 0; i < n; i++ {
			buf = mangle(buf, v.Index(i).Interface())
			buf = append(buf, ',')
		}
		if n > 0 {
			// Drop trailing ','
			buf = buf[:len(buf)-1]
		}
		return buf
	}
}
|
||||
|
||||
// iter_array_rmangler is the reflect-value counterpart of iter_array_mangler:
// each element is passed to the wrapped rMangler as a reflect.Value, avoiding
// a fresh interface boxing per element.
func iter_array_rmangler(mangle rMangler) rMangler {
	return func(buf []byte, v reflect.Value) []byte {
		n := v.Len()
		for i := 0; i < n; i++ {
			buf = mangle(buf, v.Index(i))
			buf = append(buf, ',')
		}
		if n > 0 {
			// Drop trailing ','
			buf = buf[:len(buf)-1]
		}
		return buf
	}
}
|
||||
|
||||
// iter_map_rmangler returns an rMangler that mangles every key/value pair of
// a map, writing "key:value" pairs separated by '.' (trailing separator
// trimmed for non-empty maps).
// NOTE(review): Go map iteration order is randomized, so two equal maps may
// produce differently-ordered output — confirm callers do not rely on
// identical maps mangling identically.
func iter_map_rmangler(kMangle, vMangle rMangler) rMangler {
	return func(buf []byte, v reflect.Value) []byte {
		r := v.MapRange()
		for r.Next() {
			buf = kMangle(buf, r.Key())
			buf = append(buf, ':')
			buf = vMangle(buf, r.Value())
			buf = append(buf, '.')
		}
		if v.Len() > 0 {
			// Drop trailing '.'
			buf = buf[:len(buf)-1]
		}
		return buf
	}
}
|
||||
|
||||
// iface_value returns the raw value ptr for input boxed within interface{} type.
// It relies on the runtime's eface layout (a type-pointer word followed by a
// value-pointer word); this is unsafe and tied to the current Go runtime
// representation of interfaces.
func iface_value(a any) unsafe.Pointer {
	type eface struct {
		Type  unsafe.Pointer
		Value unsafe.Pointer
	}
	return (*eface)(unsafe.Pointer(&a)).Value
}
|
354
vendor/codeberg.org/gruf/go-mangler/load.go
generated
vendored
Normal file
354
vendor/codeberg.org/gruf/go-mangler/load.go
generated
vendored
Normal file
|
@ -0,0 +1,354 @@
|
|||
package mangler
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
// loadMangler is the top-most Mangler load function. It guarantees that a Mangler
// function will be returned for given value interface{} and reflected type. Else panics.
func loadMangler(a any, t reflect.Type) Mangler {
	// Load mangler function
	mng, rmng := load(a, t)

	if rmng != nil {
		// Wrap reflect mangler to handle iface
		// (adapts the rMangler signature to Mangler)
		return func(buf []byte, a any) []byte {
			return rmng(buf, reflect.ValueOf(a))
		}
	}

	if mng == nil {
		// No mangler function could be determined
		panic("cannot mangle type: " + t.String())
	}

	return mng
}
|
||||
|
||||
// load will load a Mangler or reflect Mangler for given type and iface 'a'.
// Exactly one of the two returns is non-nil on success; both are nil when
// the type is unsupported.
// Note: allocates new interface value if nil provided, i.e. if coming via reflection.
func load(a any, t reflect.Type) (Mangler, rMangler) {
	if t == nil {
		// There is no reflect type to search by
		panic("cannot mangle nil interface{} type")
	}

	if a == nil {
		// Alloc new iface instance so the type
		// switch in loadIface has a value to inspect
		v := reflect.New(t).Elem()
		a = v.Interface()
	}

	// Check in fast iface type switch
	if mng := loadIface(a); mng != nil {
		return mng, nil
	}

	// Search by reflection
	return loadReflect(t)
}
|
||||
|
||||
// loadIface is used as a first-resort interface{} type switcher loader
// for types implementing Mangled and providing performant alternative
// Mangler functions for standard library types to avoid reflection.
// Returns nil when no fast path applies.
func loadIface(a any) Mangler {
	switch a.(type) {
	case Mangled:
		return mangle_mangled

	case time.Time:
		return mangle_time

	case *time.Time:
		return mangle_time_ptr

	case *url.URL:
		return mangle_stringer

	case encoding.BinaryMarshaler:
		return mangle_binary

	// NOTE:
	// we don't just handle ALL fmt.Stringer types as often
	// the output is large and unwieldy and this interface
	// switch is for types it would be faster to avoid reflection.
	// If they want better performance they can implement Mangled{}.

	default:
		return nil
	}
}
|
||||
|
||||
// loadReflect will load a Mangler (or rMangler) function for the given reflected type info.
// Numeric kinds are mangled purely by bit width (e.g. float32 shares the 32-bit
// mangler, complex64 the 64-bit one). Unsupported kinds return (nil, nil).
// NOTE: this is used as the top level load function for nested reflective searches.
func loadReflect(t reflect.Type) (Mangler, rMangler) {
	switch t.Kind() {
	case reflect.Pointer:
		return loadReflectPtr(t.Elem())

	case reflect.String:
		return mangle_string, nil

	case reflect.Array:
		return nil, loadReflectArray(t.Elem())

	case reflect.Slice:
		// Element type
		et := t.Elem()

		// Preferably look for known slice mangler func
		if mng := loadReflectKnownSlice(et); mng != nil {
			return mng, nil
		}

		// Else handle as array elements
		return nil, loadReflectArray(et)

	case reflect.Map:
		return nil, loadReflectMap(t.Key(), t.Elem())

	case reflect.Bool:
		return mangle_bool, nil

	case reflect.Int,
		reflect.Uint,
		reflect.Uintptr:
		return mangle_platform_int, nil

	case reflect.Int8,
		reflect.Uint8:
		return mangle_8bit, nil

	case reflect.Int16,
		reflect.Uint16:
		return mangle_16bit, nil

	case reflect.Int32,
		reflect.Uint32:
		return mangle_32bit, nil

	case reflect.Int64,
		reflect.Uint64:
		return mangle_64bit, nil

	case reflect.Float32:
		return mangle_32bit, nil

	case reflect.Float64:
		return mangle_64bit, nil

	case reflect.Complex64:
		return mangle_64bit, nil

	case reflect.Complex128:
		return mangle_128bit, nil

	default:
		// Unsupported kind (struct, chan, func, ...)
		return nil, nil
	}
}
|
||||
|
||||
// loadReflectPtr loads a Mangler (or rMangler) function for a ptr's element type.
// This also handles further dereferencing of any further ptr indirections (e.g. ***int).
func loadReflectPtr(et reflect.Type) (Mangler, rMangler) {
	count := 1

	// Iteratively dereference ptrs, counting the levels
	// so the returned wrapper knows how deep to deref.
	for et.Kind() == reflect.Pointer {
		et = et.Elem()
		count++
	}

	if et.Kind() == reflect.Array {
		// Special case of addressable (sliceable) array
		if mng := loadReflectKnownSlice(et); mng != nil {
			if count == 1 {
				return mng, nil
			}
			return nil, deref_ptr_mangler(mng, count-1)
		}

		// Look for an array mangler function, this will
		// access elements by index using reflect.Value and
		// pass each one to a separate mangler function.
		if rmng := loadReflectArray(et); rmng != nil {
			return nil, deref_ptr_rmangler(rmng, count)
		}

		return nil, nil
	}

	// Try remove a layer of derefs by loading a mangler
	// for a known ptr kind. The less reflection the better!
	if mng := loadReflectKnownPtr(et); mng != nil {
		if count == 1 {
			return mng, nil
		}
		return nil, deref_ptr_mangler(mng, count-1)
	}

	// Search for ptr element type mangler
	if mng, rmng := load(nil, et); mng != nil {
		return nil, deref_ptr_mangler(mng, count)
	} else if rmng != nil {
		return nil, deref_ptr_rmangler(rmng, count)
	}

	return nil, nil
}
|
||||
|
||||
// loadReflectKnownPtr loads a Mangler function for a known ptr-of-element type
// (in this case, primitive ptrs). Returns nil for non-primitive element kinds.
func loadReflectKnownPtr(et reflect.Type) Mangler {
	switch et.Kind() {
	case reflect.String:
		return mangle_string_ptr

	case reflect.Bool:
		return mangle_bool_ptr

	case reflect.Int,
		reflect.Uint,
		reflect.Uintptr:
		return mangle_platform_int_ptr

	case reflect.Int8,
		reflect.Uint8:
		return mangle_8bit_ptr

	case reflect.Int16,
		reflect.Uint16:
		return mangle_16bit_ptr

	case reflect.Int32,
		reflect.Uint32:
		return mangle_32bit_ptr

	case reflect.Int64,
		reflect.Uint64:
		return mangle_64bit_ptr

	case reflect.Float32:
		return mangle_32bit_ptr

	case reflect.Float64:
		return mangle_64bit_ptr

	case reflect.Complex64:
		return mangle_64bit_ptr

	case reflect.Complex128:
		return mangle_128bit_ptr

	default:
		return nil
	}
}
|
||||
|
||||
// loadReflectKnownSlice loads a Mangler function for a known slice-of-element
// type (in this case, primitives). Returns nil for non-primitive element kinds.
func loadReflectKnownSlice(et reflect.Type) Mangler {
	switch et.Kind() {
	case reflect.String:
		return mangle_string_slice

	case reflect.Bool:
		return mangle_bool_slice

	case reflect.Int,
		reflect.Uint,
		reflect.Uintptr:
		return mangle_platform_int_slice

	case reflect.Int8,
		reflect.Uint8:
		return mangle_8bit_slice

	case reflect.Int16,
		reflect.Uint16:
		return mangle_16bit_slice

	case reflect.Int32,
		reflect.Uint32:
		return mangle_32bit_slice

	case reflect.Int64,
		reflect.Uint64:
		return mangle_64bit_slice

	case reflect.Float32:
		return mangle_32bit_slice

	case reflect.Float64:
		return mangle_64bit_slice

	case reflect.Complex64:
		return mangle_64bit_slice

	case reflect.Complex128:
		return mangle_128bit_slice

	default:
		return nil
	}
}
|
||||
|
||||
// loadReflectArray loads an rMangler function for an array (or slice) of given
// element type, wrapping the element's mangler in a per-element iterator.
// Returns nil when no mangler exists for the element type.
func loadReflectArray(et reflect.Type) rMangler {
	// Search via reflected array element type
	if mng, rmng := load(nil, et); mng != nil {
		return iter_array_mangler(mng)
	} else if rmng != nil {
		return iter_array_rmangler(rmng)
	}
	return nil
}
|
||||
|
||||
// loadReflectMap loads an rMangler function for a map with the given key and
// value types. Returns nil if no mangler can be found for either side.
func loadReflectMap(kt, vt reflect.Type) rMangler {
	var kmng, vmng rMangler

	// Search for mangler for key type
	mng, rmng := load(nil, kt)

	switch {
	// Wrap key mangler to reflect
	case mng != nil:
		mng := mng // take our own ptr
		kmng = func(buf []byte, v reflect.Value) []byte {
			return mng(buf, v.Interface())
		}

	// Use reflect key mangler as-is
	case rmng != nil:
		kmng = rmng

	// No mangler found
	default:
		return nil
	}

	// Search for mangler for value type
	mng, rmng = load(nil, vt)

	switch {
	// Wrap value mangler to reflect
	case mng != nil:
		mng := mng // take our own ptr
		vmng = func(buf []byte, v reflect.Value) []byte {
			return mng(buf, v.Interface())
		}

	// Use reflect value mangler as-is
	case rmng != nil:
		vmng = rmng

	// No mangler found
	default:
		return nil
	}

	// Wrap key/value manglers in map iter
	return iter_map_rmangler(kmng, vmng)
}
|
132
vendor/codeberg.org/gruf/go-mangler/mangle.go
generated
vendored
Normal file
132
vendor/codeberg.org/gruf/go-mangler/mangle.go
generated
vendored
Normal file
|
@ -0,0 +1,132 @@
|
|||
package mangler
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
|
||||
"github.com/cespare/xxhash"
|
||||
"github.com/cornelk/hashmap"
|
||||
)
|
||||
|
||||
var (
	// manglers is a map of runtime type ptrs => Mangler functions.
	// (keyed by the type's runtime pointer, obtained via iface_value)
	manglers = hashmap.New[uintptr, Mangler]()

	// bin is a short-hand for our chosen byteorder encoding.
	bin = binary.LittleEndian
)

// Mangled is an interface that allows any type to implement a custom
// Mangler function to improve performance when mangling this type.
type Mangled interface {
	// Mangle appends the receiver's mangled form to buf and returns it.
	Mangle(buf []byte) []byte
}

// Mangler is a function that will take an input interface value of known
// type, and append it in mangled serialized form to the given byte buffer.
// While the value type is an interface, the Mangler functions are accessed
// by the value's runtime type pointer, allowing the input value type to be known.
type Mangler func(buf []byte, value any) []byte

// rMangler is functionally the same as a Mangler function, but it
// takes the value input in reflected form. By specifying these differences
// in mangler function types, it allows us to cut back on new calls to
// `reflect.ValueOf()` and instead pass by existing reflected values.
type rMangler func(buf []byte, value reflect.Value) []byte
|
||||
|
||||
// Get will fetch the Mangler function for given runtime type.
// Returns (nil, false) for a nil type or when no mangler is registered.
func Get(t reflect.Type) (Mangler, bool) {
	if t == nil {
		return nil, false
	}
	// Key by the raw runtime type pointer
	uptr := uintptr(iface_value(t))
	return manglers.Get(uptr)
}
|
||||
|
||||
// Register will register the given Mangler function for use with vars of given runtime type. This allows
// registering performant manglers for existing types not implementing Mangled (e.g. std library types).
// NOTE: panics if there already exists a Mangler function for given type. Register on init().
func Register(t reflect.Type, m Mangler) {
	if t == nil {
		// Nil interface{} types cannot be searched by, do not accept
		panic("cannot register mangler for nil interface{} type")
	}

	// Get raw runtime type ptr
	uptr := uintptr(iface_value(t))

	// Ensure this is a unique encoder
	if _, ok := manglers.Get(uptr); ok {
		panic("already registered mangler for type: " + t.String())
	}

	// Cache this encoder func
	manglers.Set(uptr, m)
}
|
||||
|
||||
// Append will append the mangled form of input value 'a' to buffer 'b'.
// See mangler.String() for more information on mangled output.
func Append(b []byte, a any) []byte {
	// Get reflect type of 'a'
	t := reflect.TypeOf(a)

	// Get raw runtime type ptr (unique per concrete
	// type; used as the mangler cache key)
	uptr := uintptr(iface_value(t))

	// Look for a cached mangler
	mng, ok := manglers.Get(uptr)

	if !ok {
		// Load mangler into cache.
		// NOTE(review): concurrent first calls for the same type may each
		// load and Set; presumably the concurrent hashmap makes this benign
		// (last write wins) — confirm against the hashmap's semantics.
		mng = loadMangler(a, t)
		manglers.Set(uptr, mng)
	}

	// First write the type ptr (this adds
	// a unique prefix for each runtime type).
	b = mangle_platform_int(b, uptr)

	// Finally, mangle value
	return mng(b, a)
}
|
||||
|
||||
// String will return the mangled format of input value 'a'. This
// mangled output will be unique for all default supported input types
// during a single runtime instance. Uniqueness cannot be guaranteed
// between separate runtime instances (whether running concurrently, or
// the same application running at different times).
//
// The exact formatting of the output data should not be relied upon,
// only that it is unique given the above constraints. Generally though,
// the mangled output is the binary formatted text of given input data.
//
// Uniqueness is guaranteed for similar input data of differing types
// (e.g. string("hello world") vs. []byte("hello world")) by prefixing
// mangled output with the input data's runtime type pointer.
//
// Default supported types include:
// - string
// - bool
// - int,int8,int16,int32,int64
// - uint,uint8,uint16,uint32,uint64,uintptr
// - float32,float64
// - complex64,complex128
// - all type aliases of above
// - time.Time{}, *url.URL{}
// - mangler.Mangled{}
// - encoding.BinaryMarshaler{}
// - all pointers to the above
// - all slices / arrays of the above
// - all map keys / values of the above
func String(a any) string {
	b := Append(make([]byte, 0, 32), a)
	// Zero-copy []byte -> string conversion; safe here because
	// 'b' never escapes nor is mutated after this point.
	return *(*string)(unsafe.Pointer(&b))
}
|
||||
|
||||
// Hash returns the xxHash digest of the result of mangler.Append(nil, 'a').
|
||||
func Hash(a any) uint64 {
|
||||
b := make([]byte, 0, 32)
|
||||
b = Append(b, a)
|
||||
return xxhash.Sum64(b)
|
||||
}
|
264
vendor/codeberg.org/gruf/go-mangler/manglers.go
generated
vendored
Normal file
264
vendor/codeberg.org/gruf/go-mangler/manglers.go
generated
vendored
Normal file
|
@ -0,0 +1,264 @@
|
|||
package mangler
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"fmt"
|
||||
"math/bits"
|
||||
"time"
|
||||
_ "unsafe"
|
||||
)
|
||||
|
||||
// Notes:
|
||||
// the use of unsafe conversion from the direct interface values to
|
||||
// the chosen types in each of the below functions allows us to convert
|
||||
// not only those types directly, but anything type-aliased to those
|
||||
// types. e.g. `time.Duration` directly as int64.
|
||||
|
||||
// mangle_string appends the raw bytes of a string (or string-aliased) value.
func mangle_string(buf []byte, a any) []byte {
	return append(buf, *(*string)(iface_value(a))...)
}

// mangle_string_ptr appends '1' + string bytes for a non-nil *string,
// or just '0' for nil.
func mangle_string_ptr(buf []byte, a any) []byte {
	if ptr := (*string)(iface_value(a)); ptr != nil {
		buf = append(buf, '1')
		return append(buf, *ptr...)
	}
	buf = append(buf, '0')
	return buf
}

// mangle_string_slice appends each string joined by ','
// (trailing separator trimmed for non-empty slices).
func mangle_string_slice(buf []byte, a any) []byte {
	s := *(*[]string)(iface_value(a))
	for _, s := range s {
		buf = append(buf, s...)
		buf = append(buf, ',')
	}
	if len(s) > 0 {
		// Drop trailing ','
		buf = buf[:len(buf)-1]
	}
	return buf
}
|
||||
|
||||
// mangle_bool appends '1' for true, '0' for false.
func mangle_bool(buf []byte, a any) []byte {
	if *(*bool)(iface_value(a)) {
		return append(buf, '1')
	}
	return append(buf, '0')
}

// mangle_bool_ptr appends '1' + bool byte for a non-nil *bool,
// or just '0' for nil.
func mangle_bool_ptr(buf []byte, a any) []byte {
	if ptr := (*bool)(iface_value(a)); ptr != nil {
		buf = append(buf, '1')
		if *ptr {
			return append(buf, '1')
		}
		return append(buf, '0')
	}
	buf = append(buf, '0')
	return buf
}

// mangle_bool_slice appends one '1'/'0' byte per element.
func mangle_bool_slice(buf []byte, a any) []byte {
	for _, b := range *(*[]bool)(iface_value(a)) {
		if b {
			buf = append(buf, '1')
		} else {
			buf = append(buf, '0')
		}
	}
	return buf
}
|
||||
|
||||
// mangle_8bit appends a single byte value (int8/uint8 and aliases).
func mangle_8bit(buf []byte, a any) []byte {
	return append(buf, *(*uint8)(iface_value(a)))
}

// mangle_8bit_ptr appends '1' + byte for a non-nil pointer, or just '0' for nil.
func mangle_8bit_ptr(buf []byte, a any) []byte {
	if ptr := (*uint8)(iface_value(a)); ptr != nil {
		buf = append(buf, '1')
		return append(buf, *ptr)
	}
	buf = append(buf, '0')
	return buf
}

// mangle_8bit_slice appends the slice's raw bytes directly.
func mangle_8bit_slice(buf []byte, a any) []byte {
	return append(buf, *(*[]uint8)(iface_value(a))...)
}
|
||||
|
||||
// mangle_16bit appends a 16-bit value little-endian (int16/uint16 and aliases).
func mangle_16bit(buf []byte, a any) []byte {
	return bin.AppendUint16(buf, *(*uint16)(iface_value(a)))
}

// mangle_16bit_ptr appends '1' + value for a non-nil pointer, or just '0' for nil.
func mangle_16bit_ptr(buf []byte, a any) []byte {
	if ptr := (*uint16)(iface_value(a)); ptr != nil {
		buf = append(buf, '1')
		return bin.AppendUint16(buf, *ptr)
	}
	buf = append(buf, '0')
	return buf
}

// mangle_16bit_slice appends each element little-endian, no separator.
func mangle_16bit_slice(buf []byte, a any) []byte {
	for _, u := range *(*[]uint16)(iface_value(a)) {
		buf = bin.AppendUint16(buf, u)
	}
	return buf
}
|
||||
|
||||
// mangle_32bit appends a 32-bit value little-endian
// (int32/uint32/float32 and aliases).
func mangle_32bit(buf []byte, a any) []byte {
	return bin.AppendUint32(buf, *(*uint32)(iface_value(a)))
}

// mangle_32bit_ptr appends '1' + value for a non-nil pointer, or just '0' for nil.
func mangle_32bit_ptr(buf []byte, a any) []byte {
	if ptr := (*uint32)(iface_value(a)); ptr != nil {
		buf = append(buf, '1')
		return bin.AppendUint32(buf, *ptr)
	}
	buf = append(buf, '0')
	return buf
}

// mangle_32bit_slice appends each element little-endian, no separator.
func mangle_32bit_slice(buf []byte, a any) []byte {
	for _, u := range *(*[]uint32)(iface_value(a)) {
		buf = bin.AppendUint32(buf, u)
	}
	return buf
}
|
||||
|
||||
// mangle_64bit appends a 64-bit value little-endian
// (int64/uint64/float64/complex64 and aliases).
func mangle_64bit(buf []byte, a any) []byte {
	return bin.AppendUint64(buf, *(*uint64)(iface_value(a)))
}

// mangle_64bit_ptr appends '1' + value for a non-nil pointer, or just '0' for nil.
func mangle_64bit_ptr(buf []byte, a any) []byte {
	if ptr := (*uint64)(iface_value(a)); ptr != nil {
		buf = append(buf, '1')
		return bin.AppendUint64(buf, *ptr)
	}
	buf = append(buf, '0')
	return buf
}

// mangle_64bit_slice appends each element little-endian, no separator.
func mangle_64bit_slice(buf []byte, a any) []byte {
	for _, u := range *(*[]uint64)(iface_value(a)) {
		buf = bin.AppendUint64(buf, u)
	}
	return buf
}
|
||||
|
||||
// mangle_platform_int contains the correct iface mangler on runtime for platform int size.
// (chosen once at package init by inspecting bits.UintSize)
var mangle_platform_int = func() Mangler {
	switch bits.UintSize {
	case 32:
		return mangle_32bit
	case 64:
		return mangle_64bit
	default:
		panic("unexpected platform int size")
	}
}()

// mangle_platform_int_ptr contains the correct iface mangler on runtime for platform int size.
var mangle_platform_int_ptr = func() Mangler {
	switch bits.UintSize {
	case 32:
		return mangle_32bit_ptr
	case 64:
		return mangle_64bit_ptr
	default:
		panic("unexpected platform int size")
	}
}()

// mangle_platform_int_slice contains the correct iface mangler on runtime for platform int size.
var mangle_platform_int_slice = func() Mangler {
	switch bits.UintSize {
	case 32:
		return mangle_32bit_slice
	case 64:
		return mangle_64bit_slice
	default:
		panic("unexpected platform int size")
	}
}()
|
||||
|
||||
// uint128 provides an easily mangleable data type for 128bit data types to be cast into.
type uint128 [2]uint64

// mangle_128bit appends both 64-bit halves of a 128-bit value to buf,
// low index first.
func mangle_128bit(buf []byte, a any) []byte {
	u2 := *(*uint128)(iface_value(a))
	buf = bin.AppendUint64(buf, u2[0])
	buf = bin.AppendUint64(buf, u2[1])
	return buf
}
|
||||
|
||||
func mangle_128bit_ptr(buf []byte, a any) []byte {
|
||||
if ptr := (*uint128)(iface_value(a)); ptr != nil {
|
||||
buf = append(buf, '1')
|
||||
buf = bin.AppendUint64(buf, (*ptr)[0])
|
||||
buf = bin.AppendUint64(buf, (*ptr)[1])
|
||||
}
|
||||
buf = append(buf, '0')
|
||||
return buf
|
||||
}
|
||||
|
||||
func mangle_128bit_slice(buf []byte, a any) []byte {
|
||||
for _, u2 := range *(*[]uint128)(iface_value(a)) {
|
||||
buf = bin.AppendUint64(buf, u2[0])
|
||||
buf = bin.AppendUint64(buf, u2[1])
|
||||
}
|
||||
return buf
|
||||
}
|
||||
|
||||
// mangle_time appends the binary marshaling of a time.Time value to buf.
// Panics if time.Time.MarshalBinary fails (it does not fail for valid times
// with well-formed locations).
func mangle_time(buf []byte, a any) []byte {
	t := *(*time.Time)(iface_value(a))
	b, err := t.MarshalBinary()
	if err != nil {
		panic("marshal_time: " + err.Error())
	}
	return append(buf, b...)
}

// mangle_time_ptr appends a nil-flag byte ('1' = non-nil, '0' = nil),
// followed, when non-nil, by the binary marshaling of the pointed-to time.
func mangle_time_ptr(buf []byte, a any) []byte {
	if ptr := (*time.Time)(iface_value(a)); ptr != nil {
		b, err := ptr.MarshalBinary()
		if err != nil {
			panic("marshal_time: " + err.Error())
		}
		buf = append(buf, '1')
		return append(buf, b...)
	}
	buf = append(buf, '0')
	return buf
}

// mangle_mangled appends a nil-flag byte followed by the value's own
// Mangle output. Panics (via the type assertion) if a does not implement
// Mangled.
// NOTE(review): a typed-nil pointer implementing Mangled still satisfies
// `v != nil` here, so Mangle may be invoked on a nil receiver — confirm
// implementations tolerate that.
func mangle_mangled(buf []byte, a any) []byte {
	if v := a.(Mangled); v != nil {
		buf = append(buf, '1')
		return v.Mangle(buf)
	}
	buf = append(buf, '0')
	return buf
}

// mangle_binary appends a nil-flag byte followed by the value's
// MarshalBinary output. Panics if marshaling fails, or (via the type
// assertion) if a does not implement encoding.BinaryMarshaler.
// NOTE(review): same typed-nil caveat as mangle_mangled above.
func mangle_binary(buf []byte, a any) []byte {
	if v := a.(encoding.BinaryMarshaler); v != nil {
		b, err := v.MarshalBinary()
		if err != nil {
			panic("mangle_binary: " + err.Error())
		}
		buf = append(buf, '1')
		return append(buf, b...)
	}
	buf = append(buf, '0')
	return buf
}

// mangle_stringer appends a nil-flag byte followed by the value's String()
// output. Panics (via the type assertion) if a does not implement
// fmt.Stringer.
func mangle_stringer(buf []byte, a any) []byte {
	if v := a.(fmt.Stringer); v != nil {
		buf = append(buf, '1')
		return append(buf, v.String()...)
	}
	buf = append(buf, '0')
	return buf
}
|
9
vendor/codeberg.org/gruf/go-maps/LICENSE
generated
vendored
Normal file
9
vendor/codeberg.org/gruf/go-maps/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,9 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2022 gruf
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
7
vendor/codeberg.org/gruf/go-maps/README.md
generated
vendored
Normal file
7
vendor/codeberg.org/gruf/go-maps/README.md
generated
vendored
Normal file
|
@ -0,0 +1,7 @@
|
|||
# go-maps
|
||||
|
||||
Provides a selection of hashmaps (or, "dictionaries") with features exceeding that of the default Go runtime hashmap.
|
||||
|
||||
Includes:
|
||||
- OrderedMap
|
||||
- LRUMap
|
289
vendor/codeberg.org/gruf/go-maps/common.go
generated
vendored
Normal file
289
vendor/codeberg.org/gruf/go-maps/common.go
generated
vendored
Normal file
|
@ -0,0 +1,289 @@
|
|||
package maps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"codeberg.org/gruf/go-byteutil"
|
||||
"codeberg.org/gruf/go-kv"
|
||||
)
|
||||
|
||||
// ordered provides a common ordered hashmap base, storing order in a doubly-linked list.
type ordered[K comparable, V any] struct {
	hmap map[K]*elem[K, V] // key -> list element lookup
	list list[K, V]        // doubly-linked list tracking element order
	pool []*elem[K, V]     // free pool of recycled elements; non-nil also marks "initialized"
	rnly bool              // read-only flag, set during Range/RangeIf/Truncate/format loops
}

// write_check panics if map is not in a safe-state to write to,
// i.e. while a read loop is in progress (see rnly).
func (m ordered[K, V]) write_check() {
	if m.rnly {
		panic("map write during read loop")
	}
}

// Has returns whether key exists in map.
func (m *ordered[K, V]) Has(key K) bool {
	_, ok := m.hmap[key]
	return ok
}
|
||||
|
||||
// Delete will delete given key from map, returns false if not found.
// Panics if called during a read loop.
func (m *ordered[K, V]) Delete(key K) bool {
	// Ensure safe to write
	m.write_check()

	// Look for existing elem
	elem, ok := m.hmap[key]
	if !ok {
		return false
	}

	// Drop from order list
	m.list.Unlink(elem)

	// Delete from map
	delete(m.hmap, key)

	// Return elem to pool for reuse
	m.free(elem)

	return true
}
|
||||
|
||||
// Range passes given function over the requested range of the map.
// A negative length iterates backwards from start; zero length only
// bounds-checks start. The map is read-only for the duration of the loop.
// Panics if the requested range falls outside the map bounds.
func (m *ordered[K, V]) Range(start, length int, fn func(int, K, V)) {
	// Disallow writes for duration of loop
	m.rnly = true
	defer func() {
		m.rnly = false
	}()

	// Nil check
	// NOTE(review): `_ = fn` does not actually check fn for nil; a nil fn
	// will panic at the first call below — confirm intended behavior.
	_ = fn

	switch end := start + length; {
	// No loop to iterate
	case length == 0:
		if start < 0 || (m.list.len > 0 && start >= m.list.len) {
			panic("index out of bounds")
		}

	// Step backwards
	case length < 0:
		// Check loop indices are within map bounds
		if end < -1 || start >= m.list.len || m.list.len == 0 {
			panic("index out of bounds")
		}

		// Get starting index elem
		elem := m.list.Index(start)

		for i := start; i > end; i-- {
			fn(i, elem.K, elem.V)
			elem = elem.prev
		}

	// Step forwards
	case length > 0:
		// Check loop indices are within map bounds
		if start < 0 || end > m.list.len || m.list.len == 0 {
			panic("index out of bounds")
		}

		// Get starting index elem
		elem := m.list.Index(start)

		for i := start; i < end; i++ {
			fn(i, elem.K, elem.V)
			elem = elem.next
		}
	}
}
|
||||
|
||||
// RangeIf passes given function over the requested range of the map. Returns early on 'fn' -> false.
// Semantics otherwise identical to Range: negative length steps backwards,
// the map is read-only for the duration, out-of-bounds ranges panic.
func (m *ordered[K, V]) RangeIf(start, length int, fn func(int, K, V) bool) {
	// Disallow writes for duration of loop
	m.rnly = true
	defer func() {
		m.rnly = false
	}()

	// Nil check
	// NOTE(review): `_ = fn` does not actually check fn for nil (see Range).
	_ = fn

	switch end := start + length; {
	// No loop to iterate
	case length == 0:
		if start < 0 || (m.list.len > 0 && start >= m.list.len) {
			panic("index out of bounds")
		}

	// Step backwards
	case length < 0:
		// Check loop indices are within map bounds
		if end < -1 || start >= m.list.len || m.list.len == 0 {
			panic("index out of bounds")
		}

		// Get starting index elem
		elem := m.list.Index(start)

		for i := start; i > end; i-- {
			if !fn(i, elem.K, elem.V) {
				return
			}
			elem = elem.prev
		}

	// Step forwards
	case length > 0:
		// Check loop indices are within map bounds
		if start < 0 || end > m.list.len || m.list.len == 0 {
			panic("index out of bounds")
		}

		// Get starting index elem
		elem := m.list.Index(start)

		for i := start; i < end; i++ {
			if !fn(i, elem.K, elem.V) {
				return
			}
			elem = elem.next
		}
	}
}
|
||||
|
||||
// Truncate will truncate the map from the back by given amount, passing dropped elements to given function.
// A nil fn is permitted; a negative sz simply drops nothing. Panics if sz
// exceeds the current map length.
func (m *ordered[K, V]) Truncate(sz int, fn func(K, V)) {
	// Check size within bounds
	if sz > m.list.len {
		panic("index out of bounds")
	}

	if fn == nil {
		// move nil check out of loop
		fn = func(K, V) {}
	}

	// Disallow writes for duration of loop
	m.rnly = true
	defer func() {
		m.rnly = false
	}()

	for i := 0; i < sz; i++ {
		// Pop current tail
		elem := m.list.tail
		m.list.Unlink(elem)

		// Delete from map
		delete(m.hmap, elem.K)

		// Pass dropped to func
		fn(elem.K, elem.V)

		// Release to pool
		m.free(elem)
	}
}
|
||||
|
||||
// Len returns the current length of the map
// (tracked by the order list, not the backing hashmap).
func (m *ordered[K, V]) Len() int {
	return m.list.len
}
|
||||
|
||||
// format implements fmt.Formatter, allowing performant string formatting of map.
// Only the 'v' verb is supported; the '#' flag prefixes the output with the
// concrete type name. Output is "{k=v, k=v, ...}" in list (insertion/usage)
// order. The map is read-only while formatting.
func (m *ordered[K, V]) format(rtype reflect.Type, state fmt.State, verb rune) {
	var (
		kvbuf byteutil.Buffer // scratch buffer for key-value formatting
		field kv.Field        // reused key-value formatter
		vbose bool            // NOTE(review): never assigned, always false — confirm intended
	)

	switch {
	// Only handle 'v' verb
	case verb != 'v':
		panic("invalid verb '" + string(verb) + "' for map")

	// Prefix with type when verbose
	case state.Flag('#'):
		state.Write([]byte(rtype.String()))
	}

	// Disallow writes for duration of loop
	m.rnly = true
	defer func() {
		m.rnly = false
	}()

	// Write map opening brace
	state.Write([]byte{'{'})

	if m.list.len > 0 {
		// Preallocate buffer
		kvbuf.Guarantee(64)

		// Start at index 0
		elem := m.list.head

		// All but the final element, each followed by a comma separator.
		for i := 0; i < m.list.len-1; i++ {
			// Append formatted key-val pair to state
			field.K = fmt.Sprint(elem.K)
			field.V = elem.V
			field.AppendFormat(&kvbuf, vbose)
			_, _ = state.Write(kvbuf.B)
			kvbuf.Reset()

			// Prepare buffer with comma separator
			kvbuf.B = append(kvbuf.B, `, `...)

			// Jump to next in list
			elem = elem.next
		}

		// Append final formatted key-val pair to state (no trailing comma)
		field.K = fmt.Sprint(elem.K)
		field.V = elem.V
		field.AppendFormat(&kvbuf, vbose)
		_, _ = state.Write(kvbuf.B)
	}

	// Write map closing brace
	state.Write([]byte{'}'})
}
|
||||
|
||||
// Std returns a clone of map's data in the standard library equivalent map type.
|
||||
func (m *ordered[K, V]) Std() map[K]V {
|
||||
std := make(map[K]V, m.list.len)
|
||||
for _, elem := range m.hmap {
|
||||
std[elem.K] = elem.V
|
||||
}
|
||||
return std
|
||||
}
|
||||
|
||||
// alloc will acquire list element from pool, or allocate new.
func (m *ordered[K, V]) alloc() *elem[K, V] {
	if len(m.pool) == 0 {
		// Pool exhausted: heap-allocate a fresh element.
		return &elem[K, V]{}
	}
	// Pop last element from pool.
	idx := len(m.pool) - 1
	elem := m.pool[idx]
	m.pool = m.pool[:idx]
	return elem
}

// free will reset elem fields and place back in pool.
// Zeroing K/V releases any referenced memory for GC.
func (m *ordered[K, V]) free(elem *elem[K, V]) {
	var (
		zk K
		zv V
	)
	elem.K = zk
	elem.V = zv
	elem.next = nil
	elem.prev = nil
	m.pool = append(m.pool, elem)
}
|
154
vendor/codeberg.org/gruf/go-maps/list.go
generated
vendored
Normal file
154
vendor/codeberg.org/gruf/go-maps/list.go
generated
vendored
Normal file
|
@ -0,0 +1,154 @@
|
|||
package maps
|
||||
|
||||
// list is a doubly-linked list containing elements with key-value pairs of given generic parameter types.
// The links are circular (head.prev == tail, tail.next == head) — see PushFront/PushBack.
type list[K comparable, V any] struct {
	head *elem[K, V]
	tail *elem[K, V]
	len  int
}

// Index returns the element at index from list.
// Walks from whichever end is nearer, so cost is at most len/2 steps.
// NOTE(review): no bounds check here — callers are expected to validate idx.
func (l *list[K, V]) Index(idx int) *elem[K, V] {
	switch {
	// Idx in first half: walk forwards from head
	case idx < l.len/2:
		elem := l.head
		for i := 0; i < idx; i++ {
			elem = elem.next
		}
		return elem

	// Index in last half: walk backwards from tail
	default:
		elem := l.tail
		for i := l.len - 1; i > idx; i-- {
			elem = elem.prev
		}
		return elem
	}
}
|
||||
|
||||
// PushFront will push the given element to the front of the list.
// Maintains the circular invariant: head.prev == tail and tail.next == head.
func (l *list[K, V]) PushFront(elem *elem[K, V]) {
	if l.len == 0 {
		// Set new tail + head
		l.head = elem
		l.tail = elem

		// Link elem to itself (single-element circular list)
		elem.next = elem
		elem.prev = elem
	} else {
		oldHead := l.head

		// Link to old head
		elem.next = oldHead
		oldHead.prev = elem

		// Link up to tail (keep list circular)
		elem.prev = l.tail
		l.tail.next = elem

		// Set new head
		l.head = elem
	}

	// Incr count
	l.len++
}

// PushBack will push the given element to the back of the list.
// Mirror image of PushFront; same circular invariant.
func (l *list[K, V]) PushBack(elem *elem[K, V]) {
	if l.len == 0 {
		// Set new tail + head
		l.head = elem
		l.tail = elem

		// Link elem to itself (single-element circular list)
		elem.next = elem
		elem.prev = elem
	} else {
		oldTail := l.tail

		// Link up to head (keep list circular)
		elem.next = l.head
		l.head.prev = elem

		// Link to old tail
		elem.prev = oldTail
		oldTail.next = elem

		// Set new tail
		l.tail = elem
	}

	// Incr count
	l.len++
}
|
||||
|
||||
// PopTail will pop the current tail of the list, returns nil if empty.
func (l *list[K, V]) PopTail() *elem[K, V] {
	if l.len == 0 {
		return nil
	}
	elem := l.tail
	l.Unlink(elem)
	return elem
}

// Unlink will unlink the given element from the doubly-linked list chain.
// NOTE(review): when len <= 1 this assumes elem IS the sole list member —
// it empties the list regardless of which element was passed.
func (l *list[K, V]) Unlink(elem *elem[K, V]) {
	if l.len <= 1 {
		// Drop elem's links
		elem.next = nil
		elem.prev = nil

		// Only elem in list
		l.head = nil
		l.tail = nil
		l.len = 0
		return
	}

	// Get surrounding elems
	next := elem.next
	prev := elem.prev

	// Relink chain around elem
	next.prev = prev
	prev.next = next

	switch elem {
	// Set new head
	case l.head:
		l.head = next

	// Set new tail
	case l.tail:
		l.tail = prev
	}

	// Drop elem's links
	elem.next = nil
	elem.prev = nil

	// Decr count
	l.len--
}
|
||||
|
||||
// elem represents an element in a doubly-linked list.
type elem[K comparable, V any] struct {
	next *elem[K, V]
	prev *elem[K, V]
	K    K // element key
	V    V // element value
}

// allocElems will allocate a slice of empty elements of length.
// Used to pre-populate the ordered map's element pool.
func allocElems[K comparable, V any](i int) []*elem[K, V] {
	s := make([]*elem[K, V], i)
	for i := range s {
		s[i] = &elem[K, V]{}
	}
	return s
}
|
153
vendor/codeberg.org/gruf/go-maps/lru.go
generated
vendored
Normal file
153
vendor/codeberg.org/gruf/go-maps/lru.go
generated
vendored
Normal file
|
@ -0,0 +1,153 @@
|
|||
package maps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// LRUMap provides an ordered hashmap implementation that keeps elements ordered according to last recently used (hence, LRU).
type LRUMap[K comparable, V any] struct {
	ordered[K, V]
	size int // maximum capacity; evictions occur at this bound
}

// NewLRU returns a new instance of LRUMap with given initializing length and maximum capacity.
func NewLRU[K comparable, V any](len, cap int) *LRUMap[K, V] {
	m := new(LRUMap[K, V])
	m.Init(len, cap)
	return m
}

// Init will initialize this map with initial length and maximum capacity.
// Panics if cap is non-positive or if the map was already initialized
// (a non-nil pool marks initialization).
func (m *LRUMap[K, V]) Init(len, cap int) {
	if cap <= 0 {
		panic("lru cap must be greater than zero")
	} else if m.pool != nil {
		panic("lru map already initialized")
	}
	m.ordered.hmap = make(map[K]*elem[K, V], len)
	m.ordered.pool = allocElems[K, V](len)
	m.size = cap
}
|
||||
|
||||
// Get will fetch value for given key from map, in the process pushing it to the front of the map. Returns false if not found.
// Note: a hit mutates list order, so Get panics during a read loop.
func (m *LRUMap[K, V]) Get(key K) (V, bool) {
	if elem, ok := m.hmap[key]; ok {
		// Ensure safe to reorder
		m.write_check()

		// Unlink elem from list
		m.list.Unlink(elem)

		// Push to front of list (mark most recently used)
		m.list.PushFront(elem)

		return elem.V, true
	}
	var z V // zero value
	return z, false
}
|
||||
|
||||
// Add will add the given key-value pair to the map, pushing them to the front of the map. Returns false if already exists. Evicts old at maximum capacity.
func (m *LRUMap[K, V]) Add(key K, value V) bool {
	return m.AddWithHook(key, value, nil)
}

// AddWithHook performs .Add() but passing any evicted entry to given hook function.
func (m *LRUMap[K, V]) AddWithHook(key K, value V, evict func(K, V)) bool {
	// Ensure safe to write
	m.write_check()

	// Look for existing elem
	elem, ok := m.hmap[key]
	if ok {
		// Key already present; value NOT updated (use Set for upsert).
		return false
	}

	if m.list.len >= m.size {
		// We're at capacity, sir!
		// Pop current tail elem (least recently used) and reuse it.
		elem = m.list.PopTail()

		if evict != nil {
			// Pass to evict hook
			evict(elem.K, elem.V)
		}

		// Delete evicted key from map
		delete(m.hmap, elem.K)
	} else {
		// Allocate elem from pool
		elem = m.alloc()
	}

	// Set elem
	elem.K = key
	elem.V = value

	// Add element map entry
	m.hmap[key] = elem

	// Push to front of list (most recently used)
	m.list.PushFront(elem)
	return true
}
|
||||
|
||||
// Set will ensure that given key-value pair exists in the map, by either adding new or updating existing, pushing them to the front of the map. Evicts old at maximum capacity.
func (m *LRUMap[K, V]) Set(key K, value V) {
	m.SetWithHook(key, value, nil)
}

// SetWithHook performs .Set() but passing any evicted entry to given hook function.
func (m *LRUMap[K, V]) SetWithHook(key K, value V, evict func(K, V)) {
	// Ensure safe to write
	m.write_check()

	// Look for existing elem
	elem, ok := m.hmap[key]

	if ok {
		// Unlink elem from list; re-pushed to front below.
		m.list.Unlink(elem)

		// Update existing value
		elem.V = value
	} else {
		if m.list.len >= m.size {
			// We're at capacity, sir!
			// Pop current tail elem (least recently used) and reuse it.
			elem = m.list.PopTail()

			if evict != nil {
				// Pass to evict hook
				evict(elem.K, elem.V)
			}

			// Delete evicted key from map
			delete(m.hmap, elem.K)
		} else {
			// Allocate elem from pool
			elem = m.alloc()
		}

		// Set elem
		elem.K = key
		elem.V = value

		// Add element map entry
		m.hmap[key] = elem
	}

	// Push to front of list (most recently used)
	m.list.PushFront(elem)
}
|
||||
|
||||
// Cap returns the maximum capacity of this LRU map.
func (m *LRUMap[K, V]) Cap() int {
	return m.size
}

// Format implements fmt.Formatter, allowing performant string formatting of map.
func (m *LRUMap[K, V]) Format(state fmt.State, verb rune) {
	m.format(reflect.TypeOf(m), state, verb)
}
|
159
vendor/codeberg.org/gruf/go-maps/ordered.go
generated
vendored
Normal file
159
vendor/codeberg.org/gruf/go-maps/ordered.go
generated
vendored
Normal file
|
@ -0,0 +1,159 @@
|
|||
package maps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// OrderedMap provides a hashmap implementation that tracks the order in which keys are added.
type OrderedMap[K comparable, V any] struct {
	ordered[K, V]
}

// NewOrdered returns a new instance of OrderedMap with given initializing length.
func NewOrdered[K comparable, V any](len int) *OrderedMap[K, V] {
	m := new(OrderedMap[K, V])
	m.Init(len)
	return m
}

// Init will initialize this map with initial length.
// Panics if the map was already initialized (a non-nil pool marks it).
func (m *OrderedMap[K, V]) Init(len int) {
	if m.pool != nil {
		panic("ordered map already initialized")
	}
	m.ordered.hmap = make(map[K]*elem[K, V], len)
	m.ordered.pool = allocElems[K, V](len)
}
|
||||
|
||||
// Get will fetch value for given key from map. Returns false if not found.
|
||||
func (m *OrderedMap[K, V]) Get(key K) (V, bool) {
|
||||
if elem, ok := m.hmap[key]; ok {
|
||||
return elem.V, true
|
||||
}
|
||||
var z V // zero value
|
||||
return z, false
|
||||
}
|
||||
|
||||
// Add will add the given key-value pair to the map, returns false if already exists.
// An existing key's value is NOT updated (use Set for upsert semantics).
func (m *OrderedMap[K, V]) Add(key K, value V) bool {
	// Ensure safe to write
	m.write_check()

	// Look for existing elem
	elem, ok := m.hmap[key]
	if ok {
		return false
	}

	// Allocate elem from pool
	elem = m.alloc()
	elem.K = key
	elem.V = value

	// Add element map entry
	m.hmap[key] = elem

	// Push to back of list (insertion order)
	m.list.PushBack(elem)
	return true
}
|
||||
|
||||
// Set will ensure that given key-value pair exists in the map, by either adding new or updating existing.
// An existing key keeps its original position; only new keys are appended.
func (m *OrderedMap[K, V]) Set(key K, value V) {
	// Ensure safe to write
	m.write_check()

	// Look for existing elem
	elem, ok := m.hmap[key]

	if ok {
		// Update existing value in place
		elem.V = value
	} else {
		// Allocate elem from pool
		elem = m.alloc()
		elem.K = key
		elem.V = value

		// Add element map entry
		m.hmap[key] = elem

		// Push to back of list (insertion order)
		m.list.PushBack(elem)
	}
}
|
||||
|
||||
// Index returns the key-value pair at index from map. Returns false if index out of range.
func (m *OrderedMap[K, V]) Index(idx int) (K, V, bool) {
	if idx < 0 || idx >= m.list.len {
		var (
			zk K
			zv V
		) // zero values
		return zk, zv, false
	}
	elem := m.list.Index(idx)
	return elem.K, elem.V, true
}
|
||||
|
||||
// Push will insert the given key-value pair at index in the map. Panics if index out of range.
|
||||
func (m *OrderedMap[K, V]) Push(idx int, key K, value V) {
|
||||
// Check index within bounds of map
|
||||
if idx < 0 || idx >= m.list.len {
|
||||
panic("index out of bounds")
|
||||
}
|
||||
|
||||
// Ensure safe
|
||||
m.write_check()
|
||||
|
||||
// Get element at index
|
||||
next := m.list.Index(idx)
|
||||
|
||||
// Allocate new elem
|
||||
elem := m.alloc()
|
||||
elem.K = key
|
||||
elem.V = value
|
||||
|
||||
// Add element map entry
|
||||
m.hmap[key] = elem
|
||||
|
||||
// Move next forward
|
||||
elem.next = next
|
||||
elem.prev = next.prev
|
||||
|
||||
// Link up elem in chain
|
||||
next.prev.next = elem
|
||||
next.prev = elem
|
||||
}
|
||||
|
||||
// Pop will remove and return the key-value pair at index in the map. Panics if index out of range.
// NOTE(review): the hmap entry for the popped key is not deleted here —
// confirm whether that is intentional (Delete does remove it).
func (m *OrderedMap[K, V]) Pop(idx int) (K, V) {
	// Check index within bounds of map
	if idx < 0 || idx >= m.list.len {
		panic("index out of bounds")
	}

	// Ensure safe to write
	m.write_check()

	// Get element at index
	elem := m.list.Index(idx)

	// Unlink elem from list
	m.list.Unlink(elem)

	// Copy out elem values before freeing
	k := elem.K
	v := elem.V

	// Release to pool (zeroes the element)
	m.free(elem)

	return k, v
}
|
||||
|
||||
// Format implements fmt.Formatter, allowing performant string formatting of map.
func (m *OrderedMap[K, V]) Format(state fmt.State, verb rune) {
	m.format(reflect.TypeOf(m), state, verb)
}
|
22
vendor/github.com/cespare/xxhash/LICENSE.txt
generated
vendored
Normal file
22
vendor/github.com/cespare/xxhash/LICENSE.txt
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
|||
Copyright (c) 2016 Caleb Spare
|
||||
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
50
vendor/github.com/cespare/xxhash/README.md
generated
vendored
Normal file
50
vendor/github.com/cespare/xxhash/README.md
generated
vendored
Normal file
|
@ -0,0 +1,50 @@
|
|||
# xxhash
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
|
||||
|
||||
xxhash is a Go implementation of the 64-bit
|
||||
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
|
||||
high-quality hashing algorithm that is much faster than anything in the Go
|
||||
standard library.
|
||||
|
||||
The API is very small, taking its cue from the other hashing packages in the
|
||||
standard library:
|
||||
|
||||
$ go doc github.com/cespare/xxhash !
|
||||
package xxhash // import "github.com/cespare/xxhash"
|
||||
|
||||
Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
|
||||
at http://cyan4973.github.io/xxHash/.
|
||||
|
||||
func New() hash.Hash64
|
||||
func Sum64(b []byte) uint64
|
||||
func Sum64String(s string) uint64
|
||||
|
||||
This implementation provides a fast pure-Go implementation and an even faster
|
||||
assembly implementation for amd64.
|
||||
|
||||
## Benchmarks
|
||||
|
||||
Here are some quick benchmarks comparing the pure-Go and assembly
|
||||
implementations of Sum64 against another popular Go XXH64 implementation,
|
||||
[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash):
|
||||
|
||||
| input size | OneOfOne | cespare (purego) | cespare |
|
||||
| --- | --- | --- | --- |
|
||||
| 5 B | 416 MB/s | 720 MB/s | 872 MB/s |
|
||||
| 100 B | 3980 MB/s | 5013 MB/s | 5252 MB/s |
|
||||
| 4 KB | 12727 MB/s | 12999 MB/s | 13026 MB/s |
|
||||
| 10 MB | 9879 MB/s | 10775 MB/s | 10913 MB/s |
|
||||
|
||||
These numbers were generated with:
|
||||
|
||||
```
|
||||
$ go test -benchtime 10s -bench '/OneOfOne,'
|
||||
$ go test -tags purego -benchtime 10s -bench '/xxhash,'
|
||||
$ go test -benchtime 10s -bench '/xxhash,'
|
||||
```
|
||||
|
||||
## Projects using this package
|
||||
|
||||
- [InfluxDB](https://github.com/influxdata/influxdb)
|
||||
- [Prometheus](https://github.com/prometheus/prometheus)
|
14
vendor/github.com/cespare/xxhash/rotate.go
generated
vendored
Normal file
14
vendor/github.com/cespare/xxhash/rotate.go
generated
vendored
Normal file
|
@ -0,0 +1,14 @@
|
|||
// +build !go1.9

package xxhash

// TODO(caleb): After Go 1.10 comes out, remove this fallback code.

// Manual left-rotations for pre-Go-1.9 toolchains, which lack
// math/bits.RotateLeft64. Each rolN rotates x left by N bits.
func rol1(x uint64) uint64  { return (x << 1) | (x >> (64 - 1)) }
func rol7(x uint64) uint64  { return (x << 7) | (x >> (64 - 7)) }
func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) }
func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) }
func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) }
func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) }
func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) }
func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) }
|
14
vendor/github.com/cespare/xxhash/rotate19.go
generated
vendored
Normal file
14
vendor/github.com/cespare/xxhash/rotate19.go
generated
vendored
Normal file
|
@ -0,0 +1,14 @@
|
|||
// +build go1.9

package xxhash

import "math/bits"

// Left-rotations via math/bits (Go 1.9+), which the compiler lowers to a
// single rotate instruction on supported architectures.
func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
|
168
vendor/github.com/cespare/xxhash/xxhash.go
generated
vendored
Normal file
168
vendor/github.com/cespare/xxhash/xxhash.go
generated
vendored
Normal file
|
@ -0,0 +1,168 @@
|
|||
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
|
||||
// at http://cyan4973.github.io/xxHash/.
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"hash"
|
||||
)
|
||||
|
||||
// XXH64 prime constants, as defined by the xxHash specification.
const (
	prime1 uint64 = 11400714785074694791
	prime2 uint64 = 14029467366897019727
	prime3 uint64 = 1609587929392839161
	prime4 uint64 = 9650029242287828579
	prime5 uint64 = 2870177450012600261
)

// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
// possible in the Go code is worth a small (but measurable) performance boost
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
// convenience in the Go code in a few places where we need to intentionally
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
// result overflows a uint64).
var (
	prime1v = prime1
	prime2v = prime2
	prime3v = prime3
	prime4v = prime4
	prime5v = prime5
)

// xxh is the streaming XXH64 state.
type xxh struct {
	v1    uint64   // accumulator lanes, mixed per 32-byte block
	v2    uint64
	v3    uint64
	v4    uint64
	total int      // total bytes written so far
	mem   [32]byte // buffered partial block
	n     int      // how much of mem is used
}
|
||||
|
||||
// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm.
func New() hash.Hash64 {
	var x xxh
	x.Reset()
	return &x
}

// Reset reinitializes the accumulator lanes to the XXH64 seed-0 values
// and discards any buffered input.
func (x *xxh) Reset() {
	x.n = 0
	x.total = 0
	x.v1 = prime1v + prime2
	x.v2 = prime2
	x.v3 = 0
	x.v4 = -prime1v
}

// Size returns the digest size in bytes (hash.Hash interface).
func (x *xxh) Size() int { return 8 }

// BlockSize returns the algorithm's block size in bytes (hash.Hash interface).
func (x *xxh) BlockSize() int { return 32 }
|
||||
|
||||
// Write adds more data to x. It always returns len(b), nil.
//
// Input is buffered in x.mem until a full 32-byte block is available; each
// full block is folded into the four accumulators immediately, and at most
// 31 bytes remain buffered on return.
func (x *xxh) Write(b []byte) (n int, err error) {
	n = len(b)
	x.total += len(b)

	if x.n+len(b) < 32 {
		// This new data doesn't even fill the current block.
		copy(x.mem[x.n:], b)
		x.n += len(b)
		return
	}

	if x.n > 0 {
		// Finish off the partial block.
		copy(x.mem[x.n:], b)
		x.v1 = round(x.v1, u64(x.mem[0:8]))
		x.v2 = round(x.v2, u64(x.mem[8:16]))
		x.v3 = round(x.v3, u64(x.mem[16:24]))
		x.v4 = round(x.v4, u64(x.mem[24:32]))
		// Skip the bytes that completed the buffered block.
		b = b[32-x.n:]
		x.n = 0
	}

	if len(b) >= 32 {
		// One or more full blocks left.
		b = writeBlocks(x, b)
	}

	// Store any remaining partial block.
	copy(x.mem[:], b)
	x.n = len(b)

	return
}
|
||||
|
||||
func (x *xxh) Sum(b []byte) []byte {
|
||||
s := x.Sum64()
|
||||
return append(
|
||||
b,
|
||||
byte(s>>56),
|
||||
byte(s>>48),
|
||||
byte(s>>40),
|
||||
byte(s>>32),
|
||||
byte(s>>24),
|
||||
byte(s>>16),
|
||||
byte(s>>8),
|
||||
byte(s),
|
||||
)
|
||||
}
|
||||
|
||||
// Sum64 returns the hash of all data written so far. It only reads the
// state, so more data may be written afterwards.
func (x *xxh) Sum64() uint64 {
	var h uint64

	if x.total >= 32 {
		// At least one full block was processed: combine the four
		// rotated lanes, then merge each lane into the result.
		v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		// Short input: v3 still holds its Reset value (0), so this is
		// effectively seed + prime5.
		h = x.v3 + prime5
	}

	h += uint64(x.total)

	// Fold in the buffered tail (x.mem[:x.n]): 8 bytes at a time, then up
	// to 4 bytes, then single bytes.
	i, end := 0, x.n
	for ; i+8 <= end; i += 8 {
		k1 := round(0, u64(x.mem[i:i+8]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if i+4 <= end {
		h ^= uint64(u32(x.mem[i:i+4])) * prime1
		h = rol23(h)*prime2 + prime3
		i += 4
	}
	for i < end {
		h ^= uint64(x.mem[i]) * prime5
		h = rol11(h) * prime1
		i++
	}

	// Final avalanche: shift-xor/multiply mixing of the result.
	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}
|
||||
|
||||
// u64 decodes the first 8 bytes of b as a little-endian uint64.
func u64(b []byte) uint64 {
	return binary.LittleEndian.Uint64(b)
}

// u32 decodes the first 4 bytes of b as a little-endian uint32.
func u32(b []byte) uint32 {
	return binary.LittleEndian.Uint32(b)
}
|
||||
|
||||
// round folds one 8-byte word of input into a single accumulator lane:
// multiply-add by prime2, rotate left by 31, multiply by prime1.
func round(acc, input uint64) uint64 {
	acc += input * prime2
	acc = rol31(acc)
	acc *= prime1
	return acc
}
|
||||
|
||||
// mergeRound folds one finished accumulator lane (val) into the combined
// hash value (acc) during finalization.
func mergeRound(acc, val uint64) uint64 {
	val = round(0, val)
	acc ^= val
	acc = acc*prime1 + prime4
	return acc
}
|
12
vendor/github.com/cespare/xxhash/xxhash_amd64.go
generated
vendored
Normal file
12
vendor/github.com/cespare/xxhash/xxhash_amd64.go
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
|||
// +build !appengine
// +build gc
// +build !purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b.
//
//go:noescape
func Sum64(b []byte) uint64

// writeBlocks hashes all full 32-byte blocks at the start of b into x's
// accumulators and returns the remaining tail of b.
// Implemented in assembly in xxhash_amd64.s.
func writeBlocks(x *xxh, b []byte) []byte
|
233
vendor/github.com/cespare/xxhash/xxhash_amd64.s
generated
vendored
Normal file
233
vendor/github.com/cespare/xxhash/xxhash_amd64.s
generated
vendored
Normal file
|
@ -0,0 +1,233 @@
|
|||
// +build !appengine
// +build gc
// +build !purego

// Assembly implementations of Sum64 and writeBlocks for amd64.

#include "textflag.h"

// Register allocation:
// AX	h
// CX	pointer to advance through b
// DX	n
// BX	loop end
// R8	v1, k1
// R9	v2
// R10	v3
// R11	v4
// R12	tmp
// R13	prime1v
// R14	prime2v
// R15	prime4v

// round reads from and advances the buffer pointer in CX.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
	MOVQ  (CX), R12 \
	ADDQ  $8, CX    \
	IMULQ R14, R12  \
	ADDQ  R12, r    \
	ROLQ  $31, r    \
	IMULQ R13, r

// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
#define mergeRound(acc, val) \
	IMULQ R14, val \
	ROLQ  $31, val \
	IMULQ R13, val \
	XORQ  val, acc \
	IMULQ R13, acc \
	ADDQ  R15, acc

// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
	// Load fixed primes.
	MOVQ ·prime1v(SB), R13
	MOVQ ·prime2v(SB), R14
	MOVQ ·prime4v(SB), R15

	// Load slice.
	MOVQ b_base+0(FP), CX
	MOVQ b_len+8(FP), DX
	LEAQ (CX)(DX*1), BX

	// The first loop limit will be len(b)-32.
	SUBQ $32, BX

	// Check whether we have at least one block.
	CMPQ DX, $32
	JLT  noBlocks

	// Set up initial state (v1, v2, v3, v4).
	MOVQ R13, R8
	ADDQ R14, R8
	MOVQ R14, R9
	XORQ R10, R10
	XORQ R11, R11
	SUBQ R13, R11

	// Loop until CX > BX.
blockLoop:
	round(R8)
	round(R9)
	round(R10)
	round(R11)

	CMPQ CX, BX
	JLE  blockLoop

	MOVQ R8, AX
	ROLQ $1, AX
	MOVQ R9, R12
	ROLQ $7, R12
	ADDQ R12, AX
	MOVQ R10, R12
	ROLQ $12, R12
	ADDQ R12, AX
	MOVQ R11, R12
	ROLQ $18, R12
	ADDQ R12, AX

	mergeRound(AX, R8)
	mergeRound(AX, R9)
	mergeRound(AX, R10)
	mergeRound(AX, R11)

	JMP afterBlocks

noBlocks:
	MOVQ ·prime5v(SB), AX

afterBlocks:
	ADDQ DX, AX

	// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
	ADDQ $24, BX

	CMPQ CX, BX
	JG   fourByte

wordLoop:
	// Calculate k1.
	MOVQ  (CX), R8
	ADDQ  $8, CX
	IMULQ R14, R8
	ROLQ  $31, R8
	IMULQ R13, R8

	XORQ  R8, AX
	ROLQ  $27, AX
	IMULQ R13, AX
	ADDQ  R15, AX

	CMPQ CX, BX
	JLE  wordLoop

fourByte:
	ADDQ $4, BX
	CMPQ CX, BX
	JG   singles

	MOVL  (CX), R8
	ADDQ  $4, CX
	IMULQ R13, R8
	XORQ  R8, AX

	ROLQ  $23, AX
	IMULQ R14, AX
	ADDQ  ·prime3v(SB), AX

singles:
	ADDQ $4, BX
	CMPQ CX, BX
	JGE  finalize

singlesLoop:
	MOVBQZX (CX), R12
	ADDQ    $1, CX
	IMULQ   ·prime5v(SB), R12
	XORQ    R12, AX

	ROLQ  $11, AX
	IMULQ R13, AX

	CMPQ CX, BX
	JL   singlesLoop

finalize:
	// Final avalanche mixing.
	MOVQ  AX, R12
	SHRQ  $33, R12
	XORQ  R12, AX
	IMULQ R14, AX
	MOVQ  AX, R12
	SHRQ  $29, R12
	XORQ  R12, AX
	IMULQ ·prime3v(SB), AX
	MOVQ  AX, R12
	SHRQ  $32, R12
	XORQ  R12, AX

	MOVQ AX, ret+24(FP)
	RET

// writeBlocks uses the same registers as above except that it uses AX to store
// the x pointer.

// func writeBlocks(x *xxh, b []byte) []byte
TEXT ·writeBlocks(SB), NOSPLIT, $0-56
	// Load fixed primes needed for round.
	MOVQ ·prime1v(SB), R13
	MOVQ ·prime2v(SB), R14

	// Load slice.
	MOVQ b_base+8(FP), CX
	MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below
	MOVQ b_len+16(FP), DX
	LEAQ (CX)(DX*1), BX
	SUBQ $32, BX

	// Load vN from x.
	MOVQ x+0(FP), AX
	MOVQ 0(AX), R8   // v1
	MOVQ 8(AX), R9   // v2
	MOVQ 16(AX), R10 // v3
	MOVQ 24(AX), R11 // v4

	// We don't need to check the loop condition here; this function is
	// always called with at least one block of data to process.
blockLoop:
	round(R8)
	round(R9)
	round(R10)
	round(R11)

	CMPQ CX, BX
	JLE  blockLoop

	// Copy vN back to x.
	MOVQ R8, 0(AX)
	MOVQ R9, 8(AX)
	MOVQ R10, 16(AX)
	MOVQ R11, 24(AX)

	// Construct return slice.
	// NOTE: It's important that we don't construct a slice that has a base
	// pointer off the end of the original slice, as in Go 1.7+ this will
	// cause runtime crashes. (See discussion in, for example,
	// https://github.com/golang/go/issues/16772.)
	// Therefore, we calculate the length/cap first, and if they're zero, we
	// keep the old base. This is what the compiler does as well if you
	// write code like
	// 	b = b[len(b):]

	// New length is 32 - (CX - BX) -> BX+32 - CX.
	ADDQ $32, BX
	SUBQ CX, BX
	JZ   afterSetBase

	MOVQ CX, ret_base+32(FP)

afterSetBase:
	MOVQ BX, ret_len+40(FP)
	MOVQ BX, ret_cap+48(FP) // set cap == len

	RET
|
75
vendor/github.com/cespare/xxhash/xxhash_other.go
generated
vendored
Normal file
75
vendor/github.com/cespare/xxhash/xxhash_other.go
generated
vendored
Normal file
|
@ -0,0 +1,75 @@
|
|||
// +build !amd64 appengine !gc purego
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b.
// This is the pure-Go fallback used when the amd64 assembly is unavailable.
func Sum64(b []byte) uint64 {
	// A simpler version would be
	//   x := New()
	//   x.Write(b)
	//   return x.Sum64()
	// but this is faster, particularly for small inputs.

	n := len(b)
	var h uint64

	if n >= 32 {
		// Process 32-byte blocks across four independent accumulators.
		// Full-slice expressions (b[lo:hi:len(b)]) keep capacities tied
		// to the input slice.
		v1 := prime1v + prime2
		v2 := prime2
		v3 := uint64(0)
		v4 := -prime1v
		for len(b) >= 32 {
			v1 = round(v1, u64(b[0:8:len(b)]))
			v2 = round(v2, u64(b[8:16:len(b)]))
			v3 = round(v3, u64(b[16:24:len(b)]))
			v4 = round(v4, u64(b[24:32:len(b)]))
			b = b[32:len(b):len(b)]
		}
		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		h = prime5
	}

	h += uint64(n)

	// Hash the remaining tail: 8 bytes at a time, then up to 4 bytes,
	// then single bytes.
	i, end := 0, len(b)
	for ; i+8 <= end; i += 8 {
		k1 := round(0, u64(b[i:i+8:len(b)]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if i+4 <= end {
		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
		h = rol23(h)*prime2 + prime3
		i += 4
	}
	for ; i < end; i++ {
		h ^= uint64(b[i]) * prime5
		h = rol11(h) * prime1
	}

	// Final avalanche mixing.
	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}
|
||||
|
||||
// writeBlocks hashes all full 32-byte blocks at the start of b into x's
// accumulators and returns the remaining (shorter than 32 bytes) tail of b.
func writeBlocks(x *xxh, b []byte) []byte {
	// Work on locals so the compiler keeps the lanes in registers.
	v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
	for len(b) >= 32 {
		v1 = round(v1, u64(b[0:8:len(b)]))
		v2 = round(v2, u64(b[8:16:len(b)]))
		v3 = round(v3, u64(b[16:24:len(b)]))
		v4 = round(v4, u64(b[24:32:len(b)]))
		b = b[32:len(b):len(b)]
	}
	x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4
	return b
}
|
10
vendor/github.com/cespare/xxhash/xxhash_safe.go
generated
vendored
Normal file
10
vendor/github.com/cespare/xxhash/xxhash_safe.go
generated
vendored
Normal file
|
@ -0,0 +1,10 @@
|
|||
// +build appengine
|
||||
|
||||
// This file contains the safe implementations of otherwise unsafe-using code.
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
// This App Engine variant copies s into a fresh []byte (one allocation);
// the !appengine build uses a zero-copy unsafe conversion instead.
func Sum64String(s string) uint64 {
	return Sum64([]byte(s))
}
|
30
vendor/github.com/cespare/xxhash/xxhash_unsafe.go
generated
vendored
Normal file
30
vendor/github.com/cespare/xxhash/xxhash_unsafe.go
generated
vendored
Normal file
|
@ -0,0 +1,30 @@
|
|||
// +build !appengine
|
||||
|
||||
// This file encapsulates usage of unsafe.
|
||||
// xxhash_safe.go contains the safe implementations.
|
||||
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
//
// TODO(caleb): Consider removing this if an optimization is ever added to make
// it unnecessary: https://golang.org/issue/2205.
//
// TODO(caleb): We still have a function call; we could instead write Go/asm
// copies of Sum64 for strings to squeeze out a bit more speed.
func Sum64String(s string) uint64 {
	// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
	// for some discussion about this unsafe conversion.
	//
	// NOTE(review): aliasing a string's backing bytes via reflect.SliceHeader
	// is the legacy pattern; modern Go deprecates SliceHeader/StringHeader in
	// favor of unsafe.String/unsafe.Slice. This is vendored upstream code, so
	// it is left as-is.
	var b []byte
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
	bh.Len = len(s)
	bh.Cap = len(s)
	return Sum64(b)
}
|
13
vendor/modules.txt
vendored
13
vendor/modules.txt
vendored
|
@ -16,6 +16,10 @@ codeberg.org/gruf/go-byteutil
|
|||
# codeberg.org/gruf/go-cache/v2 v2.1.4
|
||||
## explicit; go 1.19
|
||||
codeberg.org/gruf/go-cache/v2
|
||||
# codeberg.org/gruf/go-cache/v3 v3.1.6
|
||||
## explicit; go 1.19
|
||||
codeberg.org/gruf/go-cache/v3/result
|
||||
codeberg.org/gruf/go-cache/v3/ttl
|
||||
# codeberg.org/gruf/go-debug v1.2.0
|
||||
## explicit; go 1.16
|
||||
codeberg.org/gruf/go-debug
|
||||
|
@ -38,6 +42,12 @@ codeberg.org/gruf/go-kv/format
|
|||
# codeberg.org/gruf/go-logger/v2 v2.2.1
|
||||
## explicit; go 1.19
|
||||
codeberg.org/gruf/go-logger/v2/level
|
||||
# codeberg.org/gruf/go-mangler v1.1.1
|
||||
## explicit; go 1.19
|
||||
codeberg.org/gruf/go-mangler
|
||||
# codeberg.org/gruf/go-maps v1.0.3
|
||||
## explicit; go 1.19
|
||||
codeberg.org/gruf/go-maps
|
||||
# codeberg.org/gruf/go-mutexes v1.1.4
|
||||
## explicit; go 1.14
|
||||
codeberg.org/gruf/go-mutexes
|
||||
|
@ -63,6 +73,9 @@ github.com/aymerick/douceur/parser
|
|||
## explicit; go 1.14
|
||||
github.com/buckket/go-blurhash
|
||||
github.com/buckket/go-blurhash/base83
|
||||
# github.com/cespare/xxhash v1.1.0
|
||||
## explicit
|
||||
github.com/cespare/xxhash
|
||||
# github.com/coreos/go-oidc/v3 v3.4.0
|
||||
## explicit; go 1.14
|
||||
github.com/coreos/go-oidc/v3/oidc
|
||||
|
|
Loading…
Reference in a new issue