[chore]: Bump github.com/minio/minio-go/v7 from 7.0.85 to 7.0.86 (#3803)

Bumps [github.com/minio/minio-go/v7](https://github.com/minio/minio-go) from 7.0.85 to 7.0.86.
- [Release notes](https://github.com/minio/minio-go/releases)
- [Commits](https://github.com/minio/minio-go/compare/v7.0.85...v7.0.86)

---
updated-dependencies:
- dependency-name: github.com/minio/minio-go/v7
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
dependabot[bot] 2025-02-17 11:42:50 +00:00 committed by GitHub
parent 5dc8009e30
commit 46e473f86e
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
21 changed files with 1313 additions and 412 deletions

go.mod (7 changed lines)

@ -46,7 +46,7 @@ require (
github.com/k3a/html2text v1.2.1
github.com/microcosm-cc/bluemonday v1.0.27
github.com/miekg/dns v1.1.63
github.com/minio/minio-go/v7 v7.0.85
github.com/minio/minio-go/v7 v7.0.86
github.com/mitchellh/mapstructure v1.5.0
github.com/ncruces/go-sqlite3 v0.23.0
github.com/oklog/ulid v1.3.1
@ -81,7 +81,7 @@ require (
go.uber.org/automaxprocs v1.6.0
golang.org/x/crypto v0.33.0
golang.org/x/image v0.24.0
golang.org/x/net v0.34.0
golang.org/x/net v0.35.0
golang.org/x/oauth2 v0.26.0
golang.org/x/sys v0.30.0
golang.org/x/text v0.22.0
@ -138,7 +138,7 @@ require (
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.24.0 // indirect
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect
github.com/goccy/go-json v0.10.4 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/golang/geo v0.0.0-20200319012246-673a6f80352d // indirect
@ -167,6 +167,7 @@ require (
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/minio/crc64nvme v1.0.0 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect

go.sum (generated, 14 changed lines)

@ -236,8 +236,8 @@ github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo=
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM=
github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
@ -401,10 +401,12 @@ github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwX
github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
github.com/minio/crc64nvme v1.0.0 h1:MeLcBkCTD4pAoU7TciAfwsfxgkhM2u5hCe48hSEVFr0=
github.com/minio/crc64nvme v1.0.0/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v7 v7.0.85 h1:9psTLS/NTvC3MWoyjhjXpwcKoNbkongaCSF3PNpSuXo=
github.com/minio/minio-go/v7 v7.0.85/go.mod h1:57YXpvc5l3rjPdhqNrDsvVlY0qPI6UTk1bflAe+9doY=
github.com/minio/minio-go/v7 v7.0.86 h1:DcgQ0AUjLJzRH6y/HrxiZ8CXarA70PAIufXHodP4s+k=
github.com/minio/minio-go/v7 v7.0.86/go.mod h1:VbfO4hYwUu3Of9WqGLBZ8vl3Hxnxo4ngxK4hzQDf4x4=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
@ -747,8 +749,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=


@ -2,6 +2,7 @@
import (
"reflect"
"sync"
"unsafe"
)
@ -24,7 +25,7 @@ type TypeAddr struct {
var (
typeAddr *TypeAddr
alreadyAnalyzed bool
once sync.Once
)
//go:linkname typelinks reflect.typelinks
@ -34,18 +35,13 @@ func typelinks() ([]unsafe.Pointer, [][]int32)
func rtypeOff(unsafe.Pointer, int32) unsafe.Pointer
func AnalyzeTypeAddr() *TypeAddr {
defer func() {
alreadyAnalyzed = true
}()
if alreadyAnalyzed {
return typeAddr
}
once.Do(func() {
sections, offsets := typelinks()
if len(sections) != 1 {
return nil
return
}
if len(offsets) != 1 {
return nil
return
}
section := sections[0]
offset := offsets[0]
@ -78,7 +74,7 @@ func AnalyzeTypeAddr() *TypeAddr {
}
addrRange := max - min
if addrRange == 0 {
return nil
return
}
var addrShift uintptr
if isAligned64 {
@ -88,7 +84,7 @@ func AnalyzeTypeAddr() *TypeAddr {
}
cacheSize := addrRange >> addrShift
if cacheSize > maxAcceptableTypeAddrRange {
return nil
return
}
typeAddr = &TypeAddr{
BaseTypeAddr: min,
@ -96,5 +92,7 @@ func AnalyzeTypeAddr() *TypeAddr {
AddrRange: addrRange,
AddrShift: addrShift,
}
})
return typeAddr
}
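For context, a minimal standalone sketch of the pattern this hunk adopts: sync.Once runs the analysis body at most once, even under concurrent callers, where the previous alreadyAnalyzed flag could race. The names below (analyze, result) are illustrative stand-ins, not identifiers from the diff.

```go
package main

import (
	"fmt"
	"sync"
)

var (
	result *int
	once   sync.Once
)

func analyze() *int {
	once.Do(func() {
		// Expensive one-time work. On failure, simply return from the closure
		// and leave result nil, mirroring the bare "return" statements above.
		v := 42
		result = &v
	})
	return result
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(*analyze()) // every goroutine sees the same memoized value
		}()
	}
	wg.Wait()
}
```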

vendor/github.com/minio/crc64nvme/LICENSE (generated, vendored, new file, 202 changed lines)

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/minio/crc64nvme/README.md (generated, vendored, new file, 20 changed lines)

@ -0,0 +1,20 @@
## crc64nvme
This Golang package calculates CRC64 checksums using carryless-multiplication accelerated with SIMD instructions for both ARM and x86. It is based on the NVME polynomial as specified in the [NVM Express® NVM Command Set Specification](https://nvmexpress.org/wp-content/uploads/NVM-Express-NVM-Command-Set-Specification-1.0d-2023.12.28-Ratified.pdf).
The code is based on the [crc64fast-nvme](https://github.com/awesomized/crc64fast-nvme.git) package in Rust and is released under the Apache 2.0 license.
For more background on the exact technique used, see this [Fast CRC Computation for Generic Polynomials Using PCLMULQDQ Instruction](https://web.archive.org/web/20131224125630/https://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf) paper.
### Performance
To follow.
### Requirements
All Go versions >= 1.22 are supported.
### Contributing
Contributions are welcome, please send PRs for any enhancements.
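As a quick orientation for the new transitive dependency, here is a hedged usage sketch based only on the exported API shown in this vendored package (Checksum for one-shot hashing, New for a streaming hash.Hash64); the input data is illustrative.

```go
package main

import (
	"fmt"

	"github.com/minio/crc64nvme"
)

func main() {
	data := []byte("The quick brown fox jumps over the lazy dog")

	// One-shot checksum of a byte slice.
	sum := crc64nvme.Checksum(data)

	// Streaming: New returns a hash.Hash64, so data can be written in chunks.
	h := crc64nvme.New()
	h.Write(data[:10])
	h.Write(data[10:])

	fmt.Printf("one-shot:  %016x\n", sum)
	fmt.Printf("streaming: %016x\n", h.Sum64()) // same value as the one-shot call
}
```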

vendor/github.com/minio/crc64nvme/crc64.go (generated, vendored, new file, 180 changed lines)

@ -0,0 +1,180 @@
// Copyright (c) 2025 Minio Inc. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
// Package crc64nvme implements the 64-bit cyclic redundancy check with NVME polynomial.
package crc64nvme
import (
"encoding/binary"
"errors"
"hash"
"sync"
"unsafe"
)
const (
// The size of a CRC-64 checksum in bytes.
Size = 8
// The NVME polynomial (reversed, as used by Go)
NVME = 0x9a6c9329ac4bc9b5
)
var (
// precalculated table.
nvmeTable = makeTable(NVME)
)
// table is a 256-word table representing the polynomial for efficient processing.
type table [256]uint64
var (
slicing8TablesBuildOnce sync.Once
slicing8TableNVME *[8]table
)
func buildSlicing8TablesOnce() {
slicing8TablesBuildOnce.Do(buildSlicing8Tables)
}
func buildSlicing8Tables() {
slicing8TableNVME = makeSlicingBy8Table(makeTable(NVME))
}
func makeTable(poly uint64) *table {
t := new(table)
for i := 0; i < 256; i++ {
crc := uint64(i)
for j := 0; j < 8; j++ {
if crc&1 == 1 {
crc = (crc >> 1) ^ poly
} else {
crc >>= 1
}
}
t[i] = crc
}
return t
}
func makeSlicingBy8Table(t *table) *[8]table {
var helperTable [8]table
helperTable[0] = *t
for i := 0; i < 256; i++ {
crc := t[i]
for j := 1; j < 8; j++ {
crc = t[crc&0xff] ^ (crc >> 8)
helperTable[j][i] = crc
}
}
return &helperTable
}
// digest represents the partial evaluation of a checksum.
type digest struct {
crc uint64
}
// New creates a new hash.Hash64 computing the CRC-64 checksum using the
// NVME polynomial. Its Sum method will lay the
// value out in big-endian byte order. The returned Hash64 also
// implements [encoding.BinaryMarshaler] and [encoding.BinaryUnmarshaler] to
// marshal and unmarshal the internal state of the hash.
func New() hash.Hash64 { return &digest{0} }
func (d *digest) Size() int { return Size }
func (d *digest) BlockSize() int { return 1 }
func (d *digest) Reset() { d.crc = 0 }
const (
magic = "crc\x02"
marshaledSize = len(magic) + 8 + 8
)
func (d *digest) MarshalBinary() ([]byte, error) {
b := make([]byte, 0, marshaledSize)
b = append(b, magic...)
b = binary.BigEndian.AppendUint64(b, tableSum)
b = binary.BigEndian.AppendUint64(b, d.crc)
return b, nil
}
func (d *digest) UnmarshalBinary(b []byte) error {
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
return errors.New("hash/crc64: invalid hash state identifier")
}
if len(b) != marshaledSize {
return errors.New("hash/crc64: invalid hash state size")
}
if tableSum != binary.BigEndian.Uint64(b[4:]) {
return errors.New("hash/crc64: tables do not match")
}
d.crc = binary.BigEndian.Uint64(b[12:])
return nil
}
func update(crc uint64, p []byte) uint64 {
if hasAsm && len(p) > 127 {
ptr := unsafe.Pointer(&p[0])
if align := (uintptr(ptr)+15)&^0xf - uintptr(ptr); align > 0 {
// Align to 16-byte boundary.
crc = update(crc, p[:align])
p = p[align:]
}
runs := len(p) / 128
crc = updateAsm(crc, p[:128*runs])
return update(crc, p[128*runs:])
}
buildSlicing8TablesOnce()
crc = ^crc
// table comparison is somewhat expensive, so avoid it for small sizes
for len(p) >= 64 {
var helperTable = slicing8TableNVME
// Update using slicing-by-8
for len(p) > 8 {
crc ^= binary.LittleEndian.Uint64(p)
crc = helperTable[7][crc&0xff] ^
helperTable[6][(crc>>8)&0xff] ^
helperTable[5][(crc>>16)&0xff] ^
helperTable[4][(crc>>24)&0xff] ^
helperTable[3][(crc>>32)&0xff] ^
helperTable[2][(crc>>40)&0xff] ^
helperTable[1][(crc>>48)&0xff] ^
helperTable[0][crc>>56]
p = p[8:]
}
}
// For remainders or small sizes
for _, v := range p {
crc = nvmeTable[byte(crc)^v] ^ (crc >> 8)
}
return ^crc
}
// Update returns the result of adding the bytes in p to the crc.
func Update(crc uint64, p []byte) uint64 {
return update(crc, p)
}
func (d *digest) Write(p []byte) (n int, err error) {
d.crc = update(d.crc, p)
return len(p), nil
}
func (d *digest) Sum64() uint64 { return d.crc }
func (d *digest) Sum(in []byte) []byte {
s := d.Sum64()
return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}
// Checksum returns the CRC-64 checksum of data
// using the NVME polynomial.
func Checksum(data []byte) uint64 { return update(0, data) }
// ISO tablesum of NVME poly
const tableSum = 0x8ddd9ee4402c7163
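New's doc comment above notes that the returned hash.Hash64 also implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler, so a partial checksum can be saved and resumed. A minimal sketch, assuming the caller performs the interface assertions itself; the split input is illustrative.

```go
package main

import (
	"encoding"
	"fmt"

	"github.com/minio/crc64nvme"
)

func main() {
	d := crc64nvme.New()
	d.Write([]byte("hello, "))

	// Snapshot the partial state (magic, table checksum, current CRC).
	state, err := d.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		panic(err)
	}

	// Later (or elsewhere): restore the state and keep hashing.
	d2 := crc64nvme.New()
	if err := d2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
		panic(err)
	}
	d2.Write([]byte("checksum"))

	// Equivalent to hashing the concatenated input in one go.
	fmt.Println(d2.Sum64() == crc64nvme.Checksum([]byte("hello, checksum")))
}
```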

vendor/github.com/minio/crc64nvme/crc64_amd64.go (generated, vendored, new file, 15 changed lines)

@ -0,0 +1,15 @@
// Copyright (c) 2025 Minio Inc. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
//go:build !noasm && !appengine && !gccgo
package crc64nvme
import (
"github.com/klauspost/cpuid/v2"
)
var hasAsm = cpuid.CPU.Supports(cpuid.SSE2, cpuid.CLMUL, cpuid.SSE4)
func updateAsm(crc uint64, p []byte) (checksum uint64)

vendor/github.com/minio/crc64nvme/crc64_amd64.s (generated, vendored, new file, 155 changed lines)

@ -0,0 +1,155 @@
// Copyright (c) 2025 Minio Inc. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
#include "textflag.h"
TEXT ·updateAsm(SB), $0-40
MOVQ crc+0(FP), AX // checksum
MOVQ p_base+8(FP), SI // start pointer
MOVQ p_len+16(FP), CX // length of buffer
NOTQ AX
SHRQ $7, CX
CMPQ CX, $1
JLT skip128
VMOVDQA 0x00(SI), X0
VMOVDQA 0x10(SI), X1
VMOVDQA 0x20(SI), X2
VMOVDQA 0x30(SI), X3
VMOVDQA 0x40(SI), X4
VMOVDQA 0x50(SI), X5
VMOVDQA 0x60(SI), X6
VMOVDQA 0x70(SI), X7
MOVQ AX, X8
PXOR X8, X0
CMPQ CX, $1
JE tail128
MOVQ $0xa1ca681e733f9c40, AX
MOVQ AX, X8
MOVQ $0x5f852fb61e8d92dc, AX
PINSRQ $0x1, AX, X9
loop128:
ADDQ $128, SI
SUBQ $1, CX
VMOVDQA X0, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X0
PXOR X10, X0
PXOR 0(SI), X0
VMOVDQA X1, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X1
PXOR X10, X1
PXOR 0x10(SI), X1
VMOVDQA X2, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X2
PXOR X10, X2
PXOR 0x20(SI), X2
VMOVDQA X3, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X3
PXOR X10, X3
PXOR 0x30(SI), X3
VMOVDQA X4, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X4
PXOR X10, X4
PXOR 0x40(SI), X4
VMOVDQA X5, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X5
PXOR X10, X5
PXOR 0x50(SI), X5
VMOVDQA X6, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X6
PXOR X10, X6
PXOR 0x60(SI), X6
VMOVDQA X7, X10
PCLMULQDQ $0x00, X8, X10
PCLMULQDQ $0x11, X9, X7
PXOR X10, X7
PXOR 0x70(SI), X7
CMPQ CX, $1
JGT loop128
tail128:
MOVQ $0xd083dd594d96319d, AX
MOVQ AX, X11
PCLMULQDQ $0x00, X0, X11
MOVQ $0x946588403d4adcbc, AX
PINSRQ $0x1, AX, X12
PCLMULQDQ $0x11, X12, X0
PXOR X11, X7
PXOR X0, X7
MOVQ $0x3c255f5ebc414423, AX
MOVQ AX, X11
PCLMULQDQ $0x00, X1, X11
MOVQ $0x34f5a24e22d66e90, AX
PINSRQ $0x1, AX, X12
PCLMULQDQ $0x11, X12, X1
PXOR X11, X1
PXOR X7, X1
MOVQ $0x7b0ab10dd0f809fe, AX
MOVQ AX, X11
PCLMULQDQ $0x00, X2, X11
MOVQ $0x03363823e6e791e5, AX
PINSRQ $0x1, AX, X12
PCLMULQDQ $0x11, X12, X2
PXOR X11, X2
PXOR X1, X2
MOVQ $0x0c32cdb31e18a84a, AX
MOVQ AX, X11
PCLMULQDQ $0x00, X3, X11
MOVQ $0x62242240ace5045a, AX
PINSRQ $0x1, AX, X12
PCLMULQDQ $0x11, X12, X3
PXOR X11, X3
PXOR X2, X3
MOVQ $0xbdd7ac0ee1a4a0f0, AX
MOVQ AX, X11
PCLMULQDQ $0x00, X4, X11
MOVQ $0xa3ffdc1fe8e82a8b, AX
PINSRQ $0x1, AX, X12
PCLMULQDQ $0x11, X12, X4
PXOR X11, X4
PXOR X3, X4
MOVQ $0xb0bc2e589204f500, AX
MOVQ AX, X11
PCLMULQDQ $0x00, X5, X11
MOVQ $0xe1e0bb9d45d7a44c, AX
PINSRQ $0x1, AX, X12
PCLMULQDQ $0x11, X12, X5
PXOR X11, X5
PXOR X4, X5
MOVQ $0xeadc41fd2ba3d420, AX
MOVQ AX, X11
PCLMULQDQ $0x00, X6, X11
MOVQ $0x21e9761e252621ac, AX
PINSRQ $0x1, AX, X12
PCLMULQDQ $0x11, X12, X6
PXOR X11, X6
PXOR X5, X6
MOVQ AX, X5
PCLMULQDQ $0x00, X6, X5
PSHUFD $0xee, X6, X6
PXOR X5, X6
MOVQ $0x27ecfa329aef9f77, AX
MOVQ AX, X4
PCLMULQDQ $0x00, X4, X6
PEXTRQ $0, X6, BX
MOVQ $0x34d926535897936b, AX
MOVQ AX, X4
PCLMULQDQ $0x00, X4, X6
PXOR X5, X6
PEXTRQ $1, X6, AX
XORQ BX, AX
skip128:
NOTQ AX
MOVQ AX, checksum+32(FP)
RET

vendor/github.com/minio/crc64nvme/crc64_arm64.go (generated, vendored, new file, 15 changed lines)

@ -0,0 +1,15 @@
// Copyright (c) 2025 Minio Inc. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
//go:build !noasm && !appengine && !gccgo
package crc64nvme
import (
"github.com/klauspost/cpuid/v2"
)
var hasAsm = cpuid.CPU.Supports(cpuid.ASIMD) && cpuid.CPU.Supports(cpuid.PMULL)
func updateAsm(crc uint64, p []byte) (checksum uint64)

vendor/github.com/minio/crc64nvme/crc64_arm64.s (generated, vendored, new file, 155 changed lines)

@ -0,0 +1,155 @@
// Copyright (c) 2025 Minio Inc. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
#include "textflag.h"
TEXT ·updateAsm(SB), $0-40
MOVD crc+0(FP), R0 // checksum
MOVD p_base+8(FP), R1 // start pointer
MOVD p_len+16(FP), R2 // length of buffer
MOVD $·const(SB), R3 // constants
MVN R0, R0
LSR $7, R2, R2
CMP $1, R2
BLT skip128
FLDPQ (R1), (F0, F1)
FLDPQ 32(R1), (F2, F3)
FLDPQ 64(R1), (F4, F5)
FLDPQ 96(R1), (F6, F7)
FMOVD R0, F8
VMOVI $0, V9.B16
VMOV V9.D[0], V8.D[1]
VEOR V8.B16, V0.B16, V0.B16
CMP $1, R2
BEQ tail128
MOVD 112(R3), R4
MOVD 120(R3), R5
FMOVD R4, F8
VDUP R5, V9.D2
loop128:
ADD $128, R1, R1
SUB $1, R2, R2
VPMULL V0.D1, V8.D1, V10.Q1
VPMULL2 V0.D2, V9.D2, V0.Q1
FLDPQ (R1), (F11, F12)
VEOR3 V0.B16, V11.B16, V10.B16, V0.B16
VPMULL V1.D1, V8.D1, V10.Q1
VPMULL2 V1.D2, V9.D2, V1.Q1
VEOR3 V1.B16, V12.B16, V10.B16, V1.B16
VPMULL V2.D1, V8.D1, V10.Q1
VPMULL2 V2.D2, V9.D2, V2.Q1
FLDPQ 32(R1), (F11, F12)
VEOR3 V2.B16, V11.B16, V10.B16, V2.B16
VPMULL V3.D1, V8.D1, V10.Q1
VPMULL2 V3.D2, V9.D2, V3.Q1
VEOR3 V3.B16, V12.B16, V10.B16, V3.B16
VPMULL V4.D1, V8.D1, V10.Q1
VPMULL2 V4.D2, V9.D2, V4.Q1
FLDPQ 64(R1), (F11, F12)
VEOR3 V4.B16, V11.B16, V10.B16, V4.B16
VPMULL V5.D1, V8.D1, V10.Q1
VPMULL2 V5.D2, V9.D2, V5.Q1
VEOR3 V5.B16, V12.B16, V10.B16, V5.B16
VPMULL V6.D1, V8.D1, V10.Q1
VPMULL2 V6.D2, V9.D2, V6.Q1
FLDPQ 96(R1), (F11, F12)
VEOR3 V6.B16, V11.B16, V10.B16, V6.B16
VPMULL V7.D1, V8.D1, V10.Q1
VPMULL2 V7.D2, V9.D2, V7.Q1
VEOR3 V7.B16, V12.B16, V10.B16, V7.B16
CMP $1, R2
BHI loop128
tail128:
MOVD (R3), R4
FMOVD R4, F11
VPMULL V0.D1, V11.D1, V11.Q1
MOVD 8(R3), R4
VDUP R4, V12.D2
VPMULL2 V0.D2, V12.D2, V0.Q1
VEOR3 V0.B16, V7.B16, V11.B16, V7.B16
MOVD 16(R3), R4
FMOVD R4, F11
VPMULL V1.D1, V11.D1, V11.Q1
MOVD 24(R3), R4
VDUP R4, V12.D2
VPMULL2 V1.D2, V12.D2, V1.Q1
VEOR3 V1.B16, V11.B16, V7.B16, V1.B16
MOVD 32(R3), R4
FMOVD R4, F11
VPMULL V2.D1, V11.D1, V11.Q1
MOVD 40(R3), R4
VDUP R4, V12.D2
VPMULL2 V2.D2, V12.D2, V2.Q1
VEOR3 V2.B16, V11.B16, V1.B16, V2.B16
MOVD 48(R3), R4
FMOVD R4, F11
VPMULL V3.D1, V11.D1, V11.Q1
MOVD 56(R3), R4
VDUP R4, V12.D2
VPMULL2 V3.D2, V12.D2, V3.Q1
VEOR3 V3.B16, V11.B16, V2.B16, V3.B16
MOVD 64(R3), R4
FMOVD R4, F11
VPMULL V4.D1, V11.D1, V11.Q1
MOVD 72(R3), R4
VDUP R4, V12.D2
VPMULL2 V4.D2, V12.D2, V4.Q1
VEOR3 V4.B16, V11.B16, V3.B16, V4.B16
MOVD 80(R3), R4
FMOVD R4, F11
VPMULL V5.D1, V11.D1, V11.Q1
MOVD 88(R3), R4
VDUP R4, V12.D2
VPMULL2 V5.D2, V12.D2, V5.Q1
VEOR3 V5.B16, V11.B16, V4.B16, V5.B16
MOVD 96(R3), R4
FMOVD R4, F11
VPMULL V6.D1, V11.D1, V11.Q1
MOVD 104(R3), R4
VDUP R4, V12.D2
VPMULL2 V6.D2, V12.D2, V6.Q1
VEOR3 V6.B16, V11.B16, V5.B16, V6.B16
FMOVD R4, F5
VPMULL V6.D1, V5.D1, V5.Q1
VDUP V6.D[1], V6.D2
VEOR V5.B8, V6.B8, V6.B8
MOVD 128(R3), R4
FMOVD R4, F4
VPMULL V4.D1, V6.D1, V6.Q1
FMOVD F6, R4
MOVD 136(R3), R5
FMOVD R5, F4
VPMULL V4.D1, V6.D1, V6.Q1
VEOR V6.B16, V5.B16, V6.B16
VMOV V6.D[1], R5
EOR R4, R5, R0
skip128:
MVN R0, R0
MOVD R0, checksum+32(FP)
RET
DATA ·const+0x000(SB)/8, $0xd083dd594d96319d // K_959
DATA ·const+0x008(SB)/8, $0x946588403d4adcbc // K_895
DATA ·const+0x010(SB)/8, $0x3c255f5ebc414423 // K_831
DATA ·const+0x018(SB)/8, $0x34f5a24e22d66e90 // K_767
DATA ·const+0x020(SB)/8, $0x7b0ab10dd0f809fe // K_703
DATA ·const+0x028(SB)/8, $0x03363823e6e791e5 // K_639
DATA ·const+0x030(SB)/8, $0x0c32cdb31e18a84a // K_575
DATA ·const+0x038(SB)/8, $0x62242240ace5045a // K_511
DATA ·const+0x040(SB)/8, $0xbdd7ac0ee1a4a0f0 // K_447
DATA ·const+0x048(SB)/8, $0xa3ffdc1fe8e82a8b // K_383
DATA ·const+0x050(SB)/8, $0xb0bc2e589204f500 // K_319
DATA ·const+0x058(SB)/8, $0xe1e0bb9d45d7a44c // K_255
DATA ·const+0x060(SB)/8, $0xeadc41fd2ba3d420 // K_191
DATA ·const+0x068(SB)/8, $0x21e9761e252621ac // K_127
DATA ·const+0x070(SB)/8, $0xa1ca681e733f9c40 // K_1087
DATA ·const+0x078(SB)/8, $0x5f852fb61e8d92dc // K_1023
DATA ·const+0x080(SB)/8, $0x27ecfa329aef9f77 // MU
DATA ·const+0x088(SB)/8, $0x34d926535897936b // POLY
GLOBL ·const(SB), (NOPTR+RODATA), $144

vendor/github.com/minio/crc64nvme/crc64_other.go (generated, vendored, new file, 9 changed lines)

@ -0,0 +1,9 @@
// Copyright (c) 2025 Minio Inc. All rights reserved.
// Use of this source code is governed by a license that can be
// found in the LICENSE file.
//go:build (!amd64 || noasm || appengine || gccgo) && (!arm64 || noasm || appengine || gccgo)
package crc64nvme
var hasAsm = false


@ -155,7 +155,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "v7.0.85"
libraryVersion = "v7.0.86"
)
// User Agent should always follow the below style.


@ -30,6 +30,8 @@
"math/bits"
"net/http"
"sort"
"github.com/minio/crc64nvme"
)
// ChecksumType contains information about the checksum type.
@ -152,9 +154,6 @@ func (c ChecksumType) RawByteLen() int {
const crc64NVMEPolynomial = 0xad93d23594c93659
// crc64 uses reversed polynomials.
var crc64Table = crc64.MakeTable(bits.Reverse64(crc64NVMEPolynomial))
// Hasher returns a hasher corresponding to the checksum type.
// Returns nil if no checksum.
func (c ChecksumType) Hasher() hash.Hash {
@ -168,7 +167,7 @@ func (c ChecksumType) Hasher() hash.Hash {
case ChecksumSHA256:
return sha256.New()
case ChecksumCRC64NVME:
return crc64.New(crc64Table)
return crc64nvme.New()
}
return nil
}
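This hunk swaps the stdlib hash/crc64 table (built from the reversed NVME polynomial) for the new crc64nvme package, which computes the same CRC with SIMD acceleration. A small comparison sketch with illustrative data, checking that the old and new code paths agree:

```go
package main

import (
	"fmt"
	"hash/crc64"
	"math/bits"

	"github.com/minio/crc64nvme"
)

func main() {
	data := []byte("object payload")

	// Old path: stdlib hash/crc64 with a table built from the reversed NVME
	// polynomial, exactly as the removed lines above did.
	const crc64NVMEPolynomial = 0xad93d23594c93659
	table := crc64.MakeTable(bits.Reverse64(crc64NVMEPolynomial))
	oldSum := crc64.Checksum(data, table)

	// New path: the dedicated crc64nvme package pulled in by this bump.
	newSum := crc64nvme.Checksum(data)

	fmt.Printf("hash/crc64: %016x\ncrc64nvme:  %016x\nequal: %v\n", oldSum, newSum, oldSum == newSum)
}
```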


@ -38,7 +38,15 @@
logFrameWrites bool
logFrameReads bool
inTests bool
disableExtendedConnectProtocol bool
// Enabling extended CONNECT causes browsers to attempt to use
// WebSockets-over-HTTP/2. This results in problems when the server's websocket
// package doesn't support extended CONNECT.
//
// Disable extended CONNECT by default for now.
//
// Issue #71128.
disableExtendedConnectProtocol = true
)
func init() {
@ -51,8 +59,8 @@ func init() {
logFrameWrites = true
logFrameReads = true
}
if strings.Contains(e, "http2xconnect=0") {
disableExtendedConnectProtocol = true
if strings.Contains(e, "http2xconnect=1") {
disableExtendedConnectProtocol = false
}
}
@ -407,23 +415,6 @@ func (s *sorter) SortStrings(ss []string) {
s.v = save
}
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
// - a non-empty string starting with '/'
// - the string '*', for OPTIONS requests.
//
// For now this is only used as a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
//
// We used to enforce that the path also didn't start with "//", but
// Google's GFE accepts such paths and Chrome sends them, so ignore
// that part of the spec. See golang.org/issue/19103.
func validPseudoPath(v string) bool {
return (len(v) > 0 && v[0] == '/') || v == "*"
}
// incomparable is a zero-width, non-comparable type. Adding it to a struct
// makes that struct also non-comparable, and generally doesn't add
// any size (as long as it's first).


@ -50,6 +50,7 @@
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/internal/httpcommon"
)
const (
@ -812,8 +813,7 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
func (sc *serverConn) canonicalHeader(v string) string {
sc.serveG.check()
buildCommonHeaderMapsOnce()
cv, ok := commonCanonHeader[v]
cv, ok := httpcommon.CachedCanonicalHeader(v)
if ok {
return cv
}


@ -25,7 +25,6 @@
"net/http"
"net/http/httptrace"
"net/textproto"
"sort"
"strconv"
"strings"
"sync"
@ -35,6 +34,7 @@
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/idna"
"golang.org/x/net/internal/httpcommon"
)
const (
@ -1275,23 +1275,6 @@ func (cc *ClientConn) closeForLostPing() {
// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests.
var errRequestCanceled = errors.New("net/http: request canceled")
func commaSeparatedTrailers(req *http.Request) (string, error) {
keys := make([]string, 0, len(req.Trailer))
for k := range req.Trailer {
k = canonicalHeader(k)
switch k {
case "Transfer-Encoding", "Trailer", "Content-Length":
return "", fmt.Errorf("invalid Trailer key %q", k)
}
keys = append(keys, k)
}
if len(keys) > 0 {
sort.Strings(keys)
return strings.Join(keys, ","), nil
}
return "", nil
}
func (cc *ClientConn) responseHeaderTimeout() time.Duration {
if cc.t.t1 != nil {
return cc.t.t1.ResponseHeaderTimeout
@ -1303,35 +1286,6 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration {
return 0
}
// checkConnHeaders checks whether req has any invalid connection-level headers.
// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
// Certain headers are special-cased as okay but not transmitted later.
func checkConnHeaders(req *http.Request) error {
if v := req.Header.Get("Upgrade"); v != "" {
return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
}
if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
}
if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
return fmt.Errorf("http2: invalid Connection request header: %q", vv)
}
return nil
}
// actualContentLength returns a sanitized version of
// req.ContentLength, where 0 actually means zero (not unknown) and -1
// means unknown.
func actualContentLength(req *http.Request) int64 {
if req.Body == nil || req.Body == http.NoBody {
return 0
}
if req.ContentLength != 0 {
return req.ContentLength
}
return -1
}
func (cc *ClientConn) decrStreamReservations() {
cc.mu.Lock()
defer cc.mu.Unlock()
@ -1356,7 +1310,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
reqCancel: req.Cancel,
isHead: req.Method == "HEAD",
reqBody: req.Body,
reqBodyContentLength: actualContentLength(req),
reqBodyContentLength: httpcommon.ActualContentLength(req),
trace: httptrace.ContextClientTrace(ctx),
peerClosed: make(chan struct{}),
abort: make(chan struct{}),
@ -1364,25 +1318,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
donec: make(chan struct{}),
}
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
if !cc.t.disableCompression() &&
req.Header.Get("Accept-Encoding") == "" &&
req.Header.Get("Range") == "" &&
!cs.isHead {
// Request gzip only, not deflate. Deflate is ambiguous and
// not as universally supported anyway.
// See: https://zlib.net/zlib_faq.html#faq39
//
// Note that we don't request this for HEAD requests,
// due to a bug in nginx:
// http://trac.nginx.org/nginx/ticket/358
// https://golang.org/issue/5522
//
// We don't request gzip if the request is for a range, since
// auto-decoding a portion of a gzipped document will just fail
// anyway. See https://golang.org/issue/8923
cs.requestedGzip = true
}
cs.requestedGzip = httpcommon.IsRequestGzip(req, cc.t.disableCompression())
go cs.doRequest(req, streamf)
@ -1413,7 +1349,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
}
res.Request = req
res.TLS = cc.tlsState
if res.Body == noBody && actualContentLength(req) == 0 {
if res.Body == noBody && httpcommon.ActualContentLength(req) == 0 {
// If there isn't a request or response body still being
// written, then wait for the stream to be closed before
// RoundTrip returns.
@ -1496,10 +1432,6 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre
cc := cs.cc
ctx := cs.ctx
if err := checkConnHeaders(req); err != nil {
return err
}
// wait for setting frames to be received, a server can change this value later,
// but we just wait for the first settings frame
var isExtendedConnect bool
@ -1663,20 +1595,22 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error {
// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
// sent by writeRequestBody below, along with any Trailers,
// again in form HEADERS{1}, CONTINUATION{0,})
trailers, err := commaSeparatedTrailers(req)
cc.hbuf.Reset()
res, err := httpcommon.EncodeHeaders(httpcommon.EncodeHeadersParam{
Request: req,
AddGzipHeader: cs.requestedGzip,
PeerMaxHeaderListSize: cc.peerMaxHeaderListSize,
DefaultUserAgent: defaultUserAgent,
}, func(name, value string) {
cc.writeHeader(name, value)
})
if err != nil {
return err
}
hasTrailers := trailers != ""
contentLen := actualContentLength(req)
hasBody := contentLen != 0
hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen)
if err != nil {
return err
return fmt.Errorf("http2: %w", err)
}
hdrs := cc.hbuf.Bytes()
// Write the request.
endStream := !hasBody && !hasTrailers
endStream := !res.HasBody && !res.HasTrailers
cs.sentHeaders = true
err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
traceWroteHeaders(cs.trace)
@ -2070,218 +2004,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
}
}
func validateHeaders(hdrs http.Header) string {
for k, vv := range hdrs {
if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
return fmt.Sprintf("name %q", k)
}
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
// Don't include the value in the error,
// because it may be sensitive.
return fmt.Sprintf("value for header %q", k)
}
}
}
return ""
}
var errNilRequestURL = errors.New("http2: Request.URI is nil")
func isNormalConnect(req *http.Request) bool {
return req.Method == "CONNECT" && req.Header.Get(":protocol") == ""
}
// requires cc.wmu be held.
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
cc.hbuf.Reset()
if req.URL == nil {
return nil, errNilRequestURL
}
host := req.Host
if host == "" {
host = req.URL.Host
}
host, err := httpguts.PunycodeHostPort(host)
if err != nil {
return nil, err
}
if !httpguts.ValidHostHeader(host) {
return nil, errors.New("http2: invalid Host header")
}
var path string
if !isNormalConnect(req) {
path = req.URL.RequestURI()
if !validPseudoPath(path) {
orig := path
path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
if !validPseudoPath(path) {
if req.URL.Opaque != "" {
return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
} else {
return nil, fmt.Errorf("invalid request :path %q", orig)
}
}
}
}
// Check for any invalid headers+trailers and return an error before we
// potentially pollute our hpack state. (We want to be able to
// continue to reuse the hpack encoder for future requests)
if err := validateHeaders(req.Header); err != "" {
return nil, fmt.Errorf("invalid HTTP header %s", err)
}
if err := validateHeaders(req.Trailer); err != "" {
return nil, fmt.Errorf("invalid HTTP trailer %s", err)
}
enumerateHeaders := func(f func(name, value string)) {
// 8.1.2.3 Request Pseudo-Header Fields
// The :path pseudo-header field includes the path and query parts of the
// target URI (the path-absolute production and optionally a '?' character
// followed by the query production, see Sections 3.3 and 3.4 of
// [RFC3986]).
f(":authority", host)
m := req.Method
if m == "" {
m = http.MethodGet
}
f(":method", m)
if !isNormalConnect(req) {
f(":path", path)
f(":scheme", req.URL.Scheme)
}
if trailers != "" {
f("trailer", trailers)
}
var didUA bool
for k, vv := range req.Header {
if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
// Host is :authority, already sent.
// Content-Length is automatic, set below.
continue
} else if asciiEqualFold(k, "connection") ||
asciiEqualFold(k, "proxy-connection") ||
asciiEqualFold(k, "transfer-encoding") ||
asciiEqualFold(k, "upgrade") ||
asciiEqualFold(k, "keep-alive") {
// Per 8.1.2.2 Connection-Specific Header
// Fields, don't send connection-specific
// fields. We have already checked if any
// are error-worthy so just ignore the rest.
continue
} else if asciiEqualFold(k, "user-agent") {
// Match Go's http1 behavior: at most one
// User-Agent. If set to nil or empty string,
// then omit it. Otherwise if not mentioned,
// include the default (below).
didUA = true
if len(vv) < 1 {
continue
}
vv = vv[:1]
if vv[0] == "" {
continue
}
} else if asciiEqualFold(k, "cookie") {
// Per 8.1.2.5 To allow for better compression efficiency, the
// Cookie header field MAY be split into separate header fields,
// each with one or more cookie-pairs.
for _, v := range vv {
for {
p := strings.IndexByte(v, ';')
if p < 0 {
break
}
f("cookie", v[:p])
p++
// strip space after semicolon if any.
for p+1 <= len(v) && v[p] == ' ' {
p++
}
v = v[p:]
}
if len(v) > 0 {
f("cookie", v)
}
}
continue
}
for _, v := range vv {
f(k, v)
}
}
if shouldSendReqContentLength(req.Method, contentLength) {
f("content-length", strconv.FormatInt(contentLength, 10))
}
if addGzipHeader {
f("accept-encoding", "gzip")
}
if !didUA {
f("user-agent", defaultUserAgent)
}
}
// Do a first pass over the headers counting bytes to ensure
// we don't exceed cc.peerMaxHeaderListSize. This is done as a
// separate pass before encoding the headers to prevent
// modifying the hpack state.
hlSize := uint64(0)
enumerateHeaders(func(name, value string) {
hf := hpack.HeaderField{Name: name, Value: value}
hlSize += uint64(hf.Size())
})
if hlSize > cc.peerMaxHeaderListSize {
return nil, errRequestHeaderListSize
}
trace := httptrace.ContextClientTrace(req.Context())
traceHeaders := traceHasWroteHeaderField(trace)
// Header list size is ok. Write the headers.
enumerateHeaders(func(name, value string) {
name, ascii := lowerHeader(name)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
return
}
cc.writeHeader(name, value)
if traceHeaders {
traceWroteHeaderField(trace, name, value)
}
})
return cc.hbuf.Bytes(), nil
}
// shouldSendReqContentLength reports whether the http2.Transport should send
// a "content-length" request header. This logic is basically a copy of the net/http
// transferWriter.shouldSendContentLength.
// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
// -1 means unknown.
func shouldSendReqContentLength(method string, contentLength int64) bool {
if contentLength > 0 {
return true
}
if contentLength < 0 {
return false
}
// For zero bodies, whether we send a content-length depends on the method.
// It also kinda doesn't matter for http2 either way, with END_STREAM.
switch method {
case "POST", "PUT", "PATCH":
return true
default:
return false
}
}
// requires cc.wmu be held.
func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
cc.hbuf.Reset()
@ -2298,7 +2020,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
}
for k, vv := range trailer {
lowKey, ascii := lowerHeader(k)
lowKey, ascii := httpcommon.LowerHeader(k)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
@ -2653,7 +2375,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
Status: status + " " + http.StatusText(statusCode),
}
for _, hf := range regularFields {
key := canonicalHeader(hf.Name)
key := httpcommon.CanonicalHeader(hf.Name)
if key == "Trailer" {
t := res.Trailer
if t == nil {
@ -2661,7 +2383,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
res.Trailer = t
}
foreachHeaderElement(hf.Value, func(v string) {
t[canonicalHeader(v)] = nil
t[httpcommon.CanonicalHeader(v)] = nil
})
} else {
vv := header[key]
@ -2785,7 +2507,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr
trailer := make(http.Header)
for _, hf := range f.RegularFields() {
key := canonicalHeader(hf.Name)
key := httpcommon.CanonicalHeader(hf.Name)
trailer[key] = append(trailer[key], hf.Value)
}
cs.trailer = trailer
@ -3331,7 +3053,7 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool,
var (
errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit")
errRequestHeaderListSize = httpcommon.ErrRequestHeaderListSize
)
func (cc *ClientConn) logf(format string, args ...interface{}) {
@ -3515,16 +3237,6 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) {
}
}
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
return trace != nil && trace.WroteHeaderField != nil
}
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
if trace != nil && trace.WroteHeaderField != nil {
trace.WroteHeaderField(k, []string{v})
}
}
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
if trace != nil {
return trace.Got1xxResponse


@ -13,6 +13,7 @@
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/internal/httpcommon"
)
// writeFramer is implemented by any type that is used to write frames.
@ -351,7 +352,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
}
for _, k := range keys {
vv := h[k]
k, ascii := lowerHeader(k)
k, ascii := httpcommon.LowerHeader(k)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).

vendor/golang.org/x/net/internal/httpcommon/ascii.go (generated, vendored, new file, 53 changed lines)

@ -0,0 +1,53 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package httpcommon
import "strings"
// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
// contains helper functions which may use Unicode-aware functions which would
// otherwise be unsafe and could introduce vulnerabilities if used improperly.
// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
// are equal, ASCII-case-insensitively.
func asciiEqualFold(s, t string) bool {
if len(s) != len(t) {
return false
}
for i := 0; i < len(s); i++ {
if lower(s[i]) != lower(t[i]) {
return false
}
}
return true
}
// lower returns the ASCII lowercase version of b.
func lower(b byte) byte {
if 'A' <= b && b <= 'Z' {
return b + ('a' - 'A')
}
return b
}
// isASCIIPrint returns whether s is ASCII and printable according to
// https://tools.ietf.org/html/rfc20#section-4.2.
func isASCIIPrint(s string) bool {
for i := 0; i < len(s); i++ {
if s[i] < ' ' || s[i] > '~' {
return false
}
}
return true
}
// asciiToLower returns the lowercase version of s if s is ASCII and printable,
// and whether or not it was.
func asciiToLower(s string) (lower string, ok bool) {
if !isASCIIPrint(s) {
return "", false
}
return strings.ToLower(s), true
}


@ -1,8 +1,8 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
package httpcommon
import (
"net/http"
@ -88,7 +88,9 @@ func buildCommonHeaderMaps() {
}
}
func lowerHeader(v string) (lower string, ascii bool) {
// LowerHeader returns the lowercase form of a header name,
// used on the wire for HTTP/2 and HTTP/3 requests.
func LowerHeader(v string) (lower string, ascii bool) {
buildCommonHeaderMapsOnce()
if s, ok := commonLowerHeader[v]; ok {
return s, true
@ -96,10 +98,18 @@ func lowerHeader(v string) (lower string, ascii bool) {
return asciiToLower(v)
}
func canonicalHeader(v string) string {
// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".)
func CanonicalHeader(v string) string {
buildCommonHeaderMapsOnce()
if s, ok := commonCanonHeader[v]; ok {
return s
}
return http.CanonicalHeaderKey(v)
}
// CachedCanonicalHeader returns the canonical form of a well-known header name.
func CachedCanonicalHeader(v string) (string, bool) {
buildCommonHeaderMapsOnce()
s, ok := commonCanonHeader[v]
return s, ok
}

vendor/golang.org/x/net/internal/httpcommon/request.go (generated, vendored, new file, 379 changed lines)

@ -0,0 +1,379 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package httpcommon
import (
"errors"
"fmt"
"net/http"
"net/http/httptrace"
"sort"
"strconv"
"strings"
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
)
var (
ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit")
)
// EncodeHeadersParam is parameters to EncodeHeaders.
type EncodeHeadersParam struct {
Request *http.Request
// AddGzipHeader indicates that an "accept-encoding: gzip" header should be
// added to the request.
AddGzipHeader bool
// PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting.
PeerMaxHeaderListSize uint64
// DefaultUserAgent is the User-Agent header to send when the request
// neither contains a User-Agent nor disables it.
DefaultUserAgent string
}
// EncodeHeadersResult is the result of EncodeHeaders.
type EncodeHeadersResult struct {
HasBody bool
HasTrailers bool
}
// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3.
// It validates a request and calls headerf with each pseudo-header and header
// for the request.
// The headerf function is called with the validated, canonicalized header name.
func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) {
req := param.Request
// Check for invalid connection-level headers.
if err := checkConnHeaders(req); err != nil {
return res, err
}
if req.URL == nil {
return res, errors.New("Request.URL is nil")
}
host := req.Host
if host == "" {
host = req.URL.Host
}
host, err := httpguts.PunycodeHostPort(host)
if err != nil {
return res, err
}
if !httpguts.ValidHostHeader(host) {
return res, errors.New("invalid Host header")
}
// isNormalConnect is true if this is a non-extended CONNECT request.
isNormalConnect := false
protocol := req.Header.Get(":protocol")
if req.Method == "CONNECT" && protocol == "" {
isNormalConnect = true
} else if protocol != "" && req.Method != "CONNECT" {
return res, errors.New("invalid :protocol header in non-CONNECT request")
}
// Validate the path, except for non-extended CONNECT requests which have no path.
var path string
if !isNormalConnect {
path = req.URL.RequestURI()
if !validPseudoPath(path) {
orig := path
path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
if !validPseudoPath(path) {
if req.URL.Opaque != "" {
return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
} else {
return res, fmt.Errorf("invalid request :path %q", orig)
}
}
}
}
// Check for any invalid headers+trailers and return an error before we
// potentially pollute our hpack state. (We want to be able to
// continue to reuse the hpack encoder for future requests)
if err := validateHeaders(req.Header); err != "" {
return res, fmt.Errorf("invalid HTTP header %s", err)
}
if err := validateHeaders(req.Trailer); err != "" {
return res, fmt.Errorf("invalid HTTP trailer %s", err)
}
contentLength := ActualContentLength(req)
trailers, err := commaSeparatedTrailers(req)
if err != nil {
return res, err
}
enumerateHeaders := func(f func(name, value string)) {
// 8.1.2.3 Request Pseudo-Header Fields
// The :path pseudo-header field includes the path and query parts of the
// target URI (the path-absolute production and optionally a '?' character
// followed by the query production, see Sections 3.3 and 3.4 of
// [RFC3986]).
f(":authority", host)
m := req.Method
if m == "" {
m = http.MethodGet
}
f(":method", m)
if !isNormalConnect {
f(":path", path)
f(":scheme", req.URL.Scheme)
}
if protocol != "" {
f(":protocol", protocol)
}
if trailers != "" {
f("trailer", trailers)
}
var didUA bool
for k, vv := range req.Header {
if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
// Host is :authority, already sent.
// Content-Length is automatic, set below.
continue
} else if asciiEqualFold(k, "connection") ||
asciiEqualFold(k, "proxy-connection") ||
asciiEqualFold(k, "transfer-encoding") ||
asciiEqualFold(k, "upgrade") ||
asciiEqualFold(k, "keep-alive") {
// Per 8.1.2.2 Connection-Specific Header
// Fields, don't send connection-specific
// fields. We have already checked if any
// are error-worthy so just ignore the rest.
continue
} else if asciiEqualFold(k, "user-agent") {
// Match Go's http1 behavior: at most one
// User-Agent. If set to nil or empty string,
// then omit it. Otherwise if not mentioned,
// include the default (below).
didUA = true
if len(vv) < 1 {
continue
}
vv = vv[:1]
if vv[0] == "" {
continue
}
} else if asciiEqualFold(k, "cookie") {
// Per 8.1.2.5 To allow for better compression efficiency, the
// Cookie header field MAY be split into separate header fields,
// each with one or more cookie-pairs.
for _, v := range vv {
for {
p := strings.IndexByte(v, ';')
if p < 0 {
break
}
f("cookie", v[:p])
p++
// strip space after semicolon if any.
for p+1 <= len(v) && v[p] == ' ' {
p++
}
v = v[p:]
}
if len(v) > 0 {
f("cookie", v)
}
}
continue
} else if k == ":protocol" {
// :protocol pseudo-header was already sent above.
continue
}
for _, v := range vv {
f(k, v)
}
}
if shouldSendReqContentLength(req.Method, contentLength) {
f("content-length", strconv.FormatInt(contentLength, 10))
}
if param.AddGzipHeader {
f("accept-encoding", "gzip")
}
if !didUA {
f("user-agent", param.DefaultUserAgent)
}
}
// Do a first pass over the headers counting bytes to ensure
// we don't exceed cc.peerMaxHeaderListSize. This is done as a
// separate pass before encoding the headers to prevent
// modifying the hpack state.
if param.PeerMaxHeaderListSize > 0 {
hlSize := uint64(0)
enumerateHeaders(func(name, value string) {
hf := hpack.HeaderField{Name: name, Value: value}
hlSize += uint64(hf.Size())
})
if hlSize > param.PeerMaxHeaderListSize {
return res, ErrRequestHeaderListSize
}
}
trace := httptrace.ContextClientTrace(req.Context())
// Header list size is ok. Write the headers.
enumerateHeaders(func(name, value string) {
name, ascii := LowerHeader(name)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
return
}
headerf(name, value)
if trace != nil && trace.WroteHeaderField != nil {
trace.WroteHeaderField(name, []string{value})
}
})
res.HasBody = contentLength != 0
res.HasTrailers = trailers != ""
return res, nil
}
// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header
// for a request.
func IsRequestGzip(req *http.Request, disableCompression bool) bool {
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
if !disableCompression &&
req.Header.Get("Accept-Encoding") == "" &&
req.Header.Get("Range") == "" &&
req.Method != "HEAD" {
// Request gzip only, not deflate. Deflate is ambiguous and
// not as universally supported anyway.
// See: https://zlib.net/zlib_faq.html#faq39
//
// Note that we don't request this for HEAD requests,
// due to a bug in nginx:
// http://trac.nginx.org/nginx/ticket/358
// https://golang.org/issue/5522
//
// We don't request gzip if the request is for a range, since
// auto-decoding a portion of a gzipped document will just fail
// anyway. See https://golang.org/issue/8923
return true
}
return false
}
// checkConnHeaders checks whether req has any invalid connection-level headers.
//
// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3
// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1
//
// Certain headers are special-cased as okay but not transmitted later.
// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding.
func checkConnHeaders(req *http.Request) error {
if v := req.Header.Get("Upgrade"); v != "" {
return fmt.Errorf("invalid Upgrade request header: %q", req.Header["Upgrade"])
}
if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv)
}
if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
return fmt.Errorf("invalid Connection request header: %q", vv)
}
return nil
}
func commaSeparatedTrailers(req *http.Request) (string, error) {
keys := make([]string, 0, len(req.Trailer))
for k := range req.Trailer {
k = CanonicalHeader(k)
switch k {
case "Transfer-Encoding", "Trailer", "Content-Length":
return "", fmt.Errorf("invalid Trailer key %q", k)
}
keys = append(keys, k)
}
if len(keys) > 0 {
sort.Strings(keys)
return strings.Join(keys, ","), nil
}
return "", nil
}
// ActualContentLength returns a sanitized version of
// req.ContentLength, where 0 actually means zero (not unknown) and -1
// means unknown.
func ActualContentLength(req *http.Request) int64 {
if req.Body == nil || req.Body == http.NoBody {
return 0
}
if req.ContentLength != 0 {
return req.ContentLength
}
return -1
}
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
// - a non-empty string starting with '/'
// - the string '*', for OPTIONS requests.
//
// For now this is only used as a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
//
// We used to enforce that the path also didn't start with "//", but
// Google's GFE accepts such paths and Chrome sends them, so ignore
// that part of the spec. See golang.org/issue/19103.
func validPseudoPath(v string) bool {
return (len(v) > 0 && v[0] == '/') || v == "*"
}
func validateHeaders(hdrs http.Header) string {
for k, vv := range hdrs {
if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
return fmt.Sprintf("name %q", k)
}
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
// Don't include the value in the error,
// because it may be sensitive.
return fmt.Sprintf("value for header %q", k)
}
}
}
return ""
}
// shouldSendReqContentLength reports whether we should send
// a "content-length" request header. This logic is basically a copy of the net/http
// transferWriter.shouldSendContentLength.
// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
// -1 means unknown.
func shouldSendReqContentLength(method string, contentLength int64) bool {
if contentLength > 0 {
return true
}
if contentLength < 0 {
return false
}
// For zero bodies, whether we send a content-length depends on the method.
// It also kinda doesn't matter for http2 either way, with END_STREAM.
switch method {
case "POST", "PUT", "PATCH":
return true
default:
return false
}
}

vendor/modules.txt (vendored, 10 changed lines)

@ -304,7 +304,7 @@ github.com/go-swagger/go-swagger/generator
# github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b
## explicit
github.com/go-xmlfmt/xmlfmt
# github.com/goccy/go-json v0.10.4
# github.com/goccy/go-json v0.10.5
## explicit; go 1.19
github.com/goccy/go-json
github.com/goccy/go-json/internal/decoder
@ -466,10 +466,13 @@ github.com/microcosm-cc/bluemonday/css
# github.com/miekg/dns v1.1.63
## explicit; go 1.19
github.com/miekg/dns
# github.com/minio/crc64nvme v1.0.0
## explicit; go 1.22
github.com/minio/crc64nvme
# github.com/minio/md5-simd v1.1.2
## explicit; go 1.14
github.com/minio/md5-simd
# github.com/minio/minio-go/v7 v7.0.85
# github.com/minio/minio-go/v7 v7.0.86
## explicit; go 1.22
github.com/minio/minio-go/v7
github.com/minio/minio-go/v7/pkg/cors
@ -1092,7 +1095,7 @@ golang.org/x/image/webp
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
golang.org/x/mod/semver
# golang.org/x/net v0.34.0
# golang.org/x/net v0.35.0
## explicit; go 1.18
golang.org/x/net/bpf
golang.org/x/net/context
@ -1103,6 +1106,7 @@ golang.org/x/net/http2
golang.org/x/net/http2/h2c
golang.org/x/net/http2/hpack
golang.org/x/net/idna
golang.org/x/net/internal/httpcommon
golang.org/x/net/internal/iana
golang.org/x/net/internal/socket
golang.org/x/net/internal/timeseries