Merge branch 'master' into script_filename-fix
Commit 553acf93e2
50 changed files with 4240 additions and 199 deletions
@@ -70,6 +70,8 @@ Then make sure the `caddy` binary is in your PATH.
 
 To build for other platforms, use build.go with the `--goos` and `--goarch` flags.
 
+When building from source, telemetry is enabled by default. You can disable it by changing `enableTelemetry` in run.go before compiling, or use the `-disabled-metrics` flag at runtime to disable only certain metrics.
+
 ## Quick Start
 
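A minimal sketch of that workflow, assuming the usual `go run build.go` invocation (the flag name and the two example metric names below come from this changeset; which metrics you disable is your choice):

```bash
# Build from source; telemetry is compiled in because enableTelemetry is true in run.go.
go run build.go

# Run Caddy but opt out of two specific metrics (comma-separated, no spaces).
./caddy -disabled-metrics http_user_agent,tls_client_hello
```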
caddy.go | 6

@@ -44,6 +44,7 @@ import (
 	"time"
 
 	"github.com/mholt/caddy/caddyfile"
+	"github.com/mholt/caddy/telemetry"
 )
 
 // Configurable application parameters
@@ -122,6 +123,7 @@ type Instance struct {
 	StorageMu sync.RWMutex
 }
 
+// Instances returns the list of instances.
 func Instances() []*Instance {
 	return instances
 }
@@ -615,6 +617,8 @@ func ValidateAndExecuteDirectives(cdyfile Input, inst *Instance, justValidate bo
 		return fmt.Errorf("error inspecting server blocks: %v", err)
 	}
 
+	telemetry.Set("num_server_blocks", len(sblocks))
+
 	return executeDirectives(inst, cdyfile.Path(), stype.Directives(), sblocks, justValidate)
 }
 
@@ -869,7 +873,7 @@ func Stop() error {
 // explicitly like a common local hostname. addr must only
 // be a host or a host:port combination.
 func IsLoopback(addr string) bool {
-	host, _, err := net.SplitHostPort(addr)
+	host, _, err := net.SplitHostPort(strings.ToLower(addr))
 	if err != nil {
 		host = addr // happens if the addr is just a hostname
 	}
@@ -15,25 +15,27 @@
 package caddymain
 
 import (
+	"bufio"
 	"errors"
 	"flag"
 	"fmt"
 	"io/ioutil"
 	"log"
 	"os"
+	"path/filepath"
 	"runtime"
 	"strconv"
 	"strings"
 
+	"github.com/google/uuid"
+	"github.com/klauspost/cpuid"
+	"github.com/mholt/caddy"
+	"github.com/mholt/caddy/caddytls"
+	"github.com/mholt/caddy/telemetry"
+	"github.com/xenolf/lego/acmev2"
 	"gopkg.in/natefinch/lumberjack.v2"
 
-	"github.com/xenolf/lego/acmev2"
-
-	"github.com/mholt/caddy"
-	// plug in the HTTP server type
-	_ "github.com/mholt/caddy/caddyhttp"
-
-	"github.com/mholt/caddy/caddytls"
+	_ "github.com/mholt/caddy/caddyhttp" // plug in the HTTP server type
 	// This is where other plugins get plugged in (imported)
 )
 
@@ -45,6 +47,7 @@ func init() {
 	flag.StringVar(&caddytls.DefaultCAUrl, "ca", "https://acme-v02.api.letsencrypt.org/directory", "URL to certificate authority's ACME server directory")
 	flag.BoolVar(&caddytls.DisableHTTPChallenge, "disable-http-challenge", caddytls.DisableHTTPChallenge, "Disable the ACME HTTP challenge")
 	flag.BoolVar(&caddytls.DisableTLSSNIChallenge, "disable-tls-sni-challenge", caddytls.DisableTLSSNIChallenge, "Disable the ACME TLS-SNI challenge")
+	flag.StringVar(&disabledMetrics, "disabled-metrics", "", "Comma-separated list of telemetry metrics to disable")
 	flag.StringVar(&conf, "conf", "", "Caddyfile to load (default \""+caddy.DefaultConfigFile+"\")")
 	flag.StringVar(&cpu, "cpu", "100%", "CPU cap")
 	flag.BoolVar(&plugins, "plugins", false, "List installed plugins")
@@ -87,6 +90,16 @@ func Run() {
 		})
 	}
 
+	// initialize telemetry client
+	if enableTelemetry {
+		err := initTelemetry()
+		if err != nil {
+			mustLogFatalf("[ERROR] Initializing telemetry: %v", err)
+		}
+	} else if disabledMetrics != "" {
+		mustLogFatalf("[ERROR] Cannot disable specific metrics because telemetry is disabled")
+	}
+
 	// Check for one-time actions
 	if revoke != "" {
 		err := caddytls.Revoke(revoke)
@@ -143,6 +156,26 @@ func Run() {
 	// Execute instantiation events
 	caddy.EmitEvent(caddy.InstanceStartupEvent, instance)
 
+	// Begin telemetry (these are no-ops if telemetry disabled)
+	telemetry.Set("caddy_version", appVersion)
+	telemetry.Set("num_listeners", len(instance.Servers()))
+	telemetry.Set("server_type", serverType)
+	telemetry.Set("os", runtime.GOOS)
+	telemetry.Set("arch", runtime.GOARCH)
+	telemetry.Set("cpu", struct {
+		BrandName  string `json:"brand_name,omitempty"`
+		NumLogical int    `json:"num_logical,omitempty"`
+		AESNI      bool   `json:"aes_ni,omitempty"`
+	}{
+		BrandName:  cpuid.CPU.BrandName,
+		NumLogical: runtime.NumCPU(),
+		AESNI:      cpuid.CPU.AesNi(),
+	})
+	if containerized := detectContainer(); containerized {
+		telemetry.Set("container", containerized)
+	}
+	telemetry.StartEmitting()
+
 	// Twiddle your thumbs
 	instance.Wait()
 }
@@ -266,6 +299,116 @@ func setCPU(cpu string) error {
 	return nil
 }
 
+// detectContainer attempts to determine whether the process is
+// being run inside a container. References:
+// https://tuhrig.de/how-to-know-you-are-inside-a-docker-container/
+// https://stackoverflow.com/a/20012536/1048862
+// https://gist.github.com/anantkamath/623ce7f5432680749e087cf8cfba9b69
+func detectContainer() bool {
+	if runtime.GOOS != "linux" {
+		return false
+	}
+
+	file, err := os.Open("/proc/1/cgroup")
+	if err != nil {
+		return false
+	}
+	defer file.Close()
+
+	i := 0
+	scanner := bufio.NewScanner(file)
+	for scanner.Scan() {
+		i++
+		if i > 1000 {
+			return false
+		}
+
+		line := scanner.Text()
+		parts := strings.SplitN(line, ":", 3)
+		if len(parts) < 3 {
+			continue
+		}
+
+		if strings.Contains(parts[2], "docker") ||
+			strings.Contains(parts[2], "lxc") ||
+			strings.Contains(parts[2], "moby") {
+			return true
+		}
+	}
+
+	return false
+}
+
+// initTelemetry initializes the telemetry engine.
+func initTelemetry() error {
+	uuidFilename := filepath.Join(caddy.AssetsPath(), "uuid")
+	if customUUIDFile := os.Getenv("CADDY_UUID_FILE"); customUUIDFile != "" {
+		uuidFilename = customUUIDFile
+	}
+
+	newUUID := func() uuid.UUID {
+		id := uuid.New()
+		err := ioutil.WriteFile(uuidFilename, []byte(id.String()), 0600) // human-readable as a string
+		if err != nil {
+			log.Printf("[ERROR] Persisting instance UUID: %v", err)
+		}
+		return id
+	}
+
+	var id uuid.UUID
+
+	// load UUID from storage, or create one if we don't have one
+	if uuidFile, err := os.Open(uuidFilename); os.IsNotExist(err) {
+		// no UUID exists yet; create a new one and persist it
+		id = newUUID()
+	} else if err != nil {
+		log.Printf("[ERROR] Loading persistent UUID: %v", err)
+		id = newUUID()
+	} else {
+		defer uuidFile.Close()
+		uuidBytes, err := ioutil.ReadAll(uuidFile)
+		if err != nil {
+			log.Printf("[ERROR] Reading persistent UUID: %v", err)
+			id = newUUID()
+		} else {
+			id, err = uuid.ParseBytes(uuidBytes)
+			if err != nil {
+				log.Printf("[ERROR] Parsing UUID: %v", err)
+				id = newUUID()
+			}
+		}
+	}
+
+	// parse and check the list of disabled metrics
+	var disabledMetricsSlice []string
+	if len(disabledMetrics) > 0 {
+		if len(disabledMetrics) > 1024 {
+			// mitigate disk space exhaustion at the collection endpoint
+			return fmt.Errorf("too many metrics to disable")
+		}
+		disabledMetricsSlice = strings.Split(disabledMetrics, ",")
+		for i, metric := range disabledMetricsSlice {
+			if metric == "instance_id" || metric == "timestamp" || metric == "disabled_metrics" {
+				return fmt.Errorf("instance_id, timestamp, and disabled_metrics cannot be disabled")
+			}
+			if metric == "" {
+				disabledMetricsSlice = append(disabledMetricsSlice[:i], disabledMetricsSlice[i+1:]...)
+			}
+		}
+	}
+
+	// initialize telemetry
+	telemetry.Init(id, disabledMetricsSlice)
+
+	// if any metrics were disabled, report which ones (so we know how representative the data is)
+	if len(disabledMetricsSlice) > 0 {
+		telemetry.Set("disabled_metrics", disabledMetricsSlice)
+		log.Printf("[NOTICE] The following telemetry metrics are disabled: %s", disabledMetrics)
+	}
+
+	return nil
+}
+
 const appName = "Caddy"
 
 // Flags that control program flow or startup
@@ -278,6 +421,7 @@ var (
 	version  bool
 	plugins  bool
 	validate bool
+	disabledMetrics string
 )
 
 // Build information obtained with the help of -ldflags
@@ -292,3 +436,5 @@ var (
 	gitShortStat     string // git diff-index --shortstat
 	gitFilesModified string // git diff-index --name-only HEAD
 )
+
+const enableTelemetry = true
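The rest of the changeset records metrics through this client. A rough sketch of the call surface it relies on, with signatures inferred only from the call sites in this diff rather than from the telemetry package itself:

```go
package main

import (
	"runtime"

	"github.com/mholt/caddy/telemetry"
)

// recordSampleMetrics illustrates the three kinds of calls made in this changeset.
func recordSampleMetrics(userAgent string) {
	// One-shot values keyed by name.
	telemetry.Set("os", runtime.GOOS)
	telemetry.Set("arch", runtime.GOARCH)

	// Monotonically increasing counters.
	telemetry.Increment("http_request_count")

	// Hashed values: store the full value once per hash, count unique hashes.
	uaHash := telemetry.FastHash([]byte(userAgent))
	telemetry.SetNested("http_user_agent", uaHash, userAgent)
	telemetry.AppendUnique("http_user_agent_count", uaHash)
}

func main() {
	recordSampleMetrics("example-agent/1.0")
}
```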
@@ -20,6 +20,8 @@ import (
 	"os"
 	"path/filepath"
 	"strings"
+
+	"github.com/mholt/caddy/telemetry"
 )
 
 // Parse parses the input just enough to group tokens, in
@@ -374,6 +376,7 @@ func (p *parser) directive() error {
 
 	// The directive itself is appended as a relevant token
 	p.block.Tokens[dir] = append(p.block.Tokens[dir], p.tokens[p.cursor])
+	telemetry.AppendUnique("directives", dir)
 
 	for p.Next() {
 		if p.Val() == "{" {
@@ -24,6 +24,9 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+
+	"github.com/mholt/caddy/caddytls"
+	"github.com/mholt/caddy/telemetry"
 )
 
 // tlsHandler is a http.Handler that will inject a value
@@ -49,6 +52,9 @@ type tlsHandler struct {
 // Halderman, et. al. in "The Security Impact of HTTPS Interception" (NDSS '17):
 // https://jhalderm.com/pub/papers/interception-ndss17.pdf
 func (h *tlsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	// TODO: one request per connection, we should report UA in connection with
+	// handshake (reported in caddytls package) and our MITM assessment
+
 	if h.listener == nil {
 		h.next.ServeHTTP(w, r)
 		return
@@ -59,6 +65,10 @@ func (h *tlsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	h.listener.helloInfosMu.RUnlock()
 
 	ua := r.Header.Get("User-Agent")
+	uaHash := telemetry.FastHash([]byte(ua))
+
+	// report this request's UA in connection with this ClientHello
+	go telemetry.AppendUnique("tls_client_hello_ua:"+caddytls.ClientHelloInfo(info).Key(), uaHash)
 
 	var checked, mitm bool
 	if r.Header.Get("X-BlueCoat-Via") != "" || // Blue Coat (masks User-Agent header to generic values)
@@ -97,6 +107,13 @@ func (h *tlsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 
 	if checked {
 		r = r.WithContext(context.WithValue(r.Context(), MitmCtxKey, mitm))
+		if mitm {
+			go telemetry.AppendUnique("http_mitm", "likely")
+		} else {
+			go telemetry.AppendUnique("http_mitm", "unlikely")
+		}
+	} else {
+		go telemetry.AppendUnique("http_mitm", "unknown")
 	}
 
 	if mitm && h.closeOnMITM {
@@ -195,6 +212,11 @@ func (c *clientHelloConn) Read(b []byte) (n int, err error) {
 	c.listener.helloInfos[c.Conn.RemoteAddr().String()] = rawParsed
 	c.listener.helloInfosMu.Unlock()
 
+	// report this ClientHello to telemetry
+	chKey := caddytls.ClientHelloInfo(rawParsed).Key()
+	go telemetry.SetNested("tls_client_hello", chKey, rawParsed)
+	go telemetry.AppendUnique("tls_client_hello_count", chKey)
+
 	c.readHello = true
 	return
 }
@@ -215,6 +237,7 @@ func parseRawClientHello(data []byte) (info rawHelloInfo) {
 	if len(data) < 42 {
 		return
 	}
+	info.Version = uint16(data[4])<<8 | uint16(data[5])
 	sessionIDLen := int(data[38])
 	if sessionIDLen > 32 || len(data) < 39+sessionIDLen {
 		return
@@ -231,9 +254,9 @@ func parseRawClientHello(data []byte) (info rawHelloInfo) {
 	}
 	numCipherSuites := cipherSuiteLen / 2
 	// read in the cipher suites
-	info.cipherSuites = make([]uint16, numCipherSuites)
+	info.CipherSuites = make([]uint16, numCipherSuites)
 	for i := 0; i < numCipherSuites; i++ {
-		info.cipherSuites[i] = uint16(data[2+2*i])<<8 | uint16(data[3+2*i])
+		info.CipherSuites[i] = uint16(data[2+2*i])<<8 | uint16(data[3+2*i])
 	}
 	data = data[2+cipherSuiteLen:]
 	if len(data) < 1 {
@@ -244,7 +267,7 @@ func parseRawClientHello(data []byte) (info rawHelloInfo) {
 	if len(data) < 1+compressionMethodsLen {
 		return
 	}
-	info.compressionMethods = data[1 : 1+compressionMethodsLen]
+	info.CompressionMethods = data[1 : 1+compressionMethodsLen]
 
 	data = data[1+compressionMethodsLen:]
 
@@ -272,7 +295,7 @@ func parseRawClientHello(data []byte) (info rawHelloInfo) {
 		}
 
 		// record that the client advertised support for this extension
-		info.extensions = append(info.extensions, extension)
+		info.Extensions = append(info.Extensions, extension)
 
 		switch extension {
 		case extensionSupportedCurves:
@@ -285,10 +308,10 @@ func parseRawClientHello(data []byte) (info rawHelloInfo) {
 				return
 			}
 			numCurves := l / 2
-			info.curves = make([]tls.CurveID, numCurves)
+			info.Curves = make([]tls.CurveID, numCurves)
 			d := data[2:]
 			for i := 0; i < numCurves; i++ {
-				info.curves[i] = tls.CurveID(d[0])<<8 | tls.CurveID(d[1])
+				info.Curves[i] = tls.CurveID(d[0])<<8 | tls.CurveID(d[1])
 				d = d[2:]
 			}
 		case extensionSupportedPoints:
@@ -300,8 +323,8 @@ func parseRawClientHello(data []byte) (info rawHelloInfo) {
 			if length != l+1 {
 				return
 			}
-			info.points = make([]uint8, l)
-			copy(info.points, data[1:])
+			info.Points = make([]uint8, l)
+			copy(info.Points, data[1:])
 		}
 
 		data = data[length:]
@@ -352,18 +375,12 @@ func (l *tlsHelloListener) Accept() (net.Conn, error) {
 // by Durumeric, Halderman, et. al. in
 // "The Security Impact of HTTPS Interception":
 // https://jhalderm.com/pub/papers/interception-ndss17.pdf
-type rawHelloInfo struct {
-	cipherSuites       []uint16
-	extensions         []uint16
-	compressionMethods []byte
-	curves             []tls.CurveID
-	points             []uint8
-}
+type rawHelloInfo caddytls.ClientHelloInfo
 
 // advertisesHeartbeatSupport returns true if info indicates
 // that the client supports the Heartbeat extension.
 func (info rawHelloInfo) advertisesHeartbeatSupport() bool {
-	for _, ext := range info.extensions {
+	for _, ext := range info.Extensions {
 		if ext == extensionHeartbeat {
 			return true
 		}
@@ -386,31 +403,31 @@ func (info rawHelloInfo) looksLikeFirefox() bool {
 	// Note: Firefox 55+ doesn't appear to advertise 0xFF03 (65283, short headers). It used to be between 5 and 13.
 	// Note: Firefox on Fedora (or RedHat) doesn't include ECC suites because of patent liability.
 	requiredExtensionsOrder := []uint16{23, 65281, 10, 11, 35, 16, 5, 13}
-	if !assertPresenceAndOrdering(requiredExtensionsOrder, info.extensions, true) {
+	if !assertPresenceAndOrdering(requiredExtensionsOrder, info.Extensions, true) {
 		return false
 	}
 
 	// We check for both presence of curves and their ordering.
 	requiredCurves := []tls.CurveID{29, 23, 24, 25}
-	if len(info.curves) < len(requiredCurves) {
+	if len(info.Curves) < len(requiredCurves) {
 		return false
 	}
 	for i := range requiredCurves {
-		if info.curves[i] != requiredCurves[i] {
+		if info.Curves[i] != requiredCurves[i] {
 			return false
 		}
 	}
-	if len(info.curves) > len(requiredCurves) {
+	if len(info.Curves) > len(requiredCurves) {
 		// newer Firefox (55 Nightly?) may have additional curves at end of list
 		allowedCurves := []tls.CurveID{256, 257}
 		for i := range allowedCurves {
-			if info.curves[len(requiredCurves)+i] != allowedCurves[i] {
+			if info.Curves[len(requiredCurves)+i] != allowedCurves[i] {
 				return false
 			}
 		}
 	}
 
-	if hasGreaseCiphers(info.cipherSuites) {
+	if hasGreaseCiphers(info.CipherSuites) {
 		return false
 	}
 
@@ -437,7 +454,7 @@ func (info rawHelloInfo) looksLikeFirefox() bool {
 		tls.TLS_RSA_WITH_AES_256_CBC_SHA,  // 0x35
 		tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, // 0xa
 	}
-	return assertPresenceAndOrdering(expectedCipherSuiteOrder, info.cipherSuites, false)
+	return assertPresenceAndOrdering(expectedCipherSuiteOrder, info.CipherSuites, false)
 }
 
 // looksLikeChrome returns true if info looks like a handshake
@@ -478,20 +495,20 @@ func (info rawHelloInfo) looksLikeChrome() bool {
 		TLS_DHE_RSA_WITH_AES_128_CBC_SHA: {}, // 0x33
 		TLS_DHE_RSA_WITH_AES_256_CBC_SHA: {}, // 0x39
 	}
-	for _, ext := range info.cipherSuites {
+	for _, ext := range info.CipherSuites {
 		if _, ok := chromeCipherExclusions[ext]; ok {
 			return false
 		}
 	}
 
 	// Chrome does not include curve 25 (CurveP521) (as of Chrome 56, Feb. 2017).
-	for _, curve := range info.curves {
+	for _, curve := range info.Curves {
 		if curve == 25 {
 			return false
 		}
 	}
 
-	if !hasGreaseCiphers(info.cipherSuites) {
+	if !hasGreaseCiphers(info.CipherSuites) {
 		return false
 	}
 
@@ -509,19 +526,19 @@ func (info rawHelloInfo) looksLikeEdge() bool {
 	// More specifically, the OCSP status request extension appears
 	// *directly* before the other two extensions, which occur in that
 	// order. (I contacted the authors for clarification and verified it.)
-	for i, ext := range info.extensions {
+	for i, ext := range info.Extensions {
 		if ext == extensionOCSPStatusRequest {
-			if len(info.extensions) <= i+2 {
+			if len(info.Extensions) <= i+2 {
 				return false
 			}
-			if info.extensions[i+1] != extensionSupportedCurves ||
-				info.extensions[i+2] != extensionSupportedPoints {
+			if info.Extensions[i+1] != extensionSupportedCurves ||
+				info.Extensions[i+2] != extensionSupportedPoints {
 				return false
 			}
 		}
 	}
 
-	for _, cs := range info.cipherSuites {
+	for _, cs := range info.CipherSuites {
 		// As of Feb. 2017, Edge does not have 0xff, but Avast adds it
 		if cs == scsvRenegotiation {
 			return false
@@ -532,7 +549,7 @@ func (info rawHelloInfo) looksLikeEdge() bool {
 		}
 	}
 
-	if hasGreaseCiphers(info.cipherSuites) {
+	if hasGreaseCiphers(info.CipherSuites) {
 		return false
 	}
 
@@ -558,23 +575,23 @@ func (info rawHelloInfo) looksLikeSafari() bool {
 
 	// We check for the presence and order of the extensions.
 	requiredExtensionsOrder := []uint16{10, 11, 13, 13172, 16, 5, 18, 23}
-	if !assertPresenceAndOrdering(requiredExtensionsOrder, info.extensions, true) {
+	if !assertPresenceAndOrdering(requiredExtensionsOrder, info.Extensions, true) {
 		// Safari on iOS 11 (beta) uses different set/ordering of extensions
 		requiredExtensionsOrderiOS11 := []uint16{65281, 0, 23, 13, 5, 13172, 18, 16, 11, 10}
-		if !assertPresenceAndOrdering(requiredExtensionsOrderiOS11, info.extensions, true) {
+		if !assertPresenceAndOrdering(requiredExtensionsOrderiOS11, info.Extensions, true) {
 			return false
 		}
 	} else {
 		// For these versions of Safari, expect TLS_EMPTY_RENEGOTIATION_INFO_SCSV first.
-		if len(info.cipherSuites) < 1 {
+		if len(info.CipherSuites) < 1 {
 			return false
 		}
-		if info.cipherSuites[0] != scsvRenegotiation {
+		if info.CipherSuites[0] != scsvRenegotiation {
 			return false
 		}
 	}
 
-	if hasGreaseCiphers(info.cipherSuites) {
+	if hasGreaseCiphers(info.CipherSuites) {
 		return false
 	}
 
@@ -599,19 +616,19 @@ func (info rawHelloInfo) looksLikeSafari() bool {
 		tls.TLS_RSA_WITH_AES_256_CBC_SHA, // 0x35
 		tls.TLS_RSA_WITH_AES_128_CBC_SHA, // 0x2f
 	}
-	return assertPresenceAndOrdering(expectedCipherSuiteOrder, info.cipherSuites, true)
+	return assertPresenceAndOrdering(expectedCipherSuiteOrder, info.CipherSuites, true)
 }
 
 // looksLikeTor returns true if the info looks like a ClientHello from Tor browser
 // (based on Firefox).
 func (info rawHelloInfo) looksLikeTor() bool {
 	requiredExtensionsOrder := []uint16{10, 11, 16, 5, 13}
-	if !assertPresenceAndOrdering(requiredExtensionsOrder, info.extensions, true) {
+	if !assertPresenceAndOrdering(requiredExtensionsOrder, info.Extensions, true) {
 		return false
 	}
 
 	// check for session tickets support; Tor doesn't support them to prevent tracking
-	for _, ext := range info.extensions {
+	for _, ext := range info.Extensions {
 		if ext == 35 {
 			return false
 		}
@@ -619,12 +636,12 @@ func (info rawHelloInfo) looksLikeTor() bool {
 
 	// We check for both presence of curves and their ordering, including
 	// an optional curve at the beginning (for Tor based on Firefox 52)
-	infoCurves := info.curves
-	if len(info.curves) == 4 {
-		if info.curves[0] != 29 {
+	infoCurves := info.Curves
+	if len(info.Curves) == 4 {
+		if info.Curves[0] != 29 {
 			return false
 		}
-		infoCurves = info.curves[1:]
+		infoCurves = info.Curves[1:]
 	}
 	requiredCurves := []tls.CurveID{23, 24, 25}
 	if len(infoCurves) < len(requiredCurves) {
@@ -636,7 +653,7 @@ func (info rawHelloInfo) looksLikeTor() bool {
 		}
 	}
 
-	if hasGreaseCiphers(info.cipherSuites) {
+	if hasGreaseCiphers(info.CipherSuites) {
 		return false
 	}
 
@@ -663,7 +680,7 @@ func (info rawHelloInfo) looksLikeTor() bool {
 		tls.TLS_RSA_WITH_AES_256_CBC_SHA,  // 0x35
 		tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, // 0xa
 	}
-	return assertPresenceAndOrdering(expectedCipherSuiteOrder, info.cipherSuites, false)
+	return assertPresenceAndOrdering(expectedCipherSuiteOrder, info.CipherSuites, false)
 }
 
 // assertPresenceAndOrdering will return true if candidateList contains
@@ -32,44 +32,48 @@ func TestParseClientHello(t *testing.T) {
 			// curl 7.51.0 (x86_64-apple-darwin16.0) libcurl/7.51.0 SecureTransport zlib/1.2.8
 			inputHex: `010000a6030358a28c73a71bdfc1f09dee13fecdc58805dcce42ac44254df548f14645f7dc2c00004400ffc02cc02bc024c023c00ac009c008c030c02fc028c027c014c013c012009f009e006b0067003900330016009d009c003d003c0035002f000a00af00ae008d008c008b01000039000a00080006001700180019000b00020100000d00120010040102010501060104030203050306030005000501000000000012000000170000`,
 			expected: rawHelloInfo{
-				cipherSuites:       []uint16{255, 49196, 49195, 49188, 49187, 49162, 49161, 49160, 49200, 49199, 49192, 49191, 49172, 49171, 49170, 159, 158, 107, 103, 57, 51, 22, 157, 156, 61, 60, 53, 47, 10, 175, 174, 141, 140, 139},
-				extensions:         []uint16{10, 11, 13, 5, 18, 23},
-				compressionMethods: []byte{0},
-				curves:             []tls.CurveID{23, 24, 25},
-				points:             []uint8{0},
+				Version:            0x303,
+				CipherSuites:       []uint16{255, 49196, 49195, 49188, 49187, 49162, 49161, 49160, 49200, 49199, 49192, 49191, 49172, 49171, 49170, 159, 158, 107, 103, 57, 51, 22, 157, 156, 61, 60, 53, 47, 10, 175, 174, 141, 140, 139},
+				Extensions:         []uint16{10, 11, 13, 5, 18, 23},
+				CompressionMethods: []byte{0},
+				Curves:             []tls.CurveID{23, 24, 25},
+				Points:             []uint8{0},
 			},
 		},
 		{
 			// Chrome 56
 			inputHex: `010000c003031dae75222dae1433a5a283ddcde8ddabaefbf16d84f250eee6fdff48cdfff8a00000201a1ac02bc02fc02cc030cca9cca8cc14cc13c013c014009c009d002f0035000a010000777a7a0000ff010001000000000e000c0000096c6f63616c686f73740017000000230000000d00140012040308040401050308050501080606010201000500050100000000001200000010000e000c02683208687474702f312e3175500000000b00020100000a000a0008aaaa001d001700182a2a000100`,
 			expected: rawHelloInfo{
-				cipherSuites:       []uint16{6682, 49195, 49199, 49196, 49200, 52393, 52392, 52244, 52243, 49171, 49172, 156, 157, 47, 53, 10},
-				extensions:         []uint16{31354, 65281, 0, 23, 35, 13, 5, 18, 16, 30032, 11, 10, 10794},
-				compressionMethods: []byte{0},
-				curves:             []tls.CurveID{43690, 29, 23, 24},
-				points:             []uint8{0},
+				Version:            0x303,
+				CipherSuites:       []uint16{6682, 49195, 49199, 49196, 49200, 52393, 52392, 52244, 52243, 49171, 49172, 156, 157, 47, 53, 10},
+				Extensions:         []uint16{31354, 65281, 0, 23, 35, 13, 5, 18, 16, 30032, 11, 10, 10794},
+				CompressionMethods: []byte{0},
+				Curves:             []tls.CurveID{43690, 29, 23, 24},
+				Points:             []uint8{0},
 			},
 		},
 		{
 			// Firefox 51
 			inputHex: `010000bd030375f9022fc3a6562467f3540d68013b2d0b961979de6129e944efe0b35531323500001ec02bc02fcca9cca8c02cc030c00ac009c013c01400330039002f0035000a010000760000000e000c0000096c6f63616c686f737400170000ff01000100000a000a0008001d001700180019000b00020100002300000010000e000c02683208687474702f312e31000500050100000000ff030000000d0020001e040305030603020308040805080604010501060102010402050206020202`,
 			expected: rawHelloInfo{
-				cipherSuites:       []uint16{49195, 49199, 52393, 52392, 49196, 49200, 49162, 49161, 49171, 49172, 51, 57, 47, 53, 10},
-				extensions:         []uint16{0, 23, 65281, 10, 11, 35, 16, 5, 65283, 13},
-				compressionMethods: []byte{0},
-				curves:             []tls.CurveID{29, 23, 24, 25},
-				points:             []uint8{0},
+				Version:            0x303,
+				CipherSuites:       []uint16{49195, 49199, 52393, 52392, 49196, 49200, 49162, 49161, 49171, 49172, 51, 57, 47, 53, 10},
+				Extensions:         []uint16{0, 23, 65281, 10, 11, 35, 16, 5, 65283, 13},
+				CompressionMethods: []byte{0},
+				Curves:             []tls.CurveID{29, 23, 24, 25},
+				Points:             []uint8{0},
 			},
 		},
 		{
 			// openssl s_client (OpenSSL 0.9.8zh 14 Jan 2016)
 			inputHex: `0100012b03035d385236b8ca7b7946fa0336f164e76bf821ed90e8de26d97cc677671b6f36380000acc030c02cc028c024c014c00a00a500a300a1009f006b006a0069006800390038003700360088008700860085c032c02ec02ac026c00fc005009d003d00350084c02fc02bc027c023c013c00900a400a200a0009e00670040003f003e0033003200310030009a0099009800970045004400430042c031c02dc029c025c00ec004009c003c002f009600410007c011c007c00cc00200050004c012c008001600130010000dc00dc003000a00ff0201000055000b000403000102000a001c001a00170019001c001b0018001a0016000e000d000b000c0009000a00230000000d0020001e060106020603050105020503040104020403030103020303020102020203000f000101`,
 			expected: rawHelloInfo{
-				cipherSuites:       []uint16{49200, 49196, 49192, 49188, 49172, 49162, 165, 163, 161, 159, 107, 106, 105, 104, 57, 56, 55, 54, 136, 135, 134, 133, 49202, 49198, 49194, 49190, 49167, 49157, 157, 61, 53, 132, 49199, 49195, 49191, 49187, 49171, 49161, 164, 162, 160, 158, 103, 64, 63, 62, 51, 50, 49, 48, 154, 153, 152, 151, 69, 68, 67, 66, 49201, 49197, 49193, 49189, 49166, 49156, 156, 60, 47, 150, 65, 7, 49169, 49159, 49164, 49154, 5, 4, 49170, 49160, 22, 19, 16, 13, 49165, 49155, 10, 255},
-				extensions:         []uint16{11, 10, 35, 13, 15},
-				compressionMethods: []byte{1, 0},
-				curves:             []tls.CurveID{23, 25, 28, 27, 24, 26, 22, 14, 13, 11, 12, 9, 10},
-				points:             []uint8{0, 1, 2},
+				Version:            0x303,
+				CipherSuites:       []uint16{49200, 49196, 49192, 49188, 49172, 49162, 165, 163, 161, 159, 107, 106, 105, 104, 57, 56, 55, 54, 136, 135, 134, 133, 49202, 49198, 49194, 49190, 49167, 49157, 157, 61, 53, 132, 49199, 49195, 49191, 49187, 49171, 49161, 164, 162, 160, 158, 103, 64, 63, 62, 51, 50, 49, 48, 154, 153, 152, 151, 69, 68, 67, 66, 49201, 49197, 49193, 49189, 49166, 49156, 156, 60, 47, 150, 65, 7, 49169, 49159, 49164, 49154, 5, 4, 49170, 49160, 22, 19, 16, 13, 49165, 49155, 10, 255},
+				Extensions:         []uint16{11, 10, 35, 13, 15},
+				CompressionMethods: []byte{1, 0},
+				Curves:             []tls.CurveID{23, 25, 28, 27, 24, 26, 22, 14, 13, 11, 12, 9, 10},
+				Points:             []uint8{0, 1, 2},
 			},
 		},
 	} {
@@ -338,8 +342,8 @@ func TestHeuristicFunctionsAndHandler(t *testing.T) {
 				(isEdge && (isChrome || isFirefox || isSafari || isTor)) ||
 				(isTor && (isChrome || isFirefox || isSafari || isEdge)) {
 				t.Errorf("[%s] Test %d: Multiple fingerprinting functions matched: "+
-					"Chrome=%v Firefox=%v Safari=%v Edge=%v Tor=%v\n\tparsed hello dec: %+v\n\tparsed hello hex: %#x\n",
-					client, i, isChrome, isFirefox, isSafari, isEdge, isTor, parsed, parsed)
+					"Chrome=%v Firefox=%v Safari=%v Edge=%v Tor=%v\n\tparsed hello dec: %+v\n",
+					client, i, isChrome, isFirefox, isSafari, isEdge, isTor, parsed)
 			}
 
 			// test the handler and detection results
@@ -367,8 +371,8 @@ func TestHeuristicFunctionsAndHandler(t *testing.T) {
 			if got != want {
 				t.Errorf("[%s] Test %d: Expected MITM=%v but got %v (type assertion OK (checked)=%v)",
 					client, i, want, got, checked)
-				t.Errorf("[%s] Test %d: Looks like Chrome=%v Firefox=%v Safari=%v Edge=%v Tor=%v\n\tparsed hello dec: %+v\n\tparsed hello hex: %#x\n",
-					client, i, isChrome, isFirefox, isSafari, isEdge, isTor, parsed, parsed)
+				t.Errorf("[%s] Test %d: Looks like Chrome=%v Firefox=%v Safari=%v Edge=%v Tor=%v\n\tparsed hello dec: %+v\n",
+					client, i, isChrome, isFirefox, isSafari, isEdge, isTor, parsed)
 			}
 		}
 	}
@@ -30,6 +30,7 @@ import (
 	"github.com/mholt/caddy/caddyfile"
 	"github.com/mholt/caddy/caddyhttp/staticfiles"
 	"github.com/mholt/caddy/caddytls"
+	"github.com/mholt/caddy/telemetry"
 )
 
 const serverType = "http"
@@ -66,6 +67,12 @@ func init() {
 	caddy.RegisterParsingCallback(serverType, "root", hideCaddyfile)
 	caddy.RegisterParsingCallback(serverType, "tls", activateHTTPS)
 	caddytls.RegisterConfigGetter(serverType, func(c *caddy.Controller) *caddytls.Config { return GetConfig(c).TLS })
+
+	// disable the caddytls package reporting ClientHellos
+	// to telemetry, since our MITM detector does this but
+	// with more information than the standard lib provides
+	// (as of May 2018)
+	caddytls.ClientHelloTelemetry = false
 }
 
 // hideCaddyfile hides the source/origin Caddyfile if it is within the
@@ -208,6 +215,18 @@ func (h *httpContext) InspectServerBlocks(sourceFile string, serverBlocks []cadd
 // MakeServers uses the newly-created siteConfigs to
 // create and return a list of server instances.
 func (h *httpContext) MakeServers() ([]caddy.Server, error) {
+	// make a rough estimate as to whether we're in a "production
+	// environment/system" - start by assuming that most production
+	// servers will set their default CA endpoint to a public,
+	// trusted CA (obviously not a perfect hueristic)
+	var looksLikeProductionCA bool
+	for _, publicCAEndpoint := range caddytls.KnownACMECAs {
+		if strings.Contains(caddytls.DefaultCAUrl, publicCAEndpoint) {
+			looksLikeProductionCA = true
+			break
+		}
+	}
+
 	// Iterate each site configuration and make sure that:
 	// 1) TLS is disabled for explicitly-HTTP sites (necessary
 	//    when an HTTP address shares a block containing tls)
@@ -215,7 +234,22 @@ func (h *httpContext) MakeServers() ([]caddy.Server, error) {
 	//    currently, QUIC does not support ClientAuth (TODO:
 	//    revisit this when our QUIC implementation supports it)
 	// 3) if TLS ClientAuth is used, StrictHostMatching is on
+	var atLeastOneSiteLooksLikeProduction bool
 	for _, cfg := range h.siteConfigs {
+		// see if all the addresses (both sites and
+		// listeners) are loopback to help us determine
+		// if this is a "production" instance or not
+		if !atLeastOneSiteLooksLikeProduction {
+			if !caddy.IsLoopback(cfg.Addr.Host) &&
+				!caddy.IsLoopback(cfg.ListenHost) &&
+				(caddytls.QualifiesForManagedTLS(cfg) ||
+					caddytls.HostQualifies(cfg.Addr.Host)) {
+				atLeastOneSiteLooksLikeProduction = true
+			}
+		}
+
+		// make sure TLS is disabled for explicitly-HTTP sites
+		// (necessary when HTTP address shares a block containing tls)
 		if !cfg.TLS.Enabled {
 			continue
 		}
@@ -265,6 +299,18 @@ func (h *httpContext) MakeServers() ([]caddy.Server, error) {
 		servers = append(servers, s)
 	}
 
+	// NOTE: This value is only a "good guess". Quite often, development
+	// environments will use internal DNS or a local hosts file to serve
+	// real-looking domains in local development. We can't easily tell
+	// which without doing a DNS lookup, so this guess is definitely naive,
+	// and if we ever want a better guess, we will have to do DNS lookups.
+	deploymentGuess := "dev"
+	if looksLikeProductionCA && atLeastOneSiteLooksLikeProduction {
+		deploymentGuess = "prod"
+	}
+	telemetry.Set("http_deployment_guess", deploymentGuess)
+	telemetry.Set("http_num_sites", len(h.siteConfigs))
+
 	return servers, nil
 }
 
@@ -36,6 +36,7 @@ import (
 	"github.com/mholt/caddy"
 	"github.com/mholt/caddy/caddyhttp/staticfiles"
 	"github.com/mholt/caddy/caddytls"
+	"github.com/mholt/caddy/telemetry"
 )
 
 // Server is the HTTP server implementation.
@@ -348,6 +349,16 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		}
 	}()
 
+	// record the User-Agent string (with a cap on its length to mitigate attacks)
+	ua := r.Header.Get("User-Agent")
+	if len(ua) > 512 {
+		ua = ua[:512]
+	}
+	uaHash := telemetry.FastHash([]byte(ua)) // this is a normalized field
+	go telemetry.SetNested("http_user_agent", uaHash, ua)
+	go telemetry.AppendUnique("http_user_agent_count", uaHash)
+	go telemetry.Increment("http_request_count")
+
 	// copy the original, unchanged URL into the context
 	// so it can be referenced by middlewares
 	urlCopy := *r.URL
@@ -26,6 +26,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/mholt/caddy/telemetry"
 	"golang.org/x/crypto/ocsp"
 )
 
@@ -165,6 +166,7 @@ func (cfg *Config) CacheManagedCertificate(domain string) (Certificate, error) {
 	if err != nil {
 		return cert, err
 	}
+	telemetry.Increment("tls_managed_cert_count")
 	return cfg.cacheCertificate(cert), nil
 }
 
@@ -179,6 +181,7 @@ func (cfg *Config) cacheUnmanagedCertificatePEMFile(certFile, keyFile string) er
 		return err
 	}
 	cfg.cacheCertificate(cert)
+	telemetry.Increment("tls_manual_cert_count")
 	return nil
 }
 
@@ -192,6 +195,7 @@ func (cfg *Config) cacheUnmanagedCertificatePEMBytes(certBytes, keyBytes []byte)
 		return err
 	}
 	cfg.cacheCertificate(cert)
+	telemetry.Increment("tls_manual_cert_count")
 	return nil
 }
 
@@ -26,6 +26,7 @@ import (
 	"time"
 
 	"github.com/mholt/caddy"
+	"github.com/mholt/caddy/telemetry"
 	"github.com/xenolf/lego/acmev2"
 )
 
@@ -273,6 +274,8 @@ func (c *ACMEClient) Obtain(name string) error {
 		break
 	}
 
+	go telemetry.Increment("tls_acme_certs_obtained")
+
 	return nil
 }
 
@@ -340,6 +343,7 @@ func (c *ACMEClient) Renew(name string) error {
 	}
 
 	caddy.EmitEvent(caddy.CertRenewEvent, name)
+	go telemetry.Increment("tls_acme_certs_renewed")
 
 	return saveCertResource(c.storage, newCertMeta)
 }
@@ -366,6 +370,8 @@ func (c *ACMEClient) Revoke(name string) error {
 		return err
 	}
 
+	go telemetry.Increment("tls_acme_certs_revoked")
+
 	err = c.storage.DeleteSite(name)
 	if err != nil {
 		return errors.New("certificate revoked, but unable to delete certificate file: " + err.Error())
@@ -417,3 +423,10 @@ func (c *nameCoordinator) Has(name string) bool {
 	c.mu.RUnlock()
 	return ok
 }
+
+// KnownACMECAs is a list of ACME directory endpoints of
+// known, public, and trusted ACME-compatible certificate
+// authorities.
+var KnownACMECAs = []string{
+	"https://acme-v02.api.letsencrypt.org/directory",
+}
@@ -23,7 +23,7 @@ import (
 	"net/url"
 	"strings"
 
-	"github.com/codahale/aesnicheck"
+	"github.com/klauspost/cpuid"
 	"github.com/mholt/caddy"
 	"github.com/xenolf/lego/acmev2"
 )
@@ -648,7 +648,7 @@ var defaultCiphersNonAESNI = []uint16{
 //
 // See https://github.com/mholt/caddy/issues/1674
 func getPreferredDefaultCiphers() []uint16 {
-	if aesnicheck.HasAESNI() {
+	if cpuid.CPU.AesNi() {
 		return defaultCiphers
 	}
 
@@ -21,7 +21,7 @@ import (
 	"reflect"
 	"testing"
 
-	"github.com/codahale/aesnicheck"
+	"github.com/klauspost/cpuid"
 )
 
 func TestConvertTLSConfigProtocolVersions(t *testing.T) {
@@ -98,7 +98,7 @@ func TestConvertTLSConfigCipherSuites(t *testing.T) {
 
 func TestGetPreferredDefaultCiphers(t *testing.T) {
 	expectedCiphers := defaultCiphers
-	if !aesnicheck.HasAESNI() {
+	if !cpuid.CPU.AesNi() {
 		expectedCiphers = defaultCiphersNonAESNI
 	}
 
@@ -341,7 +341,7 @@ func standaloneTLSTicketKeyRotation(c *tls.Config, ticker *time.Ticker, exitChan
 // Do not use this for cryptographic purposes.
 func fastHash(input []byte) string {
 	h := fnv.New32a()
-	h.Write([]byte(input))
+	h.Write(input)
 	return fmt.Sprintf("%x", h.Sum32())
 }
 
@ -25,6 +25,8 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/mholt/caddy/telemetry"
|
||||||
)
|
)
|
||||||
|
|
||||||
// configGroup is a type that keys configs by their hostname
|
// configGroup is a type that keys configs by their hostname
|
||||||
|
@ -97,7 +99,27 @@ func (cg configGroup) GetConfigForClient(clientHello *tls.ClientHelloInfo) (*tls
|
||||||
//
|
//
|
||||||
// This method is safe for use as a tls.Config.GetCertificate callback.
|
// This method is safe for use as a tls.Config.GetCertificate callback.
|
||||||
func (cfg *Config) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
func (cfg *Config) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||||
|
if ClientHelloTelemetry && len(clientHello.SupportedVersions) > 0 {
|
||||||
|
// If no other plugin (such as the HTTP server type) is implementing ClientHello telemetry, we do it.
|
||||||
|
// NOTE: The values in the Go standard lib's ClientHelloInfo aren't guaranteed to be in order.
|
||||||
|
info := ClientHelloInfo{
|
||||||
|
Version: clientHello.SupportedVersions[0], // report the highest
|
||||||
|
CipherSuites: clientHello.CipherSuites,
|
||||||
|
ExtensionsUnknown: true, // no extension info... :(
|
||||||
|
CompressionMethodsUnknown: true, // no compression methods... :(
|
||||||
|
Curves: clientHello.SupportedCurves,
|
||||||
|
Points: clientHello.SupportedPoints,
|
||||||
|
// We also have, but do not yet use: SignatureSchemes, ServerName, and SupportedProtos (ALPN)
|
||||||
|
// because the standard lib parses some extensions, but our MITM detector generally doesn't.
|
||||||
|
}
|
||||||
|
go telemetry.SetNested("tls_client_hello", info.Key(), info)
|
||||||
|
}
|
||||||
|
|
||||||
|
// get the certificate and serve it up
|
||||||
cert, err := cfg.getCertDuringHandshake(strings.ToLower(clientHello.ServerName), true, true)
|
cert, err := cfg.getCertDuringHandshake(strings.ToLower(clientHello.ServerName), true, true)
|
||||||
|
if err == nil {
|
||||||
|
go telemetry.Increment("tls_handshake_count") // TODO: This is a "best guess" for now, we need something listener-level
|
||||||
|
}
|
||||||
return &cert.Certificate, err
|
return &cert.Certificate, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -463,6 +485,42 @@ func (cfg *Config) renewDynamicCertificate(name string, currentCert Certificate)
 	return cfg.getCertDuringHandshake(name, true, false)
 }
 
+// ClientHelloInfo is our own version of the standard lib's
+// tls.ClientHelloInfo. As of May 2018, any fields populated
+// by the Go standard library are not guaranteed to have their
+// values in the original order as on the wire.
+type ClientHelloInfo struct {
+	Version            uint16        `json:"version,omitempty"`
+	CipherSuites       []uint16      `json:"cipher_suites,omitempty"`
+	Extensions         []uint16      `json:"extensions,omitempty"`
+	CompressionMethods []byte        `json:"compression,omitempty"`
+	Curves             []tls.CurveID `json:"curves,omitempty"`
+	Points             []uint8       `json:"points,omitempty"`
+
+	// Whether a couple of fields are unknown; if not, the key will encode
+	// differently to reflect that, as opposed to being known empty values.
+	// (some fields may be unknown depending on what package is being used;
+	// i.e. the Go standard lib doesn't expose some things)
+	// (very important to NOT encode these to JSON)
+	ExtensionsUnknown         bool `json:"-"`
+	CompressionMethodsUnknown bool `json:"-"`
+}
+
+// Key returns a standardized string form of the data in info,
+// useful for identifying duplicates.
+func (info ClientHelloInfo) Key() string {
+	extensions, compressionMethods := "?", "?"
+	if !info.ExtensionsUnknown {
+		extensions = fmt.Sprintf("%x", info.Extensions)
+	}
+	if !info.CompressionMethodsUnknown {
+		compressionMethods = fmt.Sprintf("%x", info.CompressionMethods)
+	}
+	return telemetry.FastHash([]byte(fmt.Sprintf("%x-%x-%s-%s-%x-%x",
+		info.Version, info.CipherSuites, extensions,
+		compressionMethods, info.Curves, info.Points)))
+}
+
 // obtainCertWaitChans is used to coordinate obtaining certs for each hostname.
 var obtainCertWaitChans = make(map[string]chan struct{})
 var obtainCertWaitChansMu sync.Mutex
@@ -477,3 +535,8 @@ var failedIssuanceMu sync.RWMutex
 // If this value is recent, do not make any on-demand certificate requests.
 var lastIssueTime time.Time
 var lastIssueTimeMu sync.Mutex
+
+// ClientHelloTelemetry determines whether to report
+// TLS ClientHellos to telemetry. Disable if doing
+// it from a different package.
+var ClientHelloTelemetry = true
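
As an illustration (not part of the diff), a plugin that parses ClientHellos itself could feed the same telemetry key via the exported type and Key() method added above. The field values here are invented for the example:

	package main
	
	import (
		"fmt"
	
		"github.com/mholt/caddy/caddytls"
	)
	
	func main() {
		// Hypothetical values; a real caller would fill these from a parsed ClientHello.
		info := caddytls.ClientHelloInfo{
			Version:      0x0303,                   // TLS 1.2
			CipherSuites: []uint16{0xc02f, 0xc030}, // example suites
			// The std lib does not expose extensions or compression methods,
			// so mark them unknown; Key() encodes that distinction.
			ExtensionsUnknown:         true,
			CompressionMethodsUnknown: true,
		}
		fmt.Println(info.Key()) // stable hex key identifying this hello "shape"
	}
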
@@ -28,6 +28,7 @@ import (
 	"strings"
 
 	"github.com/mholt/caddy"
+	"github.com/mholt/caddy/telemetry"
 )
 
 func init() {
@@ -174,9 +175,11 @@ func setupTLS(c *caddy.Controller) error {
 		case "max_certs":
 			c.Args(&maxCerts)
 			config.OnDemand = true
+			telemetry.Increment("tls_on_demand_count")
 		case "ask":
 			c.Args(&askURL)
 			config.OnDemand = true
+			telemetry.Increment("tls_on_demand_count")
 		case "dns":
 			args := c.RemainingArgs()
 			if len(args) != 1 {
@@ -283,6 +286,7 @@ func setupTLS(c *caddy.Controller) error {
 		if err != nil {
 			return fmt.Errorf("self-signed: %v", err)
 		}
+		telemetry.Increment("tls_self_signed_count")
 	}
 
 	return nil
dist/CHANGES.txt (vendored; 6 changes)
@@ -1,5 +1,11 @@
 CHANGES
 
+0.11 (May 10, 2018)
+- Built with Go 1.10.2
+- Integrated optional telemetry client
+- proxy: Fixed file descriptor leak
+
+
 0.10.14 (April 19, 2018)
 - tls: Fix error handling bug when obtaining certificates
 

dist/README.txt (vendored; 2 changes)
@@ -1,4 +1,4 @@
-CADDY 0.10.14
+CADDY 0.11
 
 Website
 	https://caddyserver.com
plugins.go (53 changes)
@@ -54,32 +54,58 @@ var (
 
 // DescribePlugins returns a string describing the registered plugins.
 func DescribePlugins() string {
+	pl := ListPlugins()
+
 	str := "Server types:\n"
-	for name := range serverTypes {
+	for _, name := range pl["server_types"] {
 		str += "  " + name + "\n"
 	}
 
-	// List the loaders in registration order
 	str += "\nCaddyfile loaders:\n"
+	for _, name := range pl["caddyfile_loaders"] {
+		str += "  " + name + "\n"
+	}
+
+	if len(pl["event_hooks"]) > 0 {
+		str += "\nEvent hook plugins:\n"
+		for _, name := range pl["event_hooks"] {
+			str += "  hook." + name + "\n"
+		}
+	}
+
+	str += "\nOther plugins:\n"
+	for _, name := range pl["others"] {
+		str += "  " + name + "\n"
+	}
+
+	return str
+}
+
+// ListPlugins makes a list of the registered plugins,
+// keyed by plugin type.
+func ListPlugins() map[string][]string {
+	p := make(map[string][]string)
+
+	// server type plugins
+	for name := range serverTypes {
+		p["server_types"] = append(p["server_types"], name)
+	}
+
+	// caddyfile loaders in registration order
 	for _, loader := range caddyfileLoaders {
-		str += "  " + loader.name + "\n"
+		p["caddyfile_loaders"] = append(p["caddyfile_loaders"], loader.name)
 	}
 	if defaultCaddyfileLoader.name != "" {
-		str += "  " + defaultCaddyfileLoader.name + "\n"
+		p["caddyfile_loaders"] = append(p["caddyfile_loaders"], defaultCaddyfileLoader.name)
 	}
 
 	// List the event hook plugins
-	hooks := ""
 	eventHooks.Range(func(k, _ interface{}) bool {
-		hooks += "  hook." + k.(string) + "\n"
+		p["event_hooks"] = append(p["event_hooks"], k.(string))
 		return true
 	})
-	if hooks != "" {
-		str += "\nEvent hook plugins:\n"
-		str += hooks
-	}
 
-	// Let's alphabetize the rest of these...
+	// alphabetize the rest of the plugins
 	var others []string
 	for stype, stypePlugins := range plugins {
 		for name := range stypePlugins {
@@ -93,12 +119,11 @@ func DescribePlugins() string {
 	}
 
 	sort.Strings(others)
-	str += "\nOther plugins:\n"
 	for _, name := range others {
-		str += "  " + name + "\n"
+		p["others"] = append(p["others"], name)
 	}
 
-	return str
+	return p
 }
 
 // ValidDirectives returns the list of all directives that are
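
A quick illustration (not part of the diff): with the refactor above, a diagnostic tool can consume the structured map from ListPlugins directly instead of parsing the formatted string from DescribePlugins. The category keys are the ones introduced in the new function:

	package main
	
	import (
		"fmt"
	
		"github.com/mholt/caddy"
		_ "github.com/mholt/caddy/caddyhttp" // register the HTTP server type and its plugins
	)
	
	func main() {
		pl := caddy.ListPlugins()
		for _, category := range []string{"server_types", "caddyfile_loaders", "event_hooks", "others"} {
			fmt.Printf("%s: %d registered\n", category, len(pl[category]))
		}
	}
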
@@ -19,6 +19,8 @@ import (
 	"os"
 	"os/signal"
 	"sync"
+
+	"github.com/mholt/caddy/telemetry"
 )
 
 // TrapSignals create signal handlers for all applicable signals for this
@@ -52,6 +54,9 @@ func trapSignalsCrossPlatform() {
 
 			log.Println("[INFO] SIGINT: Shutting down")
 
+			telemetry.AppendUnique("sigtrap", "SIGINT")
+			go telemetry.StopEmitting() // not guaranteed to finish in time; that's OK (just don't block!)
+
 			// important cleanup actions before shutdown callbacks
 			for _, f := range OnProcessExit {
 				f()
@@ -21,6 +21,8 @@ import (
 	"os"
 	"os/signal"
 	"syscall"
+
+	"github.com/mholt/caddy/telemetry"
 )
 
 // trapSignalsPosix captures POSIX-only signals.
@@ -49,10 +51,15 @@ func trapSignalsPosix() {
 					log.Printf("[ERROR] SIGTERM stop: %v", err)
 					exitCode = 3
 				}
+
+				telemetry.AppendUnique("sigtrap", "SIGTERM")
+				go telemetry.StopEmitting() // won't finish in time, but that's OK - just don't block
+
 				os.Exit(exitCode)
 
 			case syscall.SIGUSR1:
 				log.Println("[INFO] SIGUSR1: Reloading")
+				go telemetry.AppendUnique("sigtrap", "SIGUSR1")
 
 				// Start with the existing Caddyfile
 				caddyfileToUse, inst, err := getCurrentCaddyfile()
@@ -92,12 +99,14 @@ func trapSignalsPosix() {
 
 			case syscall.SIGUSR2:
 				log.Println("[INFO] SIGUSR2: Upgrading")
+				go telemetry.AppendUnique("sigtrap", "SIGUSR2")
 				if err := Upgrade(); err != nil {
 					log.Printf("[ERROR] SIGUSR2: upgrading: %v", err)
 				}
 
 			case syscall.SIGHUP:
 				// ignore; this signal is sometimes sent outside of the user's control
+				go telemetry.AppendUnique("sigtrap", "SIGHUP")
 			}
 		}
 	}()
telemetry/collection.go (new file, 307 lines)
@@ -0,0 +1,307 @@
// Copyright 2015 Light Code Labs, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package telemetry

import (
	"fmt"
	"hash/fnv"
	"log"
	"strings"

	"github.com/google/uuid"
)

// Init initializes this package so that it may
// be used. Do not call this function more than
// once. Init panics if it is called more than
// once or if the UUID value is empty. Once this
// function is called, the rest of the package
// may safely be used. If this function is not
// called, the collector functions may still be
// invoked, but they will be no-ops.
//
// Any metrics keys that are passed in the second
// argument will be permanently disabled for the
// lifetime of the process.
func Init(instanceID uuid.UUID, disabledMetricsKeys []string) {
	if enabled {
		panic("already initialized")
	}
	if str := instanceID.String(); str == "" ||
		str == "00000000-0000-0000-0000-000000000000" {
		panic("empty UUID")
	}
	instanceUUID = instanceID
	disabledMetricsMu.Lock()
	for _, key := range disabledMetricsKeys {
		disabledMetrics[strings.TrimSpace(key)] = false
	}
	disabledMetricsMu.Unlock()
	enabled = true
}

// StartEmitting sends the current payload and begins the
// transmission cycle for updates. This is the first
// update sent, and future ones will be sent until
// StopEmitting is called.
//
// This function is non-blocking (it spawns a new goroutine).
//
// This function panics if it was called more than once.
// It is a no-op if this package was not initialized.
func StartEmitting() {
	if !enabled {
		return
	}
	updateTimerMu.Lock()
	if updateTimer != nil {
		updateTimerMu.Unlock()
		panic("updates already started")
	}
	updateTimerMu.Unlock()
	updateMu.Lock()
	if updating {
		updateMu.Unlock()
		panic("update already in progress")
	}
	updateMu.Unlock()
	go logEmit(false)
}

// StopEmitting sends the current payload and terminates
// the update cycle. No more updates will be sent.
//
// It is a no-op if the package was never initialized
// or if emitting was never started.
//
// NOTE: This function is blocking. Run in a goroutine if
// you want to guarantee no blocking at critical times
// like exiting the program.
func StopEmitting() {
	if !enabled {
		return
	}
	updateTimerMu.Lock()
	if updateTimer == nil {
		updateTimerMu.Unlock()
		return
	}
	updateTimerMu.Unlock()
	logEmit(true) // likely too early; may take minutes to return
}

// Reset empties the current payload buffer.
func Reset() {
	resetBuffer()
}

// Set puts a value in the buffer to be included
// in the next emission. It overwrites any
// previous value.
//
// This function is safe for multiple goroutines,
// and it is recommended to call this using the
// go keyword after the call to SendHello so it
// doesn't block crucial code.
func Set(key string, val interface{}) {
	if !enabled || isDisabled(key) {
		return
	}
	bufferMu.Lock()
	if _, ok := buffer[key]; !ok {
		if bufferItemCount >= maxBufferItems {
			bufferMu.Unlock()
			return
		}
		bufferItemCount++
	}
	buffer[key] = val
	bufferMu.Unlock()
}

// SetNested puts a value in the buffer to be included
// in the next emission, nested under the top-level key
// as subkey. It overwrites any previous value.
//
// This function is safe for multiple goroutines,
// and it is recommended to call this using the
// go keyword after the call to SendHello so it
// doesn't block crucial code.
func SetNested(key, subkey string, val interface{}) {
	if !enabled || isDisabled(key) {
		return
	}
	bufferMu.Lock()
	if topLevel, ok1 := buffer[key]; ok1 {
		topLevelMap, ok2 := topLevel.(map[string]interface{})
		if !ok2 {
			bufferMu.Unlock()
			log.Printf("[PANIC] Telemetry: key %s is already used for non-nested-map value", key)
			return
		}
		if _, ok3 := topLevelMap[subkey]; !ok3 {
			// don't exceed max buffer size
			if bufferItemCount >= maxBufferItems {
				bufferMu.Unlock()
				return
			}
			bufferItemCount++
		}
		topLevelMap[subkey] = val
	} else {
		// don't exceed max buffer size
		if bufferItemCount >= maxBufferItems {
			bufferMu.Unlock()
			return
		}
		bufferItemCount++
		buffer[key] = map[string]interface{}{subkey: val}
	}
	bufferMu.Unlock()
}

// Append appends value to a list named key.
// If key is new, a new list will be created.
// If key maps to a type that is not a list,
// a panic is logged, and this is a no-op.
func Append(key string, value interface{}) {
	if !enabled || isDisabled(key) {
		return
	}
	bufferMu.Lock()
	if bufferItemCount >= maxBufferItems {
		bufferMu.Unlock()
		return
	}
	// TODO: Test this...
	bufVal, inBuffer := buffer[key]
	sliceVal, sliceOk := bufVal.([]interface{})
	if inBuffer && !sliceOk {
		bufferMu.Unlock()
		log.Printf("[PANIC] Telemetry: key %s already used for non-slice value", key)
		return
	}
	if sliceVal == nil {
		buffer[key] = []interface{}{value}
	} else if sliceOk {
		buffer[key] = append(sliceVal, value)
	}
	bufferItemCount++
	bufferMu.Unlock()
}

// AppendUnique adds value to a set named key.
// Set items are unordered. Values in the set
// are unique, but how many times they are
// appended is counted. The value must be
// hashable.
//
// If key is new, a new set will be created for
// values with that key. If key maps to a type
// that is not a counting set, a panic is logged,
// and this is a no-op.
func AppendUnique(key string, value interface{}) {
	if !enabled || isDisabled(key) {
		return
	}
	bufferMu.Lock()
	bufVal, inBuffer := buffer[key]
	setVal, setOk := bufVal.(countingSet)
	if inBuffer && !setOk {
		bufferMu.Unlock()
		log.Printf("[PANIC] Telemetry: key %s already used for non-counting-set value", key)
		return
	}
	if setVal == nil {
		// ensure the buffer is not too full, then add new unique value
		if bufferItemCount >= maxBufferItems {
			bufferMu.Unlock()
			return
		}
		buffer[key] = countingSet{value: 1}
		bufferItemCount++
	} else if setOk {
		// unique value already exists, so just increment counter
		setVal[value]++
	}
	bufferMu.Unlock()
}

// Add adds amount to a value named key.
// If it does not exist, it is created with
// a value of 1. If key maps to a type that
// is not an integer, a panic is logged,
// and this is a no-op.
func Add(key string, amount int) {
	atomicAdd(key, amount)
}

// Increment is a shortcut for Add(key, 1)
func Increment(key string) {
	atomicAdd(key, 1)
}

// atomicAdd adds amount (negative to subtract)
// to key.
func atomicAdd(key string, amount int) {
	if !enabled || isDisabled(key) {
		return
	}
	bufferMu.Lock()
	bufVal, inBuffer := buffer[key]
	intVal, intOk := bufVal.(int)
	if inBuffer && !intOk {
		bufferMu.Unlock()
		log.Printf("[PANIC] Telemetry: key %s already used for non-integer value", key)
		return
	}
	if !inBuffer {
		if bufferItemCount >= maxBufferItems {
			bufferMu.Unlock()
			return
		}
		bufferItemCount++
	}
	buffer[key] = intVal + amount
	bufferMu.Unlock()
}

// FastHash hashes input using a 32-bit hashing algorithm
// that is fast, and returns the hash as a hex-encoded string.
// Do not use this for cryptographic purposes.
func FastHash(input []byte) string {
	h := fnv.New32a()
	h.Write(input)
	return fmt.Sprintf("%x", h.Sum32())
}

// isDisabled returns whether key is
// a disabled metric key. ALL collection
// functions should call this and not
// save the value if this returns true.
func isDisabled(key string) bool {
	// for keys that are augmented with data, such as
	// "tls_client_hello_ua:<hash>", just
	// check the prefix "tls_client_hello_ua"
	checkKey := key
	if idx := strings.Index(key, ":"); idx > -1 {
		checkKey = key[:idx]
	}

	disabledMetricsMu.RLock()
	_, ok := disabledMetrics[checkKey]
	disabledMetricsMu.RUnlock()
	return ok
}
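
As a rough usage sketch (not from the diff; the key names and the disabled key are illustrative only), a caller initializes the package once at startup, records metrics while running, and starts/stops the emission cycle around the process lifetime:

	package main
	
	import (
		"github.com/google/uuid"
		"github.com/mholt/caddy/telemetry"
	)
	
	func main() {
		// In Caddy itself the UUID is persisted across runs and the disabled-key
		// list comes from a command-line flag; both are hard-coded here for brevity.
		telemetry.Init(uuid.New(), []string{"goroutines"})
	
		telemetry.Set("version", "0.11")             // single overwritable value (hypothetical key)
		telemetry.Increment("tls_handshake_count")   // integer counter
		telemetry.AppendUnique("sigtrap", "SIGUSR1") // counting set
	
		telemetry.StartEmitting() // non-blocking; schedules periodic updates
		// ... run the server ...
		telemetry.StopEmitting() // blocking; sends the final payload before returning
	}
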
telemetry/collection_test.go (new file, 109 lines)
@@ -0,0 +1,109 @@
// Copyright 2015 Light Code Labs, LLC (Apache 2.0 license header, identical to collection.go)

package telemetry

import (
	"fmt"
	"testing"

	"github.com/google/uuid"
)

func TestInit(t *testing.T) {
	reset()

	id := doInit(t) // should not panic

	defer func() {
		if r := recover(); r == nil {
			t.Errorf("Second call to Init should have panicked")
		}
	}()
	Init(id, nil) // should panic
}

func TestInitEmptyUUID(t *testing.T) {
	reset()
	defer func() {
		if r := recover(); r == nil {
			t.Errorf("Call to Init with empty UUID should have panicked")
		}
	}()
	Init(uuid.UUID([16]byte{}), nil)
}

func TestSet(t *testing.T) {
	reset()

	// should be no-op since we haven't called Init() yet
	Set("test1", "foobar")
	if _, ok := buffer["test"]; ok {
		t.Errorf("Should not have inserted item when not initialized")
	}

	// should work after we've initialized
	doInit(t)
	Set("test1", "foobar")
	val, ok := buffer["test1"]
	if !ok {
		t.Errorf("Expected value to be in buffer, but it wasn't")
	} else if val.(string) != "foobar" {
		t.Errorf("Expected 'foobar', got '%v'", val)
	}

	// should not overfill buffer
	maxBufferItemsTmp := maxBufferItems
	maxBufferItems = 10
	for i := 0; i < maxBufferItems+1; i++ {
		Set(fmt.Sprintf("overfill_%d", i), "foobar")
	}
	if len(buffer) > maxBufferItems {
		t.Errorf("Should not exceed max buffer size (%d); has %d items",
			maxBufferItems, len(buffer))
	}
	maxBufferItems = maxBufferItemsTmp

	// Should overwrite values
	Set("test1", "foobar2")
	val, ok = buffer["test1"]
	if !ok {
		t.Errorf("Expected value to be in buffer, but it wasn't")
	} else if val.(string) != "foobar2" {
		t.Errorf("Expected 'foobar2', got '%v'", val)
	}
}

// doInit calls Init() with a valid UUID
// and returns it.
func doInit(t *testing.T) uuid.UUID {
	id, err := uuid.Parse(testUUID)
	if err != nil {
		t.Fatalf("Could not make UUID: %v", err)
	}
	Init(id, nil)
	return id
}

// reset resets all the lovely package-level state;
// can be used as a set up function in tests.
func reset() {
	instanceUUID = uuid.UUID{}
	buffer = make(map[string]interface{})
	bufferItemCount = 0
	updating = false
	enabled = false
}

const testUUID = "0b6cfa22-0d4c-11e8-b11b-7a0058e13201"
telemetry/telemetry.go (new file, 428 lines)
@@ -0,0 +1,428 @@
// Copyright 2015 Light Code Labs, LLC (Apache 2.0 license header, identical to collection.go)

// Package telemetry implements the client for server-side telemetry
// of the network. Functions in this package are synchronous and blocking
// unless otherwise specified. For convenience, most functions here do
// not return errors, but errors are logged to the standard logger.
//
// To use this package, first call Init(). You can then call any of the
// collection/aggregation functions. Call StartEmitting() when you are
// ready to begin sending telemetry updates.
//
// When collecting metrics (functions like Set, AppendUnique, or Increment),
// it may be desirable and even recommended to invoke them in a new
// goroutine in case there is lock contention; they are thread-safe (unless
// noted), and you may not want them to block the main thread of execution.
// However, sometimes blocking may be necessary too; for example, adding
// startup metrics to the buffer before the call to StartEmitting().
//
// This package is designed to be as fast and space-efficient as reasonably
// possible, so that it does not disrupt the flow of execution.
package telemetry

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"math/rand"
	"net/http"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/google/uuid"
)

// logEmit calls emit and then logs the error, if any.
// See docs for emit.
func logEmit(final bool) {
	err := emit(final)
	if err != nil {
		log.Printf("[ERROR] Sending telemetry: %v", err)
	}
}

// emit sends an update to the telemetry server.
// Set final to true if this is the last call to emit.
// If final is true, no future updates will be scheduled.
// Otherwise, the next update will be scheduled.
func emit(final bool) error {
	if !enabled {
		return fmt.Errorf("telemetry not enabled")
	}

	// some metrics are updated/set at time of emission
	setEmitTimeMetrics()

	// ensure only one update happens at a time;
	// skip update if previous one still in progress
	updateMu.Lock()
	if updating {
		updateMu.Unlock()
		log.Println("[NOTICE] Skipping this telemetry update because previous one is still working")
		return nil
	}
	updating = true
	updateMu.Unlock()
	defer func() {
		updateMu.Lock()
		updating = false
		updateMu.Unlock()
	}()

	// terminate any pending update if this is the last one
	if final {
		stopUpdateTimer()
	}

	payloadBytes, err := makePayloadAndResetBuffer()
	if err != nil {
		return err
	}

	// this will hold the server's reply
	var reply Response

	// transmit the payload - use a loop to retry in case of failure
	for i := 0; i < 4; i++ {
		if i > 0 && err != nil {
			// don't hammer the server; first failure might have been
			// a fluke, but back off more after that
			log.Printf("[WARNING] Sending telemetry (attempt %d): %v - backing off and retrying", i, err)
			time.Sleep(time.Duration((i+1)*(i+1)*(i+1)) * time.Second)
		}

		// send it
		var resp *http.Response
		resp, err = httpClient.Post(endpoint+instanceUUID.String(), "application/json", bytes.NewReader(payloadBytes))
		if err != nil {
			continue
		}

		// check for any special-case response codes
		if resp.StatusCode == http.StatusGone {
			// the endpoint has been deprecated and is no longer servicing clients
			err = fmt.Errorf("telemetry server replied with HTTP %d; upgrade required", resp.StatusCode)
			if clen := resp.Header.Get("Content-Length"); clen != "0" && clen != "" {
				bodyBytes, readErr := ioutil.ReadAll(resp.Body)
				if readErr != nil {
					log.Printf("[ERROR] Reading response body from server: %v", readErr)
				}
				err = fmt.Errorf("%v - %s", err, bodyBytes)
			}
			resp.Body.Close()
			reply.Stop = true
			break
		}
		if resp.StatusCode == http.StatusUnavailableForLegalReasons {
			// the endpoint is unavailable, at least to this client, for legal reasons (!)
			err = fmt.Errorf("telemetry server replied with HTTP %d %s: please consult the project website and developers for guidance", resp.StatusCode, resp.Status)
			if clen := resp.Header.Get("Content-Length"); clen != "0" && clen != "" {
				bodyBytes, readErr := ioutil.ReadAll(resp.Body)
				if readErr != nil {
					log.Printf("[ERROR] Reading response body from server: %v", readErr)
				}
				err = fmt.Errorf("%v - %s", err, bodyBytes)
			}
			resp.Body.Close()
			reply.Stop = true
			break
		}

		// okay, ensure we can interpret the response
		if ct := resp.Header.Get("Content-Type"); (resp.StatusCode < 300 || resp.StatusCode >= 400) &&
			!strings.Contains(ct, "json") {
			err = fmt.Errorf("telemetry server replied with unknown content-type: '%s' and HTTP %s", ct, resp.Status)
			resp.Body.Close()
			continue
		}

		// read the response body
		err = json.NewDecoder(resp.Body).Decode(&reply)
		resp.Body.Close() // close response body as soon as we're done with it
		if err != nil {
			continue
		}

		// update the list of enabled/disabled keys, if any
		for _, key := range reply.EnableKeys {
			disabledMetricsMu.Lock()
			// only re-enable this metric if it is temporarily disabled
			if temp, ok := disabledMetrics[key]; ok && temp {
				delete(disabledMetrics, key)
			}
			disabledMetricsMu.Unlock()
		}
		for _, key := range reply.DisableKeys {
			disabledMetricsMu.Lock()
			disabledMetrics[key] = true // all remotely-disabled keys are "temporarily" disabled
			disabledMetricsMu.Unlock()
		}

		// make sure we didn't send the update too soon; if so,
		// just wait and try again -- this is a special case of
		// error that we handle differently, as you can see
		if resp.StatusCode == http.StatusTooManyRequests {
			if reply.NextUpdate <= 0 {
				raStr := resp.Header.Get("Retry-After")
				if ra, err := strconv.Atoi(raStr); err == nil {
					reply.NextUpdate = time.Duration(ra) * time.Second
				}
			}
			if !final {
				log.Printf("[NOTICE] Sending telemetry: we were too early; waiting %s before trying again", reply.NextUpdate)
				time.Sleep(reply.NextUpdate)
				continue
			}
		} else if resp.StatusCode >= 400 {
			err = fmt.Errorf("telemetry server returned status code %d", resp.StatusCode)
			continue
		}

		break
	}
	if err == nil && !final {
		// (remember, if there was an error, we return it
		// below, so it WILL get logged if it's supposed to)
		log.Println("[INFO] Sending telemetry: success")
	}

	// even if there was an error after all retries, we should
	// schedule the next update using our default update
	// interval because the server might be healthy later

	// ensure we won't slam the telemetry server; add a little variance
	if reply.NextUpdate < 1*time.Second {
		reply.NextUpdate = defaultUpdateInterval + time.Duration(rand.Int63n(int64(1*time.Minute)))
	}

	// schedule the next update (if this wasn't the last one and
	// if the remote server didn't tell us to stop sending)
	if !final && !reply.Stop {
		updateTimerMu.Lock()
		updateTimer = time.AfterFunc(reply.NextUpdate, func() {
			logEmit(false)
		})
		updateTimerMu.Unlock()
	}

	return err
}

func stopUpdateTimer() {
	updateTimerMu.Lock()
	updateTimer.Stop()
	updateTimer = nil
	updateTimerMu.Unlock()
}

// setEmitTimeMetrics sets some metrics that should
// be recorded just before emitting.
func setEmitTimeMetrics() {
	Set("goroutines", runtime.NumGoroutine())

	var mem runtime.MemStats
	runtime.ReadMemStats(&mem)
	SetNested("memory", "heap_alloc", mem.HeapAlloc)
	SetNested("memory", "sys", mem.Sys)
}

// makePayloadAndResetBuffer prepares a payload
// by emptying the collection buffer. It returns
// the bytes of the payload to send to the server.
// Since the buffer is reset by this, if the
// resulting byte slice is lost, the payload is
// gone with it.
func makePayloadAndResetBuffer() ([]byte, error) {
	bufCopy := resetBuffer()

	// encode payload in preparation for transmission
	payload := Payload{
		InstanceID: instanceUUID.String(),
		Timestamp:  time.Now().UTC(),
		Data:       bufCopy,
	}
	return json.Marshal(payload)
}

// resetBuffer makes a local pointer to the buffer,
// then resets the buffer by assigning to be a newly-
// made value to clear it out, then sets the buffer
// item count to 0. It returns the copied pointer to
// the original map so the old buffer value can be
// used locally.
func resetBuffer() map[string]interface{} {
	bufferMu.Lock()
	bufCopy := buffer
	buffer = make(map[string]interface{})
	bufferItemCount = 0
	bufferMu.Unlock()
	return bufCopy
}

// Response contains the body of a response from the
// telemetry server.
type Response struct {
	// NextUpdate is how long to wait before the next update.
	NextUpdate time.Duration `json:"next_update"`

	// Stop instructs the telemetry server to stop sending
	// telemetry. This would only be done under extenuating
	// circumstances, but we are prepared for it nonetheless.
	Stop bool `json:"stop,omitempty"`

	// Error will be populated with an error message, if any.
	// This field should be empty if the status code is < 400.
	Error string `json:"error,omitempty"`

	// DisableKeys will contain a list of keys/metrics that
	// should NOT be sent until further notice. The client
	// must NOT store these items in its buffer or send them
	// to the telemetry server while they are disabled. If
	// this list and EnableKeys have the same value (which is
	// not supposed to happen), this field should dominate.
	DisableKeys []string `json:"disable_keys,omitempty"`

	// EnableKeys will contain a list of keys/metrics that
	// MAY be sent until further notice.
	EnableKeys []string `json:"enable_keys,omitempty"`
}

// Payload is the data that gets sent to the telemetry server.
type Payload struct {
	// The universally unique ID of the instance
	InstanceID string `json:"instance_id"`

	// The UTC timestamp of the transmission
	Timestamp time.Time `json:"timestamp"`

	// The timestamp before which the next update is expected
	// (NOT populated by client - the server fills this in
	// before it stores the data)
	ExpectNext time.Time `json:"expect_next,omitempty"`

	// The metrics
	Data map[string]interface{} `json:"data,omitempty"`
}

// Int returns the value of the data keyed by key
// if it is an integer; otherwise it returns 0.
func (p Payload) Int(key string) int {
	val, _ := p.Data[key]
	switch p.Data[key].(type) {
	case int:
		return val.(int)
	case float64: // after JSON-decoding, int becomes float64...
		return int(val.(float64))
	}
	return 0
}

// countingSet implements a set that counts how many
// times a key is inserted. It marshals to JSON in a
// way such that keys are converted to values next
// to their associated counts.
type countingSet map[interface{}]int

// MarshalJSON implements the json.Marshaler interface.
// It converts the set to an array so that the values
// are JSON object values instead of keys, since keys
// are difficult to query in databases.
func (s countingSet) MarshalJSON() ([]byte, error) {
	type Item struct {
		Value interface{} `json:"value"`
		Count int         `json:"count"`
	}
	var list []Item

	for k, v := range s {
		list = append(list, Item{Value: k, Count: v})
	}

	return json.Marshal(list)
}

var (
	// httpClient should be used for HTTP requests. It
	// is configured with a timeout for reliability.
	httpClient = http.Client{
		Transport: &http.Transport{
			TLSHandshakeTimeout: 30 * time.Second,
			DisableKeepAlives:   true,
		},
		Timeout: 1 * time.Minute,
	}

	// buffer holds the data that we are building up to send.
	buffer          = make(map[string]interface{})
	bufferItemCount = 0
	bufferMu        sync.RWMutex // protects both the buffer and its count

	// updating is used to ensure only one
	// update happens at a time.
	updating bool
	updateMu sync.Mutex

	// updateTimer fires off the next update.
	// If no update is scheduled, this is nil.
	updateTimer   *time.Timer
	updateTimerMu sync.Mutex

	// disabledMetrics is a set of metric keys
	// that should NOT be saved to the buffer
	// or sent to the telemetry server. The value
	// indicates whether the entry is temporary.
	// If the value is true, it may be removed if
	// the metric is re-enabled remotely later. If
	// the value is false, it is permanent
	// (presumably because the user explicitly
	// disabled it) and can only be re-enabled
	// with user consent.
	disabledMetrics   = make(map[string]bool)
	disabledMetricsMu sync.RWMutex

	// instanceUUID is the ID of the current instance.
	// This MUST be set to emit telemetry.
	// This MUST NOT be openly exposed to clients, for privacy.
	instanceUUID uuid.UUID

	// enabled indicates whether the package has
	// been initialized and can be actively used.
	enabled bool

	// maxBufferItems is the maximum number of items we'll allow
	// in the buffer before we start dropping new ones, in a
	// rough (simple) attempt to keep memory use under control.
	maxBufferItems = 100000
)

const (
	// endpoint is the base URL to remote telemetry server;
	// the instance ID will be appended to it.
	endpoint = "https://telemetry.caddyserver.com/v1/update/"

	// defaultUpdateInterval is how long to wait before emitting
	// more telemetry data if all retries fail. This value is
	// only used if the client receives a nonsensical value, or
	// doesn't send one at all, or if a connection can't be made,
	// likely indicating a problem with the server. Thus, this
	// value should be a long duration to help alleviate extra
	// load on the server.
	defaultUpdateInterval = 1 * time.Hour
)
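
To make the counting-set wire format concrete, here is a hedged, in-package sketch (it would have to live inside package telemetry, e.g. in a test file, since countingSet is unexported); all values are invented for illustration:

	// sketchPayloadJSON prints an example payload containing a counter and a
	// counting set, to show how countingSet.MarshalJSON shapes the output.
	func sketchPayloadJSON() {
		p := Payload{
			InstanceID: "00000000-0000-0000-0000-000000000001", // made-up ID
			Timestamp:  time.Date(2018, 5, 10, 0, 0, 0, 0, time.UTC),
			Data: map[string]interface{}{
				"tls_handshake_count": 42,
				"sigtrap":             countingSet{"SIGINT": 1, "SIGUSR1": 2},
			},
		}
		b, _ := json.Marshal(p)
		fmt.Println(string(b))
		// The counting set marshals as an array of {"value":...,"count":...}
		// objects (order not guaranteed) rather than as a JSON object keyed
		// by value, which is exactly what MarshalJSON above arranges.
	}
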
telemetry/telemetry_test.go (new file, 59 lines)
@@ -0,0 +1,59 @@
// Copyright 2015 Light Code Labs, LLC (Apache 2.0 license header, identical to collection.go)

package telemetry

import (
	"encoding/json"
	"testing"
)

func TestMakePayloadAndResetBuffer(t *testing.T) {
	reset()
	id := doInit(t)

	buffer = map[string]interface{}{
		"foo1": "bar1",
		"foo2": "bar2",
	}
	bufferItemCount = 2

	payloadBytes, err := makePayloadAndResetBuffer()
	if err != nil {
		t.Fatalf("Error making payload bytes: %v", err)
	}

	if len(buffer) != 0 {
		t.Errorf("Expected buffer len to be 0, got %d", len(buffer))
	}
	if bufferItemCount != 0 {
		t.Errorf("Expected buffer item count to be 0, got %d", bufferItemCount)
	}

	var payload Payload
	err = json.Unmarshal(payloadBytes, &payload)
	if err != nil {
		t.Fatalf("Error deserializing payload: %v", err)
	}

	if payload.InstanceID != id.String() {
		t.Errorf("Expected instance ID to be set to '%s' but got '%s'", testUUID, payload.InstanceID)
	}
	if payload.Data == nil {
		t.Errorf("Expected data to be set, but was nil")
	}
	if payload.Timestamp.IsZero() {
		t.Errorf("Expected timestamp to be set, but was zero value")
	}
}
vendor/github.com/codahale/aesnicheck/asm_amd64.s (generated, vendored; file deleted)
@@ -1,9 +0,0 @@
-// func HasAESNI() bool
-TEXT ·HasAESNI(SB),$0
-	XORQ AX, AX
-	INCL AX
-	CPUID
-	SHRQ $25, CX
-	ANDQ $1, CX
-	MOVB CX, ret+0(FP)
-	RET

vendor/github.com/codahale/aesnicheck/check_asm.go (generated, vendored; file deleted)
@@ -1,6 +0,0 @@
-// +build amd64
-
-package aesnicheck
-
-// HasAESNI returns whether AES-NI is supported by the CPU.
-func HasAESNI() bool

vendor/github.com/codahale/aesnicheck/check_generic.go (generated, vendored; file deleted)
@@ -1,8 +0,0 @@
-// +build !amd64
-
-package aesnicheck
-
-// HasAESNI returns whether AES-NI is supported by the CPU.
-func HasAESNI() bool {
-	return false
-}

vendor/github.com/codahale/aesnicheck/cmd/aesnicheck/aesnicheck.go (generated, vendored; file deleted)
@@ -1,22 +0,0 @@
-// Command aesnicheck queries the CPU for AES-NI support. If AES-NI is supported,
-// aesnicheck will print "supported" and exit with a status of 0. If AES-NI is
-// not supported, aesnicheck will print "unsupported" and exit with a status of
-// -1.
-package main
-
-import (
-	"fmt"
-	"os"
-
-	"github.com/codahale/aesnicheck"
-)
-
-func main() {
-	if aesnicheck.HasAESNI() {
-		fmt.Println("supported")
-		os.Exit(0)
-	} else {
-		fmt.Println("unsupported")
-		os.Exit(-1)
-	}
-}

vendor/github.com/codahale/aesnicheck/docs.go (generated, vendored; file deleted)
@@ -1,9 +0,0 @@
-// Package aesnicheck provides a simple check to see if crypto/aes is using
-// AES-NI instructions or if the AES transform is being done in software. AES-NI
-// is constant-time, which makes it impervious to cache-level timing attacks. For
-// security-conscious deployments on public cloud infrastructure (Amazon EC2,
-// Google Compute Engine, Microsoft Azure, etc.) this may be critical.
-//
-// See http://eprint.iacr.org/2014/248 for details on cross-VM timing attacks on
-// AES keys.
-package aesnicheck
vendor/github.com/google/uuid/hash.go (generated, vendored; 2 changes)
@@ -27,7 +27,7 @@ var (
 func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
 	h.Reset()
 	h.Write(space[:])
-	h.Write([]byte(data))
+	h.Write(data)
 	s := h.Sum(nil)
 	var uuid UUID
 	copy(uuid[:], s)

vendor/github.com/google/uuid/node.go (generated, vendored; 21 changes)
@@ -5,13 +5,11 @@
 package uuid
 
 import (
-	"net"
 	"sync"
 )
 
 var (
 	nodeMu     sync.Mutex
-	interfaces []net.Interface // cached list of interfaces
 	ifname     string  // name of interface being used
 	nodeID     [6]byte // hardware for version 1 UUIDs
 	zeroID     [6]byte // nodeID with only 0's
@@ -39,21 +37,13 @@ func SetNodeInterface(name string) bool {
 }
 
 func setNodeInterface(name string) bool {
-	if interfaces == nil {
-		var err error
-		interfaces, err = net.Interfaces()
-		if err != nil && name != "" {
-			return false
-		}
-	}
-
-	for _, ifs := range interfaces {
-		if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
-			copy(nodeID[:], ifs.HardwareAddr)
-			ifname = ifs.Name
-			return true
-		}
+	iname, addr := getHardwareInterface(name) // null implementation for js
+	if iname != "" && addr != nil {
+		ifname = iname
+		copy(nodeID[:], addr)
+		return true
 	}
 
 	// We found no interfaces with a valid hardware address. If name
 	// does not specify a specific interface generate a random Node ID
@@ -94,9 +84,6 @@ func SetNodeID(id []byte) bool {
 // NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
 // not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
 func (uuid UUID) NodeID() []byte {
-	if len(uuid) != 16 {
-		return nil
-	}
 	var node [6]byte
 	copy(node[:], uuid[10:])
 	return node[:]

vendor/github.com/google/uuid/node_js.go (generated, vendored; new file, 12 lines)
@@ -0,0 +1,12 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build js

package uuid

// getHardwareInterface returns nil values for the JS version of the code.
// This remvoves the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil }

vendor/github.com/google/uuid/node_net.go (generated, vendored; new file, 33 lines)
@@ -0,0 +1,33 @@
// Copyright 2017 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !js

package uuid

import "net"

var interfaces []net.Interface // cached list of interfaces

// getHardwareInterface returns the name and hardware address of interface name.
// If name is "" then the name and hardware address of one of the system's
// interfaces is returned. If no interfaces are found (name does not exist or
// there are no interfaces) then "", nil is returned.
//
// Only addresses of at least 6 bytes are returned.
func getHardwareInterface(name string) (string, []byte) {
	if interfaces == nil {
		var err error
		interfaces, err = net.Interfaces()
		if err != nil {
			return "", nil
		}
	}
	for _, ifs := range interfaces {
		if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
			return ifs.Name, ifs.HardwareAddr
		}
	}
	return "", nil
}
6
vendor/github.com/google/uuid/time.go
generated
vendored
6
vendor/github.com/google/uuid/time.go
generated
vendored
|
@ -86,7 +86,7 @@ func clockSequence() int {
|
||||||
return int(clockSeq & 0x3fff)
|
return int(clockSeq & 0x3fff)
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to
|
// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
|
||||||
// -1 causes a new sequence to be generated.
|
// -1 causes a new sequence to be generated.
|
||||||
func SetClockSequence(seq int) {
|
func SetClockSequence(seq int) {
|
||||||
defer timeMu.Unlock()
|
defer timeMu.Unlock()
|
||||||
|
@ -100,9 +100,9 @@ func setClockSequence(seq int) {
|
||||||
randomBits(b[:]) // clock sequence
|
randomBits(b[:]) // clock sequence
|
||||||
seq = int(b[0])<<8 | int(b[1])
|
seq = int(b[0])<<8 | int(b[1])
|
||||||
}
|
}
|
||||||
old_seq := clockSeq
|
oldSeq := clockSeq
|
||||||
clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
|
clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
|
||||||
if old_seq != clockSeq {
|
if oldSeq != clockSeq {
|
||||||
lasttime = 0
|
lasttime = 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
19 vendor/github.com/google/uuid/uuid.go generated vendored
@@ -58,11 +58,11 @@ func Parse(s string) (UUID, error) {
 		14, 16,
 		19, 21,
 		24, 26, 28, 30, 32, 34} {
-		if v, ok := xtob(s[x], s[x+1]); !ok {
+		v, ok := xtob(s[x], s[x+1])
+		if !ok {
 			return uuid, errors.New("invalid UUID format")
-		} else {
-			uuid[i] = v
 		}
+		uuid[i] = v
 	}
 	return uuid, nil
 }
@@ -88,15 +88,22 @@ func ParseBytes(b []byte) (UUID, error) {
 		14, 16,
 		19, 21,
 		24, 26, 28, 30, 32, 34} {
-		if v, ok := xtob(b[x], b[x+1]); !ok {
+		v, ok := xtob(b[x], b[x+1])
+		if !ok {
 			return uuid, errors.New("invalid UUID format")
-		} else {
-			uuid[i] = v
 		}
+		uuid[i] = v
 	}
 	return uuid, nil
 }
 
+// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
+// does not have a length of 16. The bytes are copied from the slice.
+func FromBytes(b []byte) (uuid UUID, err error) {
+	err = uuid.UnmarshalBinary(b)
+	return uuid, err
+}
+
 // Must returns uuid if err is nil and panics otherwise.
 func Must(uuid UUID, err error) UUID {
 	if err != nil {
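To make the refactor concrete, here is a hedged usage sketch (not from the diff) of the parsing entry points changed above, including the newly added FromBytes:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Parse validates the textual form pair of hex digits at a time,
	// which is the loop being rewritten in the hunk above.
	u, err := uuid.Parse("f47ac10b-58cc-0372-8567-0e02b2c3d479")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}

	// FromBytes, added in this change, rebuilds a UUID from its raw 16 bytes.
	raw, _ := u.MarshalBinary()
	v := uuid.Must(uuid.FromBytes(raw))
	fmt.Println(u == v) // true
}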
2 vendor/github.com/google/uuid/version4.go generated vendored
@@ -14,7 +14,7 @@ func New() UUID {
 	return Must(NewRandom())
 }
 
-// NewRandom returns a Random (Version 4) UUID or panics.
+// NewRandom returns a Random (Version 4) UUID.
 //
 // The strength of the UUIDs is based on the strength of the crypto/rand
 // package.
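A short, hedged illustration (not part of the diff) of the distinction the corrected comment draws between New and NewRandom:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// New is Must(NewRandom()): it panics if the random source fails.
	fmt.Println(uuid.New())

	// NewRandom reports that failure as an error instead.
	u, err := uuid.NewRandom()
	if err != nil {
		fmt.Println("random source unavailable:", err)
		return
	}
	fmt.Println(u, "version", u.Version())
}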
@@ -1,6 +1,6 @@
 The MIT License (MIT)
 
-Copyright (c) 2014 Coda Hale
+Copyright (c) 2015 Klaus Post
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -9,13 +9,14 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 copies of the Software, and to permit persons to whom the Software is
 furnished to do so, subject to the following conditions:
 
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
 
 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
1030 vendor/github.com/klauspost/cpuid/cpuid.go generated vendored Normal file
File diff suppressed because it is too large.
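Since the cpuid.go diff itself is suppressed, here is a brief, hedged sketch of how this vendored package is typically consumed; the field names follow the klauspost/cpuid v1 documentation and should be treated as assumptions where they go beyond this diff:

package main

import (
	"fmt"

	"github.com/klauspost/cpuid"
)

func main() {
	// Detection runs in the package's init, filling the global cpuid.CPU
	// value via the assembly stubs and initCPU wiring added below.
	fmt.Println("Name:", cpuid.CPU.BrandName)
	fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores)
	fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore)
	fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores)
	fmt.Println("Family:", cpuid.CPU.Family, "Model:", cpuid.CPU.Model)
	fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine)
	fmt.Println("Features:", cpuid.CPU.Features)
}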
42 vendor/github.com/klauspost/cpuid/cpuid_386.s generated vendored Normal file
@@ -0,0 +1,42 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

// +build 386,!gccgo

// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
	XORL CX, CX
	MOVL op+0(FP), AX
	CPUID
	MOVL AX, eax+4(FP)
	MOVL BX, ebx+8(FP)
	MOVL CX, ecx+12(FP)
	MOVL DX, edx+16(FP)
	RET

// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
	MOVL op+0(FP), AX
	MOVL op2+4(FP), CX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func xgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
	MOVL index+0(FP), CX
	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
	MOVL AX, eax+4(FP)
	MOVL DX, edx+8(FP)
	RET

// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
	MOVL AX, eax+0(FP)
	MOVL BX, ebx+4(FP)
	MOVL CX, ecx+8(FP)
	MOVL DX, edx+12(FP)
	RET
42 vendor/github.com/klauspost/cpuid/cpuid_amd64.s generated vendored Normal file
@@ -0,0 +1,42 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

//+build amd64,!gccgo

// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
	XORQ CX, CX
	MOVL op+0(FP), AX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
	MOVL op+0(FP), AX
	MOVL op2+4(FP), CX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func asmXgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
	MOVL index+0(FP), CX
	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
	MOVL AX, eax+8(FP)
	MOVL DX, edx+12(FP)
	RET

// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
	MOVL AX, eax+0(FP)
	MOVL BX, ebx+4(FP)
	MOVL CX, ecx+8(FP)
	MOVL DX, edx+12(FP)
	RET
17 vendor/github.com/klauspost/cpuid/detect_intel.go generated vendored Normal file
@@ -0,0 +1,17 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

// +build 386,!gccgo amd64,!gccgo

package cpuid

func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
func asmXgetbv(index uint32) (eax, edx uint32)
func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)

func initCPU() {
	cpuid = asmCpuid
	cpuidex = asmCpuidex
	xgetbv = asmXgetbv
	rdtscpAsm = asmRdtscpAsm
}
23 vendor/github.com/klauspost/cpuid/detect_ref.go generated vendored Normal file
@@ -0,0 +1,23 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

// +build !amd64,!386 gccgo

package cpuid

func initCPU() {
	cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) {
		return 0, 0, 0, 0
	}

	cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
		return 0, 0, 0, 0
	}

	xgetbv = func(index uint32) (eax, edx uint32) {
		return 0, 0
	}

	rdtscpAsm = func() (eax, ebx, ecx, edx uint32) {
		return 0, 0, 0, 0
	}
}
4 vendor/github.com/klauspost/cpuid/generate.go generated vendored Normal file
@@ -0,0 +1,4 @@
package cpuid

//go:generate go run private-gen.go
//go:generate gofmt -w ./private
476 vendor/github.com/klauspost/cpuid/private-gen.go generated vendored Normal file
@@ -0,0 +1,476 @@
// +build ignore

package main

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/parser"
	"go/printer"
	"go/token"
	"io"
	"io/ioutil"
	"log"
	"os"
	"reflect"
	"strings"
	"unicode"
	"unicode/utf8"
)

var inFiles = []string{"cpuid.go", "cpuid_test.go"}
var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"}
var fileSet = token.NewFileSet()
var reWrites = []rewrite{
	initRewrite("CPUInfo -> cpuInfo"),
	initRewrite("Vendor -> vendor"),
	initRewrite("Flags -> flags"),
	initRewrite("Detect -> detect"),
	initRewrite("CPU -> cpu"),
}
var excludeNames = map[string]bool{"string": true, "join": true, "trim": true,
	// cpuid_test.go
	"t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true,
}

var excludePrefixes = []string{"test", "benchmark"}

func main() {
	Package := "private"
	parserMode := parser.ParseComments
	exported := make(map[string]rewrite)
	for _, file := range inFiles {
		in, err := os.Open(file)
		if err != nil {
			log.Fatalf("opening input", err)
		}

		src, err := ioutil.ReadAll(in)
		if err != nil {
			log.Fatalf("reading input", err)
		}

		astfile, err := parser.ParseFile(fileSet, file, src, parserMode)
		if err != nil {
			log.Fatalf("parsing input", err)
		}

		for _, rw := range reWrites {
			astfile = rw(astfile)
		}

		// Inspect the AST and print all identifiers and literals.
		var startDecl token.Pos
		var endDecl token.Pos
		ast.Inspect(astfile, func(n ast.Node) bool {
			var s string
			switch x := n.(type) {
			case *ast.Ident:
				if x.IsExported() {
					t := strings.ToLower(x.Name)
					for _, pre := range excludePrefixes {
						if strings.HasPrefix(t, pre) {
							return true
						}
					}
					if excludeNames[t] != true {
						//if x.Pos() > startDecl && x.Pos() < endDecl {
						exported[x.Name] = initRewrite(x.Name + " -> " + t)
					}
				}

			case *ast.GenDecl:
				if x.Tok == token.CONST && x.Lparen > 0 {
					startDecl = x.Lparen
					endDecl = x.Rparen
					// fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl))
				}
			}
			if s != "" {
				fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s)
			}
			return true
		})

		for _, rw := range exported {
			astfile = rw(astfile)
		}

		var buf bytes.Buffer

		printer.Fprint(&buf, fileSet, astfile)

		// Remove package documentation and insert information
		s := buf.String()
		ind := strings.Index(buf.String(), "\npackage cpuid")
		s = s[ind:]
		s = "// Generated, DO NOT EDIT,\n" +
			"// but copy it to your own project and rename the package.\n" +
			"// See more at http://github.com/klauspost/cpuid\n" +
			s

		outputName := Package + string(os.PathSeparator) + file

		err = ioutil.WriteFile(outputName, []byte(s), 0644)
		if err != nil {
			log.Fatalf("writing output: %s", err)
		}
		log.Println("Generated", outputName)
	}

	for _, file := range copyFiles {
		dst := ""
		if strings.HasPrefix(file, "cpuid") {
			dst = Package + string(os.PathSeparator) + file
		} else {
			dst = Package + string(os.PathSeparator) + "cpuid_" + file
		}
		err := copyFile(file, dst)
		if err != nil {
			log.Fatalf("copying file: %s", err)
		}
		log.Println("Copied", dst)
	}
}

// CopyFile copies a file from src to dst. If src and dst files exist, and are
// the same, then return success. Copy the file contents from src to dst.
func copyFile(src, dst string) (err error) {
	sfi, err := os.Stat(src)
	if err != nil {
		return
	}
	if !sfi.Mode().IsRegular() {
		// cannot copy non-regular files (e.g., directories,
		// symlinks, devices, etc.)
		return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String())
	}
	dfi, err := os.Stat(dst)
	if err != nil {
		if !os.IsNotExist(err) {
			return
		}
	} else {
		if !(dfi.Mode().IsRegular()) {
			return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String())
		}
		if os.SameFile(sfi, dfi) {
			return
		}
	}
	err = copyFileContents(src, dst)
	return
}

// copyFileContents copies the contents of the file named src to the file named
// by dst. The file will be created if it does not already exist. If the
// destination file exists, all it's contents will be replaced by the contents
// of the source file.
func copyFileContents(src, dst string) (err error) {
	in, err := os.Open(src)
	if err != nil {
		return
	}
	defer in.Close()
	out, err := os.Create(dst)
	if err != nil {
		return
	}
	defer func() {
		cerr := out.Close()
		if err == nil {
			err = cerr
		}
	}()
	if _, err = io.Copy(out, in); err != nil {
		return
	}
	err = out.Sync()
	return
}

type rewrite func(*ast.File) *ast.File

// Mostly copied from gofmt
func initRewrite(rewriteRule string) rewrite {
	f := strings.Split(rewriteRule, "->")
	if len(f) != 2 {
		fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n")
		os.Exit(2)
	}
	pattern := parseExpr(f[0], "pattern")
	replace := parseExpr(f[1], "replacement")
	return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) }
}

// parseExpr parses s as an expression.
// It might make sense to expand this to allow statement patterns,
// but there are problems with preserving formatting and also
// with what a wildcard for a statement looks like.
func parseExpr(s, what string) ast.Expr {
	x, err := parser.ParseExpr(s)
	if err != nil {
		fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err)
		os.Exit(2)
	}
	return x
}

// Keep this function for debugging.
/*
func dump(msg string, val reflect.Value) {
	fmt.Printf("%s:\n", msg)
	ast.Print(fileSet, val.Interface())
	fmt.Println()
}
*/

// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file.
func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File {
	cmap := ast.NewCommentMap(fileSet, p, p.Comments)
	m := make(map[string]reflect.Value)
	pat := reflect.ValueOf(pattern)
	repl := reflect.ValueOf(replace)

	var rewriteVal func(val reflect.Value) reflect.Value
	rewriteVal = func(val reflect.Value) reflect.Value {
		// don't bother if val is invalid to start with
		if !val.IsValid() {
			return reflect.Value{}
		}
		for k := range m {
			delete(m, k)
		}
		val = apply(rewriteVal, val)
		if match(m, pat, val) {
			val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos()))
		}
		return val
	}

	r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File)
	r.Comments = cmap.Filter(r).Comments() // recreate comments list
	return r
}

// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y.
func set(x, y reflect.Value) {
	// don't bother if x cannot be set or y is invalid
	if !x.CanSet() || !y.IsValid() {
		return
	}
	defer func() {
		if x := recover(); x != nil {
			if s, ok := x.(string); ok &&
				(strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) {
				// x cannot be set to y - ignore this rewrite
				return
			}
			panic(x)
		}
	}()
	x.Set(y)
}

// Values/types for special cases.
var (
	objectPtrNil = reflect.ValueOf((*ast.Object)(nil))
	scopePtrNil  = reflect.ValueOf((*ast.Scope)(nil))

	identType     = reflect.TypeOf((*ast.Ident)(nil))
	objectPtrType = reflect.TypeOf((*ast.Object)(nil))
	positionType  = reflect.TypeOf(token.NoPos)
	callExprType  = reflect.TypeOf((*ast.CallExpr)(nil))
	scopePtrType  = reflect.TypeOf((*ast.Scope)(nil))
)

// apply replaces each AST field x in val with f(x), returning val.
// To avoid extra conversions, f operates on the reflect.Value form.
func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value {
	if !val.IsValid() {
		return reflect.Value{}
	}

	// *ast.Objects introduce cycles and are likely incorrect after
	// rewrite; don't follow them but replace with nil instead
	if val.Type() == objectPtrType {
		return objectPtrNil
	}

	// similarly for scopes: they are likely incorrect after a rewrite;
	// replace them with nil
	if val.Type() == scopePtrType {
		return scopePtrNil
	}

	switch v := reflect.Indirect(val); v.Kind() {
	case reflect.Slice:
		for i := 0; i < v.Len(); i++ {
			e := v.Index(i)
			set(e, f(e))
		}
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			e := v.Field(i)
			set(e, f(e))
		}
	case reflect.Interface:
		e := v.Elem()
		set(v, f(e))
	}
	return val
}

func isWildcard(s string) bool {
	rune, size := utf8.DecodeRuneInString(s)
	return size == len(s) && unicode.IsLower(rune)
}

// match returns true if pattern matches val,
// recording wildcard submatches in m.
// If m == nil, match checks whether pattern == val.
func match(m map[string]reflect.Value, pattern, val reflect.Value) bool {
	// Wildcard matches any expression. If it appears multiple
	// times in the pattern, it must match the same expression
	// each time.
	if m != nil && pattern.IsValid() && pattern.Type() == identType {
		name := pattern.Interface().(*ast.Ident).Name
		if isWildcard(name) && val.IsValid() {
			// wildcards only match valid (non-nil) expressions.
			if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() {
				if old, ok := m[name]; ok {
					return match(nil, old, val)
				}
				m[name] = val
				return true
			}
		}
	}

	// Otherwise, pattern and val must match recursively.
	if !pattern.IsValid() || !val.IsValid() {
		return !pattern.IsValid() && !val.IsValid()
	}
	if pattern.Type() != val.Type() {
		return false
	}

	// Special cases.
	switch pattern.Type() {
	case identType:
		// For identifiers, only the names need to match
		// (and none of the other *ast.Object information).
		// This is a common case, handle it all here instead
		// of recursing down any further via reflection.
		p := pattern.Interface().(*ast.Ident)
		v := val.Interface().(*ast.Ident)
		return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name
	case objectPtrType, positionType:
		// object pointers and token positions always match
		return true
	case callExprType:
		// For calls, the Ellipsis fields (token.Position) must
		// match since that is how f(x) and f(x...) are different.
		// Check them here but fall through for the remaining fields.
		p := pattern.Interface().(*ast.CallExpr)
		v := val.Interface().(*ast.CallExpr)
		if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() {
			return false
		}
	}

	p := reflect.Indirect(pattern)
	v := reflect.Indirect(val)
	if !p.IsValid() || !v.IsValid() {
		return !p.IsValid() && !v.IsValid()
	}

	switch p.Kind() {
	case reflect.Slice:
		if p.Len() != v.Len() {
			return false
		}
		for i := 0; i < p.Len(); i++ {
			if !match(m, p.Index(i), v.Index(i)) {
				return false
			}
		}
		return true

	case reflect.Struct:
		for i := 0; i < p.NumField(); i++ {
			if !match(m, p.Field(i), v.Field(i)) {
				return false
			}
		}
		return true

	case reflect.Interface:
		return match(m, p.Elem(), v.Elem())
	}

	// Handle token integers, etc.
	return p.Interface() == v.Interface()
}

// subst returns a copy of pattern with values from m substituted in place
// of wildcards and pos used as the position of tokens from the pattern.
// if m == nil, subst returns a copy of pattern and doesn't change the line
// number information.
func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value {
	if !pattern.IsValid() {
		return reflect.Value{}
	}

	// Wildcard gets replaced with map value.
	if m != nil && pattern.Type() == identType {
		name := pattern.Interface().(*ast.Ident).Name
		if isWildcard(name) {
			if old, ok := m[name]; ok {
				return subst(nil, old, reflect.Value{})
			}
		}
	}

	if pos.IsValid() && pattern.Type() == positionType {
		// use new position only if old position was valid in the first place
		if old := pattern.Interface().(token.Pos); !old.IsValid() {
			return pattern
		}
		return pos
	}

	// Otherwise copy.
	switch p := pattern; p.Kind() {
	case reflect.Slice:
		v := reflect.MakeSlice(p.Type(), p.Len(), p.Len())
		for i := 0; i < p.Len(); i++ {
			v.Index(i).Set(subst(m, p.Index(i), pos))
		}
		return v

	case reflect.Struct:
		v := reflect.New(p.Type()).Elem()
		for i := 0; i < p.NumField(); i++ {
			v.Field(i).Set(subst(m, p.Field(i), pos))
		}
		return v

	case reflect.Ptr:
		v := reflect.New(p.Type()).Elem()
		if elem := p.Elem(); elem.IsValid() {
			v.Set(subst(m, elem, pos).Addr())
		}
		return v

	case reflect.Interface:
		v := reflect.New(p.Type()).Elem()
		if elem := p.Elem(); elem.IsValid() {
			v.Set(subst(m, elem, pos))
		}
		return v
	}

	return pattern
}
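As a hedged, standalone illustration (not part of the vendored file) of the 'pattern -> replacement' rule format that private-gen.go feeds to go/parser; splitRule below is a simplified, hypothetical take on what initRewrite does, not the generator's own code:

package main

import (
	"fmt"
	"go/parser"
	"strings"
)

// splitRule splits a rewrite rule into its pattern and replacement halves,
// roughly the way initRewrite above splits on "->".
func splitRule(rule string) (pattern, replacement string, err error) {
	parts := strings.Split(rule, "->")
	if len(parts) != 2 {
		return "", "", fmt.Errorf("rule must be of the form 'pattern -> replacement'")
	}
	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
}

func main() {
	pat, repl, err := splitRule("CPUInfo -> cpuInfo")
	if err != nil {
		fmt.Println(err)
		return
	}
	// Both sides must parse as Go expressions, exactly as parseExpr requires.
	for _, s := range []string{pat, repl} {
		if _, err := parser.ParseExpr(s); err != nil {
			fmt.Println("bad expression:", s, err)
			return
		}
	}
	fmt.Printf("rewrite every %q identifier to %q\n", pat, repl)
}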
1024 vendor/github.com/klauspost/cpuid/private/cpuid.go generated vendored Normal file
File diff suppressed because it is too large.
42 vendor/github.com/klauspost/cpuid/private/cpuid_386.s generated vendored Normal file
@@ -0,0 +1,42 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

// +build 386,!gccgo

// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
	XORL CX, CX
	MOVL op+0(FP), AX
	CPUID
	MOVL AX, eax+4(FP)
	MOVL BX, ebx+8(FP)
	MOVL CX, ecx+12(FP)
	MOVL DX, edx+16(FP)
	RET

// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
	MOVL op+0(FP), AX
	MOVL op2+4(FP), CX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func xgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
	MOVL index+0(FP), CX
	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
	MOVL AX, eax+4(FP)
	MOVL DX, edx+8(FP)
	RET

// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
	MOVL AX, eax+0(FP)
	MOVL BX, ebx+4(FP)
	MOVL CX, ecx+8(FP)
	MOVL DX, edx+12(FP)
	RET
42 vendor/github.com/klauspost/cpuid/private/cpuid_amd64.s generated vendored Normal file
@@ -0,0 +1,42 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

//+build amd64,!gccgo

// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
	XORQ CX, CX
	MOVL op+0(FP), AX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
	MOVL op+0(FP), AX
	MOVL op2+4(FP), CX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func asmXgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
	MOVL index+0(FP), CX
	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
	MOVL AX, eax+8(FP)
	MOVL DX, edx+12(FP)
	RET

// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
	MOVL AX, eax+0(FP)
	MOVL BX, ebx+4(FP)
	MOVL CX, ecx+8(FP)
	MOVL DX, edx+12(FP)
	RET
17 vendor/github.com/klauspost/cpuid/private/cpuid_detect_intel.go generated vendored Normal file
@@ -0,0 +1,17 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

// +build 386,!gccgo amd64,!gccgo

package cpuid

func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
func asmXgetbv(index uint32) (eax, edx uint32)
func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)

func initCPU() {
	cpuid = asmCpuid
	cpuidex = asmCpuidex
	xgetbv = asmXgetbv
	rdtscpAsm = asmRdtscpAsm
}
23 vendor/github.com/klauspost/cpuid/private/cpuid_detect_ref.go generated vendored Normal file
@@ -0,0 +1,23 @@
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

// +build !amd64,!386 gccgo

package cpuid

func initCPU() {
	cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) {
		return 0, 0, 0, 0
	}

	cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
		return 0, 0, 0, 0
	}

	xgetbv = func(index uint32) (eax, edx uint32) {
		return 0, 0
	}

	rdtscpAsm = func() (eax, ebx, ecx, edx uint32) {
		return 0, 0, 0, 0
	}
}
10 vendor/manifest vendored
@@ -88,7 +88,7 @@
 			"importpath": "github.com/google/uuid",
 			"repository": "https://github.com/google/uuid",
 			"vcs": "git",
-			"revision": "7e072fc3a7be179aee6d3359e46015aa8c995314",
+			"revision": "dec09d789f3dba190787f8b4454c7d3c936fed9e",
 			"branch": "master",
 			"notests": true
 		},
@@ -125,6 +125,14 @@
 			"path": "/basic",
 			"notests": true
 		},
+		{
+			"importpath": "github.com/klauspost/cpuid",
+			"repository": "https://github.com/klauspost/cpuid",
+			"vcs": "git",
+			"revision": "ae832f27941af41db13bd6d8efd2493e3b22415a",
+			"branch": "master",
+			"notests": true
+		},
 		{
 			"importpath": "github.com/lucas-clemente/aes12",
 			"repository": "https://github.com/lucas-clemente/aes12",
|
Loading…
Reference in a new issue