diff --git a/Dockerfile b/Dockerfile
index 409ee414..4c134725 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-ARG GOLANG_IMAGE=golang:1.22-alpine3.20
+ARG GOLANG_IMAGE=golang:1.23-alpine3.20
ARG BUILD_IMAGE=alpine:3.20
# Cross-Compilation
diff --git a/app/api/api.go b/app/api/api.go
index 00470ff0..db34aa62 100644
--- a/app/api/api.go
+++ b/app/api/api.go
@@ -36,8 +36,9 @@ import (
"github.com/datarhei/core/v16/monitor"
"github.com/datarhei/core/v16/net"
"github.com/datarhei/core/v16/prometheus"
- "github.com/datarhei/core/v16/psutil"
"github.com/datarhei/core/v16/resources"
+ "github.com/datarhei/core/v16/resources/psutil"
+ "github.com/datarhei/core/v16/resources/psutil/gpu/nvidia"
"github.com/datarhei/core/v16/restream"
restreamapp "github.com/datarhei/core/v16/restream/app"
"github.com/datarhei/core/v16/restream/replace"
@@ -127,8 +128,6 @@ type api struct {
state string
undoMaxprocs func()
-
- process psutil.Process
}
// ErrConfigReload is an error returned to indicate that a reload of
@@ -370,17 +369,23 @@ func (a *api) start(ctx context.Context) error {
debug.SetMemoryLimit(math.MaxInt64)
}
+ psutil, err := psutil.New("", nvidia.New(""))
+ if err != nil {
+ return fmt.Errorf("failed to initialize psutil: %w", err)
+ }
+
resources, err := resources.New(resources.Config{
- MaxCPU: cfg.Resources.MaxCPUUsage,
- MaxMemory: cfg.Resources.MaxMemoryUsage,
- Logger: a.log.logger.core.WithComponent("Resources"),
+ MaxCPU: cfg.Resources.MaxCPUUsage,
+ MaxMemory: cfg.Resources.MaxMemoryUsage,
+ MaxGPU: cfg.Resources.MaxGPUUsage,
+ MaxGPUMemory: cfg.Resources.MaxGPUMemoryUsage,
+ Logger: a.log.logger.core.WithComponent("Resources"),
+ PSUtil: psutil,
})
if err != nil {
return fmt.Errorf("failed to initialize resource manager: %w", err)
}
- resources.Start()
-
a.resources = resources
if cfg.Sessions.Enable {
@@ -507,6 +512,7 @@ func (a *api) start(ctx context.Context) error {
ValidatorOutput: validatorOut,
Portrange: portrange,
Collector: a.sessions.Collector("ffmpeg"),
+ Resource: a.resources,
})
if err != nil {
return fmt.Errorf("unable to create ffmpeg: %w", err)
@@ -848,6 +854,7 @@ func (a *api) start(ctx context.Context) error {
"type": "mem",
"name": "mem",
}),
+ Storage: "swiss",
}
var memfs fs.Filesystem = nil
if len(cfg.Storage.Memory.Backup.Dir) != 0 {
@@ -1228,13 +1235,15 @@ func (a *api) start(ctx context.Context) error {
metrics.Register(monitor.NewUptimeCollector())
metrics.Register(monitor.NewCPUCollector(a.resources))
metrics.Register(monitor.NewMemCollector(a.resources))
- metrics.Register(monitor.NewNetCollector())
- metrics.Register(monitor.NewDiskCollector(a.diskfs.Metadata("base")))
+ metrics.Register(monitor.NewGPUCollector(a.resources))
+ metrics.Register(monitor.NewNetCollector(a.resources))
+ metrics.Register(monitor.NewDiskCollector(a.diskfs.Metadata("base"), a.resources))
metrics.Register(monitor.NewFilesystemCollector("diskfs", a.diskfs))
metrics.Register(monitor.NewFilesystemCollector("memfs", a.memfs))
for name, fs := range a.s3fs {
metrics.Register(monitor.NewFilesystemCollector(name, fs))
}
+ metrics.Register(monitor.NewSelfCollector())
metrics.Register(monitor.NewRestreamCollector(a.restream))
metrics.Register(monitor.NewFFmpegCollector(a.ffmpeg))
metrics.Register(monitor.NewSessionCollector(a.sessions, []string{}))
@@ -1428,7 +1437,6 @@ func (a *api) start(ctx context.Context) error {
Password: "",
DefaultFile: "index.html",
DefaultContentType: "text/html",
- Gzip: true,
Filesystem: a.diskfs,
Cache: a.cache,
},
@@ -1441,7 +1449,6 @@ func (a *api) start(ctx context.Context) error {
Password: cfg.Storage.Memory.Auth.Password,
DefaultFile: "",
DefaultContentType: "application/data",
- Gzip: true,
Filesystem: a.memfs,
Cache: nil,
},
@@ -1457,7 +1464,6 @@ func (a *api) start(ctx context.Context) error {
Password: s3.Auth.Password,
DefaultFile: "",
DefaultContentType: "application/data",
- Gzip: true,
Filesystem: a.s3fs[s3.Name],
Cache: a.cache,
})
@@ -1470,7 +1476,7 @@ func (a *api) start(ctx context.Context) error {
Restream: a.restream,
Metrics: a.metrics,
Prometheus: a.prom,
- MimeTypesFile: cfg.Storage.MimeTypes,
+ MimeTypesFile: cfg.Storage.MimeTypesFile,
Filesystems: httpfilesystems,
IPLimiter: iplimiter,
Profiling: cfg.Debug.Profiling,
@@ -1501,6 +1507,12 @@ func (a *api) start(ctx context.Context) error {
return false
},
+ Resources: a.resources,
+ Compress: http.CompressConfig{
+ Encoding: cfg.Compress.Encoding,
+ MimeTypes: cfg.Compress.MimeTypes,
+ MinLength: cfg.Compress.MinLength,
+ },
}
mainserverhandler, err := http.NewServer(serverConfig)
@@ -1882,11 +1894,6 @@ func (a *api) stop() {
a.service = nil
}
- if a.process != nil {
- a.process.Stop()
- a.process = nil
- }
-
// Unregister all collectors
if a.metrics != nil {
a.metrics.UnregisterAll()
@@ -1909,7 +1916,7 @@ func (a *api) stop() {
// Stop resource observer
if a.resources != nil {
- a.resources.Stop()
+ a.resources.Cancel()
}
// Stop the session tracker
diff --git a/cluster/about.go b/cluster/about.go
index 3356faec..60585a38 100644
--- a/cluster/about.go
+++ b/cluster/about.go
@@ -18,18 +18,29 @@ type ClusterRaft struct {
}
type ClusterNodeResources struct {
- IsThrottling bool // Whether this core is currently throttling
- NCPU float64 // Number of CPU on this node
- CPU float64 // Current CPU load, 0-100*ncpu
- CPULimit float64 // Defined CPU load limit, 0-100*ncpu
- CPUCore float64 // Current CPU load of the core itself, 0-100*ncpu
- Mem uint64 // Currently used memory in bytes
- MemLimit uint64 // Defined memory limit in bytes
- MemTotal uint64 // Total available memory in bytes
- MemCore uint64 // Current used memory of the core itself in bytes
+ IsThrottling bool // Whether this core is currently throttling
+ NCPU float64 // Number of CPU on this node
+ CPU float64 // Current CPU load, 0-100*ncpu
+ CPULimit float64 // Defined CPU load limit, 0-100*ncpu
+ CPUCore float64 // Current CPU load of the core itself, 0-100*ncpu
+ Mem uint64 // Currently used memory in bytes
+ MemLimit uint64 // Defined memory limit in bytes
+ MemTotal uint64 // Total available memory in bytes
+ MemCore uint64 // Current used memory of the core itself in bytes
+ GPU []ClusterNodeGPUResources // GPU resources
Error error
}
+type ClusterNodeGPUResources struct {
+ Mem uint64 // Currently used memory in bytes
+ MemLimit uint64 // Defined memory limit in bytes
+ MemTotal uint64 // Total available memory in bytes
+ Usage float64 // Current general usage, 0-100
+ UsageLimit float64 // Defined general usage limit, 0-100
+ Encoder float64 // Current encoder usage, 0-100
+ Decoder float64 // Current decoder usage, 0-100
+}
+
type ClusterNode struct {
ID string
Name string
@@ -157,6 +168,19 @@ func (c *cluster) About() (ClusterAbout, error) {
},
}
+ if len(nodeAbout.Resources.GPU) != 0 {
+ node.Resources.GPU = make([]ClusterNodeGPUResources, len(nodeAbout.Resources.GPU))
+ for i, gpu := range nodeAbout.Resources.GPU {
+ node.Resources.GPU[i].Mem = gpu.Mem
+ node.Resources.GPU[i].MemLimit = gpu.MemLimit
+ node.Resources.GPU[i].MemTotal = gpu.MemTotal
+ node.Resources.GPU[i].Usage = gpu.Usage
+ node.Resources.GPU[i].UsageLimit = gpu.UsageLimit
+ node.Resources.GPU[i].Encoder = gpu.Encoder
+ node.Resources.GPU[i].Decoder = gpu.Decoder
+ }
+ }
+
if s, ok := serversMap[nodeAbout.ID]; ok {
node.Voter = s.Voter
node.Leader = s.Leader
diff --git a/cluster/api.go b/cluster/api.go
index de2f865b..7c99f9b8 100644
--- a/cluster/api.go
+++ b/cluster/api.go
@@ -171,7 +171,7 @@ func (a *api) Version(c echo.Context) error {
// @Tags v1.0.0
// @ID cluster-1-about
// @Produce json
-// @Success 200 {string} About
+// @Success 200 {object} client.AboutResponse
// @Success 500 {object} Error
// @Router /v1/about [get]
func (a *api) About(c echo.Context) error {
@@ -195,6 +195,19 @@ func (a *api) About(c echo.Context) error {
},
}
+ if len(resources.GPU.GPU) != 0 {
+ about.Resources.GPU = make([]client.AboutResponseGPUResources, len(resources.GPU.GPU))
+ for i, gpu := range resources.GPU.GPU {
+ about.Resources.GPU[i].Mem = gpu.MemoryUsed
+ about.Resources.GPU[i].MemLimit = gpu.MemoryLimit
+ about.Resources.GPU[i].MemTotal = gpu.MemoryTotal
+ about.Resources.GPU[i].Usage = gpu.Usage
+ about.Resources.GPU[i].UsageLimit = gpu.UsageLimit
+ about.Resources.GPU[i].Encoder = gpu.Encoder
+ about.Resources.GPU[i].Decoder = gpu.Decoder
+ }
+ }
+
if err != nil {
about.Resources.Error = err.Error()
}
@@ -400,7 +413,7 @@ func (a *api) ProcessAdd(c echo.Context) error {
// @Param id path string true "Process ID"
// @Param domain query string false "Domain to act on"
// @Param X-Cluster-Origin header string false "Origin ID of request"
-// @Success 200 {string} string
+// @Success 200 {object} client.GetProcessResponse
// @Failure 404 {object} Error
// @Failure 500 {object} Error
// @Failure 508 {object} Error
diff --git a/cluster/client/client.go b/cluster/client/client.go
index 84ab0230..214bf34d 100644
--- a/cluster/client/client.go
+++ b/cluster/client/client.go
@@ -83,17 +83,28 @@ type AboutResponse struct {
Resources AboutResponseResources `json:"resources"`
}
+type AboutResponseGPUResources struct {
+ Mem uint64 `json:"memory_bytes"` // Currently used memory in bytes
+ MemLimit uint64 `json:"memory_limit_bytes"` // Defined memory limit in bytes
+ MemTotal uint64 `json:"memory_total_bytes"` // Total available memory in bytes
+ Usage float64 `json:"usage"` // Current general usage, 0-100
+ Encoder float64 `json:"encoder"` // Current encoder usage, 0-100
+ Decoder float64 `json:"decoder"` // Current decoder usage, 0-100
+ UsageLimit float64 `json:"usage_limit"` // Defined general usage limit, 0-100
+}
+
type AboutResponseResources struct {
- IsThrottling bool `json:"is_throttling"` // Whether this core is currently throttling
- NCPU float64 `json:"ncpu"` // Number of CPU on this node
- CPU float64 `json:"cpu"` // Current CPU load, 0-100*ncpu
- CPULimit float64 `json:"cpu_limit"` // Defined CPU load limit, 0-100*ncpu
- CPUCore float64 `json:"cpu_core"` // Current CPU load of the core itself, 0-100*ncpu
- Mem uint64 `json:"memory_bytes"` // Currently used memory in bytes
- MemLimit uint64 `json:"memory_limit_bytes"` // Defined memory limit in bytes
- MemTotal uint64 `json:"memory_total_bytes"` // Total available memory in bytes
- MemCore uint64 `json:"memory_core_bytes"` // Current used memory of the core itself in bytes
- Error string `json:"error"` // Last error
+ IsThrottling bool `json:"is_throttling"` // Whether this core is currently throttling
+ NCPU float64 `json:"ncpu"` // Number of CPU on this node
+ CPU float64 `json:"cpu"` // Current CPU load, 0-100*ncpu
+ CPULimit float64 `json:"cpu_limit"` // Defined CPU load limit, 0-100*ncpu
+ CPUCore float64 `json:"cpu_core"` // Current CPU load of the core itself, 0-100*ncpu
+ Mem uint64 `json:"memory_bytes"` // Currently used memory in bytes
+ MemLimit uint64 `json:"memory_limit_bytes"` // Defined memory limit in bytes
+ MemTotal uint64 `json:"memory_total_bytes"` // Total available memory in bytes
+ MemCore uint64 `json:"memory_core_bytes"` // Current used memory of the core itself in bytes
+ GPU []AboutResponseGPUResources `json:"gpu"` // Currently used GPU resources
+ Error string `json:"error"` // Last error
}
type SetNodeStateRequest struct {
diff --git a/cluster/cluster.go b/cluster/cluster.go
index 56d00a40..ccb9e65c 100644
--- a/cluster/cluster.go
+++ b/cluster/cluster.go
@@ -641,7 +641,7 @@ func (c *cluster) CertManager() autocert.Manager {
}
func (c *cluster) Shutdown() error {
- c.logger.Info().Log("Shutting down cluster")
+ c.logger.Info().Log("Shutting down cluster ...")
c.shutdownLock.Lock()
defer c.shutdownLock.Unlock()
@@ -652,9 +652,14 @@ func (c *cluster) Shutdown() error {
c.shutdown = true
close(c.shutdownCh)
+ c.logger.Info().Log("Waiting for all routines to exit ...")
+
c.shutdownWg.Wait()
+ c.logger.Info().Log("All routines exited ...")
+
if c.manager != nil {
+ c.logger.Info().Log("Shutting down node manager ...")
c.manager.NodesClear()
}
@@ -662,16 +667,17 @@ func (c *cluster) Shutdown() error {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
+ c.logger.Info().Log("Shutting down API ...")
+
c.api.Shutdown(ctx)
}
if c.raft != nil {
+ c.logger.Info().Log("Shutting down raft ...")
c.raft.Shutdown()
}
- // TODO: here might some situations, where the manager is still need from the synchronize loop and will run into a panic
- c.manager = nil
- c.raft = nil
+ c.logger.Info().Log("Cluster stopped")
return nil
}
@@ -1055,7 +1061,7 @@ func (c *cluster) trackLeaderChanges() {
if !isNodeInCluster {
// We're not anymore part of the cluster, shutdown
c.logger.Warn().WithField("id", c.nodeID).Log("This node left the cluster. Shutting down.")
- c.Shutdown()
+ go c.Shutdown()
}
case <-c.shutdownCh:
diff --git a/cluster/docs/ClusterAPI_docs.go b/cluster/docs/ClusterAPI_docs.go
index be2fd046..ce72fb69 100644
--- a/cluster/docs/ClusterAPI_docs.go
+++ b/cluster/docs/ClusterAPI_docs.go
@@ -1,5 +1,4 @@
-// Code generated by swaggo/swag. DO NOT EDIT.
-
+// Package docs Code generated by swaggo/swag. DO NOT EDIT
package docs
import "github.com/swaggo/swag"
@@ -66,7 +65,7 @@ const docTemplateClusterAPI = `{
"200": {
"description": "OK",
"schema": {
- "type": "string"
+ "$ref": "#/definitions/client.AboutResponse"
}
},
"500": {
@@ -769,6 +768,64 @@ const docTemplateClusterAPI = `{
}
},
"/v1/process/{id}": {
+ "get": {
+ "description": "Get a process from the cluster DB",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "v1.0.0"
+ ],
+ "summary": "Get a process",
+ "operationId": "cluster-1-get-process",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Process ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Domain to act on",
+ "name": "domain",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Origin ID of request",
+ "name": "X-Cluster-Origin",
+ "in": "header"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/client.GetProcessResponse"
+ }
+ },
+ "404": {
+ "description": "Not Found",
+ "schema": {
+ "$ref": "#/definitions/cluster.Error"
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "$ref": "#/definitions/cluster.Error"
+ }
+ },
+ "508": {
+ "description": "Loop Detected",
+ "schema": {
+ "$ref": "#/definitions/cluster.Error"
+ }
+ }
+ }
+ },
"put": {
"description": "Replace an existing process in the cluster DB",
"consumes": [
@@ -1225,32 +1282,6 @@ const docTemplateClusterAPI = `{
}
},
"definitions": {
- "access.Policy": {
- "type": "object",
- "properties": {
- "actions": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "domain": {
- "type": "string"
- },
- "name": {
- "type": "string"
- },
- "resource": {
- "type": "string"
- },
- "types": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- },
"app.Config": {
"type": "object",
"properties": {
@@ -1276,6 +1307,14 @@ const docTemplateClusterAPI = `{
"description": "percent",
"type": "number"
},
+ "limitGPU": {
+ "description": "GPU limits",
+ "allOf": [
+ {
+ "$ref": "#/definitions/app.ConfigLimitGPU"
+ }
+ ]
+ },
"limitMemory": {
"description": "bytes",
"type": "integer"
@@ -1285,7 +1324,7 @@ const docTemplateClusterAPI = `{
"type": "integer"
},
"logPatterns": {
- "description": "will we interpreted as regular expressions",
+ "description": "will be interpreted as regular expressions",
"type": "array",
"items": {
"type": "string"
@@ -1370,6 +1409,132 @@ const docTemplateClusterAPI = `{
}
}
},
+ "app.ConfigLimitGPU": {
+ "type": "object",
+ "properties": {
+ "decoder": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
+ "encoder": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
+ "memory": {
+ "description": "bytes",
+ "type": "integer"
+ },
+ "usage": {
+ "description": "percent 0-100",
+ "type": "number"
+ }
+ }
+ },
+ "client.AboutResponse": {
+ "type": "object",
+ "properties": {
+ "address": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "resources": {
+ "$ref": "#/definitions/client.AboutResponseResources"
+ },
+ "started_at": {
+ "type": "string"
+ },
+ "version": {
+ "type": "string"
+ }
+ }
+ },
+ "client.AboutResponseGPUResources": {
+ "type": "object",
+ "properties": {
+ "decoder": {
+ "description": "Current decoder usage, 0-100",
+ "type": "number"
+ },
+ "encoder": {
+ "description": "Current encoder usage, 0-100",
+ "type": "number"
+ },
+ "memory_bytes": {
+ "description": "Currently used memory in bytes",
+ "type": "integer"
+ },
+ "memory_limit_bytes": {
+ "description": "Defined memory limit in bytes",
+ "type": "integer"
+ },
+ "memory_total_bytes": {
+ "description": "Total available memory in bytes",
+ "type": "integer"
+ },
+ "usage": {
+ "description": "Current general usage, 0-100",
+ "type": "number"
+ },
+ "usage_limit": {
+ "description": "Defined general usage limit, 0-100",
+ "type": "number"
+ }
+ }
+ },
+ "client.AboutResponseResources": {
+ "type": "object",
+ "properties": {
+ "cpu": {
+ "description": "Current CPU load, 0-100*ncpu",
+ "type": "number"
+ },
+ "cpu_core": {
+ "description": "Current CPU load of the core itself, 0-100*ncpu",
+ "type": "number"
+ },
+ "cpu_limit": {
+ "description": "Defined CPU load limit, 0-100*ncpu",
+ "type": "number"
+ },
+ "error": {
+ "description": "Last error",
+ "type": "string"
+ },
+ "gpu": {
+ "description": "Currently used GPU resources",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/client.AboutResponseGPUResources"
+ }
+ },
+ "is_throttling": {
+ "description": "Whether this core is currently throttling",
+ "type": "boolean"
+ },
+ "memory_bytes": {
+ "description": "Currently used memory in bytes",
+ "type": "integer"
+ },
+ "memory_core_bytes": {
+ "description": "Current used memory of the core itself in bytes",
+ "type": "integer"
+ },
+ "memory_limit_bytes": {
+ "description": "Defined memory limit in bytes",
+ "type": "integer"
+ },
+ "memory_total_bytes": {
+ "description": "Total available memory in bytes",
+ "type": "integer"
+ },
+ "ncpu": {
+ "description": "Number of CPU on this node",
+ "type": "number"
+ }
+ }
+ },
"client.AddIdentityRequest": {
"type": "object",
"properties": {
@@ -1386,6 +1551,17 @@ const docTemplateClusterAPI = `{
}
}
},
+ "client.GetProcessResponse": {
+ "type": "object",
+ "properties": {
+ "nodeid": {
+ "type": "string"
+ },
+ "process": {
+ "$ref": "#/definitions/github_com_datarhei_core_v16_cluster_store.Process"
+ }
+ }
+ },
"client.JoinRequest": {
"type": "object",
"properties": {
@@ -1444,7 +1620,7 @@ const docTemplateClusterAPI = `{
"policies": {
"type": "array",
"items": {
- "$ref": "#/definitions/access.Policy"
+ "$ref": "#/definitions/policy.Policy"
}
}
}
@@ -1638,6 +1814,26 @@ const docTemplateClusterAPI = `{
}
}
},
+ "compress": {
+ "type": "object",
+ "properties": {
+ "encoding": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "mimetypes": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "min_length": {
+ "type": "integer"
+ }
+ }
+ },
"created_at": {
"description": "When this config has been persisted",
"type": "string"
@@ -1830,6 +2026,14 @@ const docTemplateClusterAPI = `{
"description": "percent 0-100",
"type": "number"
},
+ "max_gpu_memory_usage": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
+ "max_gpu_usage": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
"max_memory_usage": {
"description": "percent 0-100",
"type": "number"
@@ -2122,6 +2326,30 @@ const docTemplateClusterAPI = `{
}
}
},
+ "github_com_datarhei_core_v16_cluster_store.Process": {
+ "type": "object",
+ "properties": {
+ "config": {
+ "$ref": "#/definitions/app.Config"
+ },
+ "createdAt": {
+ "type": "string"
+ },
+ "error": {
+ "type": "string"
+ },
+ "metadata": {
+ "type": "object",
+ "additionalProperties": true
+ },
+ "order": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ }
+ }
+ },
"identity.Auth0Tenant": {
"type": "object",
"properties": {
@@ -2218,6 +2446,32 @@ const docTemplateClusterAPI = `{
}
}
},
+ "policy.Policy": {
+ "type": "object",
+ "properties": {
+ "actions": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "domain": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "resource": {
+ "type": "string"
+ },
+ "types": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
"skills.Codec": {
"type": "object",
"properties": {
diff --git a/cluster/docs/ClusterAPI_swagger.json b/cluster/docs/ClusterAPI_swagger.json
index 70cf3f4e..4932b2da 100644
--- a/cluster/docs/ClusterAPI_swagger.json
+++ b/cluster/docs/ClusterAPI_swagger.json
@@ -58,7 +58,7 @@
"200": {
"description": "OK",
"schema": {
- "type": "string"
+ "$ref": "#/definitions/client.AboutResponse"
}
},
"500": {
@@ -761,6 +761,64 @@
}
},
"/v1/process/{id}": {
+ "get": {
+ "description": "Get a process from the cluster DB",
+ "produces": [
+ "application/json"
+ ],
+ "tags": [
+ "v1.0.0"
+ ],
+ "summary": "Get a process",
+ "operationId": "cluster-1-get-process",
+ "parameters": [
+ {
+ "type": "string",
+ "description": "Process ID",
+ "name": "id",
+ "in": "path",
+ "required": true
+ },
+ {
+ "type": "string",
+ "description": "Domain to act on",
+ "name": "domain",
+ "in": "query"
+ },
+ {
+ "type": "string",
+ "description": "Origin ID of request",
+ "name": "X-Cluster-Origin",
+ "in": "header"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/client.GetProcessResponse"
+ }
+ },
+ "404": {
+ "description": "Not Found",
+ "schema": {
+ "$ref": "#/definitions/cluster.Error"
+ }
+ },
+ "500": {
+ "description": "Internal Server Error",
+ "schema": {
+ "$ref": "#/definitions/cluster.Error"
+ }
+ },
+ "508": {
+ "description": "Loop Detected",
+ "schema": {
+ "$ref": "#/definitions/cluster.Error"
+ }
+ }
+ }
+ },
"put": {
"description": "Replace an existing process in the cluster DB",
"consumes": [
@@ -1217,32 +1275,6 @@
}
},
"definitions": {
- "access.Policy": {
- "type": "object",
- "properties": {
- "actions": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "domain": {
- "type": "string"
- },
- "name": {
- "type": "string"
- },
- "resource": {
- "type": "string"
- },
- "types": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- },
"app.Config": {
"type": "object",
"properties": {
@@ -1268,6 +1300,14 @@
"description": "percent",
"type": "number"
},
+ "limitGPU": {
+ "description": "GPU limits",
+ "allOf": [
+ {
+ "$ref": "#/definitions/app.ConfigLimitGPU"
+ }
+ ]
+ },
"limitMemory": {
"description": "bytes",
"type": "integer"
@@ -1277,7 +1317,7 @@
"type": "integer"
},
"logPatterns": {
- "description": "will we interpreted as regular expressions",
+ "description": "will be interpreted as regular expressions",
"type": "array",
"items": {
"type": "string"
@@ -1362,6 +1402,132 @@
}
}
},
+ "app.ConfigLimitGPU": {
+ "type": "object",
+ "properties": {
+ "decoder": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
+ "encoder": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
+ "memory": {
+ "description": "bytes",
+ "type": "integer"
+ },
+ "usage": {
+ "description": "percent 0-100",
+ "type": "number"
+ }
+ }
+ },
+ "client.AboutResponse": {
+ "type": "object",
+ "properties": {
+ "address": {
+ "type": "string"
+ },
+ "id": {
+ "type": "string"
+ },
+ "resources": {
+ "$ref": "#/definitions/client.AboutResponseResources"
+ },
+ "started_at": {
+ "type": "string"
+ },
+ "version": {
+ "type": "string"
+ }
+ }
+ },
+ "client.AboutResponseGPUResources": {
+ "type": "object",
+ "properties": {
+ "decoder": {
+ "description": "Current decoder usage, 0-100",
+ "type": "number"
+ },
+ "encoder": {
+ "description": "Current encoder usage, 0-100",
+ "type": "number"
+ },
+ "memory_bytes": {
+ "description": "Currently used memory in bytes",
+ "type": "integer"
+ },
+ "memory_limit_bytes": {
+ "description": "Defined memory limit in bytes",
+ "type": "integer"
+ },
+ "memory_total_bytes": {
+ "description": "Total available memory in bytes",
+ "type": "integer"
+ },
+ "usage": {
+ "description": "Current general usage, 0-100",
+ "type": "number"
+ },
+ "usage_limit": {
+ "description": "Defined general usage limit, 0-100",
+ "type": "number"
+ }
+ }
+ },
+ "client.AboutResponseResources": {
+ "type": "object",
+ "properties": {
+ "cpu": {
+ "description": "Current CPU load, 0-100*ncpu",
+ "type": "number"
+ },
+ "cpu_core": {
+ "description": "Current CPU load of the core itself, 0-100*ncpu",
+ "type": "number"
+ },
+ "cpu_limit": {
+ "description": "Defined CPU load limit, 0-100*ncpu",
+ "type": "number"
+ },
+ "error": {
+ "description": "Last error",
+ "type": "string"
+ },
+ "gpu": {
+ "description": "Currently used GPU resources",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/client.AboutResponseGPUResources"
+ }
+ },
+ "is_throttling": {
+ "description": "Whether this core is currently throttling",
+ "type": "boolean"
+ },
+ "memory_bytes": {
+ "description": "Currently used memory in bytes",
+ "type": "integer"
+ },
+ "memory_core_bytes": {
+ "description": "Current used memory of the core itself in bytes",
+ "type": "integer"
+ },
+ "memory_limit_bytes": {
+ "description": "Defined memory limit in bytes",
+ "type": "integer"
+ },
+ "memory_total_bytes": {
+ "description": "Total available memory in bytes",
+ "type": "integer"
+ },
+ "ncpu": {
+ "description": "Number of CPU on this node",
+ "type": "number"
+ }
+ }
+ },
"client.AddIdentityRequest": {
"type": "object",
"properties": {
@@ -1378,6 +1544,17 @@
}
}
},
+ "client.GetProcessResponse": {
+ "type": "object",
+ "properties": {
+ "nodeid": {
+ "type": "string"
+ },
+ "process": {
+ "$ref": "#/definitions/github_com_datarhei_core_v16_cluster_store.Process"
+ }
+ }
+ },
"client.JoinRequest": {
"type": "object",
"properties": {
@@ -1436,7 +1613,7 @@
"policies": {
"type": "array",
"items": {
- "$ref": "#/definitions/access.Policy"
+ "$ref": "#/definitions/policy.Policy"
}
}
}
@@ -1630,6 +1807,26 @@
}
}
},
+ "compress": {
+ "type": "object",
+ "properties": {
+ "encoding": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "mimetypes": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "min_length": {
+ "type": "integer"
+ }
+ }
+ },
"created_at": {
"description": "When this config has been persisted",
"type": "string"
@@ -1822,6 +2019,14 @@
"description": "percent 0-100",
"type": "number"
},
+ "max_gpu_memory_usage": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
+ "max_gpu_usage": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
"max_memory_usage": {
"description": "percent 0-100",
"type": "number"
@@ -2114,6 +2319,30 @@
}
}
},
+ "github_com_datarhei_core_v16_cluster_store.Process": {
+ "type": "object",
+ "properties": {
+ "config": {
+ "$ref": "#/definitions/app.Config"
+ },
+ "createdAt": {
+ "type": "string"
+ },
+ "error": {
+ "type": "string"
+ },
+ "metadata": {
+ "type": "object",
+ "additionalProperties": true
+ },
+ "order": {
+ "type": "string"
+ },
+ "updatedAt": {
+ "type": "string"
+ }
+ }
+ },
"identity.Auth0Tenant": {
"type": "object",
"properties": {
@@ -2210,6 +2439,32 @@
}
}
},
+ "policy.Policy": {
+ "type": "object",
+ "properties": {
+ "actions": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "domain": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string"
+ },
+ "resource": {
+ "type": "string"
+ },
+ "types": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
"skills.Codec": {
"type": "object",
"properties": {
diff --git a/cluster/docs/ClusterAPI_swagger.yaml b/cluster/docs/ClusterAPI_swagger.yaml
index c0628aca..4374ccca 100644
--- a/cluster/docs/ClusterAPI_swagger.yaml
+++ b/cluster/docs/ClusterAPI_swagger.yaml
@@ -1,22 +1,5 @@
basePath: /
definitions:
- access.Policy:
- properties:
- actions:
- items:
- type: string
- type: array
- domain:
- type: string
- name:
- type: string
- resource:
- type: string
- types:
- items:
- type: string
- type: array
- type: object
app.Config:
properties:
autostart:
@@ -34,6 +17,10 @@ definitions:
limitCPU:
description: percent
type: number
+ limitGPU:
+ allOf:
+ - $ref: '#/definitions/app.ConfigLimitGPU'
+ description: GPU limits
limitMemory:
description: bytes
type: integer
@@ -41,7 +28,7 @@ definitions:
description: seconds
type: integer
logPatterns:
- description: will we interpreted as regular expressions
+ description: will be interpreted as regular expressions
items:
type: string
type: array
@@ -98,6 +85,96 @@ definitions:
purgeOnDelete:
type: boolean
type: object
+ app.ConfigLimitGPU:
+ properties:
+ decoder:
+ description: percent 0-100
+ type: number
+ encoder:
+ description: percent 0-100
+ type: number
+ memory:
+ description: bytes
+ type: integer
+ usage:
+ description: percent 0-100
+ type: number
+ type: object
+ client.AboutResponse:
+ properties:
+ address:
+ type: string
+ id:
+ type: string
+ resources:
+ $ref: '#/definitions/client.AboutResponseResources'
+ started_at:
+ type: string
+ version:
+ type: string
+ type: object
+ client.AboutResponseGPUResources:
+ properties:
+ decoder:
+ description: Current decoder usage, 0-100
+ type: number
+ encoder:
+ description: Current encoder usage, 0-100
+ type: number
+ memory_bytes:
+ description: Currently used memory in bytes
+ type: integer
+ memory_limit_bytes:
+ description: Defined memory limit in bytes
+ type: integer
+ memory_total_bytes:
+ description: Total available memory in bytes
+ type: integer
+ usage:
+ description: Current general usage, 0-100
+ type: number
+ usage_limit:
+ description: Defined general usage limit, 0-100
+ type: number
+ type: object
+ client.AboutResponseResources:
+ properties:
+ cpu:
+ description: Current CPU load, 0-100*ncpu
+ type: number
+ cpu_core:
+ description: Current CPU load of the core itself, 0-100*ncpu
+ type: number
+ cpu_limit:
+ description: Defined CPU load limit, 0-100*ncpu
+ type: number
+ error:
+ description: Last error
+ type: string
+ gpu:
+ description: Currently used GPU resources
+ items:
+ $ref: '#/definitions/client.AboutResponseGPUResources'
+ type: array
+ is_throttling:
+ description: Whether this core is currently throttling
+ type: boolean
+ memory_bytes:
+ description: Currently used memory in bytes
+ type: integer
+ memory_core_bytes:
+ description: Current used memory of the core itself in bytes
+ type: integer
+ memory_limit_bytes:
+ description: Defined memory limit in bytes
+ type: integer
+ memory_total_bytes:
+ description: Total available memory in bytes
+ type: integer
+ ncpu:
+ description: Number of CPU on this node
+ type: number
+ type: object
client.AddIdentityRequest:
properties:
identity:
@@ -108,6 +185,13 @@ definitions:
config:
$ref: '#/definitions/app.Config'
type: object
+ client.GetProcessResponse:
+ properties:
+ nodeid:
+ type: string
+ process:
+ $ref: '#/definitions/github_com_datarhei_core_v16_cluster_store.Process'
+ type: object
client.JoinRequest:
properties:
id:
@@ -145,7 +229,7 @@ definitions:
properties:
policies:
items:
- $ref: '#/definitions/access.Policy'
+ $ref: '#/definitions/policy.Policy'
type: array
type: object
client.SetProcessCommandRequest:
@@ -273,6 +357,19 @@ definitions:
format: int64
type: integer
type: object
+ compress:
+ properties:
+ encoding:
+ items:
+ type: string
+ type: array
+ mimetypes:
+ items:
+ type: string
+ type: array
+ min_length:
+ type: integer
+ type: object
created_at:
description: When this config has been persisted
type: string
@@ -404,6 +501,12 @@ definitions:
max_cpu_usage:
description: percent 0-100
type: number
+ max_gpu_memory_usage:
+ description: percent 0-100
+ type: number
+ max_gpu_usage:
+ description: percent 0-100
+ type: number
max_memory_usage:
description: percent 0-100
type: number
@@ -599,6 +702,22 @@ definitions:
format: int64
type: integer
type: object
+ github_com_datarhei_core_v16_cluster_store.Process:
+ properties:
+ config:
+ $ref: '#/definitions/app.Config'
+ createdAt:
+ type: string
+ error:
+ type: string
+ metadata:
+ additionalProperties: true
+ type: object
+ order:
+ type: string
+ updatedAt:
+ type: string
+ type: object
identity.Auth0Tenant:
properties:
audience:
@@ -662,6 +781,23 @@ definitions:
type: string
type: array
type: object
+ policy.Policy:
+ properties:
+ actions:
+ items:
+ type: string
+ type: array
+ domain:
+ type: string
+ name:
+ type: string
+ resource:
+ type: string
+ types:
+ items:
+ type: string
+ type: array
+ type: object
skills.Codec:
properties:
decoders:
@@ -906,7 +1042,7 @@ paths:
"200":
description: OK
schema:
- type: string
+ $ref: '#/definitions/client.AboutResponse'
"500":
description: Internal Server Error
schema:
@@ -1410,6 +1546,45 @@ paths:
summary: Remove a process
tags:
- v1.0.0
+ get:
+ description: Get a process from the cluster DB
+ operationId: cluster-1-get-process
+ parameters:
+ - description: Process ID
+ in: path
+ name: id
+ required: true
+ type: string
+ - description: Domain to act on
+ in: query
+ name: domain
+ type: string
+ - description: Origin ID of request
+ in: header
+ name: X-Cluster-Origin
+ type: string
+ produces:
+ - application/json
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/client.GetProcessResponse'
+ "404":
+ description: Not Found
+ schema:
+ $ref: '#/definitions/cluster.Error'
+ "500":
+ description: Internal Server Error
+ schema:
+ $ref: '#/definitions/cluster.Error'
+ "508":
+ description: Loop Detected
+ schema:
+ $ref: '#/definitions/cluster.Error'
+ summary: Get a process
+ tags:
+ - v1.0.0
put:
consumes:
- application/json
diff --git a/cluster/leader.go b/cluster/leader.go
index 5358d03e..0c74e04c 100644
--- a/cluster/leader.go
+++ b/cluster/leader.go
@@ -183,6 +183,22 @@ func (c *cluster) monitorLeadership() {
c.leaderLock.Unlock()
}
case <-c.shutdownCh:
+ if weAreFollowerCh != nil {
+ close(weAreFollowerCh)
+ }
+
+ if weAreLeaderCh != nil {
+ close(weAreLeaderCh)
+ }
+
+ if weAreEmergencyLeaderCh != nil {
+ close(weAreEmergencyLeaderCh)
+ }
+
+ leaderLoop.Wait()
+ emergencyLeaderLoop.Wait()
+ followerLoop.Wait()
+
return
}
}
diff --git a/cluster/leader_rebalance.go b/cluster/leader_rebalance.go
index c583f1ac..3ef2b8f7 100644
--- a/cluster/leader_rebalance.go
+++ b/cluster/leader_rebalance.go
@@ -78,7 +78,7 @@ func rebalance(have []node.Process, nodes map[string]node.About) ([]interface{},
// Mark nodes as throttling where at least one process is still throttling
for _, haveP := range have {
- if haveP.Throttling {
+ if haveP.Resources.Throttling {
resources.Throttling(haveP.NodeID, true)
}
}
@@ -126,7 +126,7 @@ func rebalance(have []node.Process, nodes map[string]node.About) ([]interface{},
continue
}
- if resources.HasNodeEnough(raNodeid, p.Config.LimitCPU, p.Config.LimitMemory) {
+ if resources.HasNodeEnough(raNodeid, ResourcesFromConfig(p.Config)) {
availableNodeid = raNodeid
break
}
@@ -135,7 +135,7 @@ func rebalance(have []node.Process, nodes map[string]node.About) ([]interface{},
// Find the best node with enough resources available.
if len(availableNodeid) == 0 {
- nodes := resources.FindBestNodes(p.Config.LimitCPU, p.Config.LimitMemory)
+ nodes := resources.FindBestNodes(ResourcesFromConfig(p.Config))
for _, nodeid := range nodes {
if nodeid == overloadedNodeid {
continue
@@ -169,7 +169,7 @@ func rebalance(have []node.Process, nodes map[string]node.About) ([]interface{},
processes[i] = p
// Adjust the resources.
- resources.Move(availableNodeid, overloadedNodeid, p.CPU, p.Mem)
+ resources.Move(availableNodeid, overloadedNodeid, ResourcesFromProcess(p.Resources))
// Adjust the reference affinity.
haveReferenceAffinity.Move(p.Config.Reference, p.Config.Domain, overloadedNodeid, availableNodeid)
diff --git a/cluster/leader_relocate.go b/cluster/leader_relocate.go
index 285a087f..27ab847b 100644
--- a/cluster/leader_relocate.go
+++ b/cluster/leader_relocate.go
@@ -95,7 +95,7 @@ func relocate(have []node.Process, nodes map[string]node.About, relocateMap map[
// Mark nodes as throttling where at least one process is still throttling
for _, haveP := range have {
- if haveP.Throttling {
+ if haveP.Resources.Throttling {
resources.Throttling(haveP.NodeID, true)
}
}
@@ -106,6 +106,7 @@ func relocate(have []node.Process, nodes map[string]node.About, relocateMap map[
haveReferenceAffinity := NewReferenceAffinity(have)
opStack := []interface{}{}
+ opBudget := 100
// Check for any requested relocations.
for processid, targetNodeid := range relocateMap {
@@ -135,7 +136,7 @@ func relocate(have []node.Process, nodes map[string]node.About, relocateMap map[
if len(targetNodeid) != 0 {
_, hasNode := nodes[targetNodeid]
- if !hasNode || !resources.HasNodeEnough(targetNodeid, process.Config.LimitCPU, process.Config.LimitMemory) {
+ if !hasNode || !resources.HasNodeEnough(targetNodeid, ResourcesFromConfig(process.Config)) {
targetNodeid = ""
}
}
@@ -151,7 +152,7 @@ func relocate(have []node.Process, nodes map[string]node.About, relocateMap map[
continue
}
- if resources.HasNodeEnough(raNodeid, process.Config.LimitCPU, process.Config.LimitMemory) {
+ if resources.HasNodeEnough(raNodeid, ResourcesFromConfig(process.Config)) {
targetNodeid = raNodeid
break
}
@@ -160,7 +161,7 @@ func relocate(have []node.Process, nodes map[string]node.About, relocateMap map[
// Find the best node with enough resources available.
if len(targetNodeid) == 0 {
- nodes := resources.FindBestNodes(process.Config.LimitCPU, process.Config.LimitMemory)
+ nodes := resources.FindBestNodes(ResourcesFromConfig(process.Config))
for _, nodeid := range nodes {
if nodeid == sourceNodeid {
continue
@@ -190,8 +191,10 @@ func relocate(have []node.Process, nodes map[string]node.About, relocateMap map[
order: process.Order,
})
+ opBudget -= 5
+
// Adjust the resources.
- resources.Move(targetNodeid, sourceNodeid, process.CPU, process.Mem)
+ resources.Move(targetNodeid, sourceNodeid, ResourcesFromProcess(process.Resources))
// Adjust the reference affinity.
haveReferenceAffinity.Move(process.Config.Reference, process.Config.Domain, sourceNodeid, targetNodeid)
@@ -199,7 +202,9 @@ func relocate(have []node.Process, nodes map[string]node.About, relocateMap map[
relocatedProcessIDs = append(relocatedProcessIDs, processid)
// Move only one process at a time.
- break
+ if opBudget <= 0 {
+ break
+ }
}
return opStack, resources.Map(), relocatedProcessIDs
diff --git a/cluster/leader_synchronize.go b/cluster/leader_synchronize.go
index b597d78e..c56e4ad8 100644
--- a/cluster/leader_synchronize.go
+++ b/cluster/leader_synchronize.go
@@ -143,7 +143,7 @@ func synchronize(wish map[string]string, want []store.Process, have []node.Proce
// Mark nodes as throttling where at least one process is still throttling
for _, haveP := range have {
- if haveP.Throttling {
+ if haveP.Resources.Throttling {
resources.Throttling(haveP.NodeID, true)
}
}
@@ -182,7 +182,7 @@ func synchronize(wish map[string]string, want []store.Process, have []node.Proce
processid: haveP.Config.ProcessID(),
})
- resources.Remove(haveP.NodeID, haveP.CPU, haveP.Mem)
+ resources.Remove(haveP.NodeID, ResourcesFromProcess(haveP.Resources))
continue
}
@@ -219,7 +219,7 @@ func synchronize(wish map[string]string, want []store.Process, have []node.Proce
})
// Release the resources.
- resources.Remove(haveP.NodeID, haveP.CPU, haveP.Mem)
+ resources.Remove(haveP.NodeID, ResourcesFromProcess(haveP.Resources))
}
}
@@ -229,7 +229,7 @@ func synchronize(wish map[string]string, want []store.Process, have []node.Proce
for _, haveP := range wantOrderStart {
nodeid := haveP.NodeID
- resources.Add(nodeid, haveP.Config.LimitCPU, haveP.Config.LimitMemory)
+ resources.Add(nodeid, ResourcesFromConfig(haveP.Config))
// TODO: check if the current node has actually enough resources available,
// otherwise it needs to be moved somewhere else. If the node doesn't
@@ -347,7 +347,7 @@ func synchronize(wish map[string]string, want []store.Process, have []node.Proce
// Try to add the process to a node where other processes with the same reference currently reside.
raNodes := haveReferenceAffinity.Nodes(wantP.Config.Reference, wantP.Config.Domain)
for _, raNodeid := range raNodes {
- if resources.HasNodeEnough(raNodeid, wantP.Config.LimitCPU, wantP.Config.LimitMemory) {
+ if resources.HasNodeEnough(raNodeid, ResourcesFromConfig(wantP.Config)) {
nodeid = raNodeid
break
}
@@ -355,7 +355,7 @@ func synchronize(wish map[string]string, want []store.Process, have []node.Proce
// Find the node with the most resources available.
if len(nodeid) == 0 {
- nodes := resources.FindBestNodes(wantP.Config.LimitCPU, wantP.Config.LimitMemory)
+ nodes := resources.FindBestNodes(ResourcesFromConfig(wantP.Config))
if len(nodes) > 0 {
nodeid = nodes[0]
}
@@ -372,7 +372,7 @@ func synchronize(wish map[string]string, want []store.Process, have []node.Proce
opBudget -= 3
// Consume the resources
- resources.Add(nodeid, wantP.Config.LimitCPU, wantP.Config.LimitMemory)
+ resources.Add(nodeid, ResourcesFromConfig(wantP.Config))
reality[pid] = nodeid
diff --git a/cluster/leader_test.go b/cluster/leader_test.go
index 4f1d6bba..af17d9a6 100644
--- a/cluster/leader_test.go
+++ b/cluster/leader_test.go
@@ -193,11 +193,13 @@ func TestSynchronizeOrderStop(t *testing.T) {
have := []node.Process{
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 12,
- Mem: 5,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 12,
+ Mem: 5,
+ },
Runtime: 42,
UpdatedAt: now,
Config: &app.Config{
@@ -285,11 +287,13 @@ func TestSynchronizeOrderStart(t *testing.T) {
have := []node.Process{
{
- NodeID: "node1",
- Order: "stop",
- State: "finished",
- CPU: 0,
- Mem: 0,
+ NodeID: "node1",
+ Order: "stop",
+ State: "finished",
+ Resources: node.ProcessResources{
+ CPU: 0,
+ Mem: 0,
+ },
Runtime: 42,
UpdatedAt: now,
Config: &app.Config{
@@ -388,11 +392,13 @@ func TestSynchronizeAddReferenceAffinity(t *testing.T) {
have := []node.Process{
{
- NodeID: "node2",
- Order: "start",
- State: "running",
- CPU: 12,
- Mem: 5,
+ NodeID: "node2",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 12,
+ Mem: 5,
+ },
Runtime: 42,
UpdatedAt: now,
Config: &app.Config{
@@ -490,11 +496,13 @@ func TestSynchronizeAddReferenceAffinityMultiple(t *testing.T) {
have := []node.Process{
{
- NodeID: "node2",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node2",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 2,
+ },
Runtime: 42,
UpdatedAt: now,
Config: &app.Config{
@@ -882,11 +890,13 @@ func TestSynchronizeRemove(t *testing.T) {
have := []node.Process{
{
- NodeID: "node2",
- Order: "start",
- State: "running",
- CPU: 12,
- Mem: 5,
+ NodeID: "node2",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 12,
+ Mem: 5,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar",
@@ -967,11 +977,13 @@ func TestSynchronizeAddRemove(t *testing.T) {
have := []node.Process{
{
- NodeID: "node2",
- Order: "start",
- State: "running",
- CPU: 12,
- Mem: 5,
+ NodeID: "node2",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 12,
+ Mem: 5,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar2",
@@ -1064,11 +1076,13 @@ func TestSynchronizeNoUpdate(t *testing.T) {
have := []node.Process{
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 12,
- Mem: 5,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 12,
+ Mem: 5,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar",
@@ -1133,11 +1147,13 @@ func TestSynchronizeUpdate(t *testing.T) {
have := []node.Process{
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 12,
- Mem: 5,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 12,
+ Mem: 5,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar",
@@ -1217,11 +1233,13 @@ func TestSynchronizeUpdateMetadata(t *testing.T) {
have := []node.Process{
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 12,
- Mem: 5,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 12,
+ Mem: 5,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar",
@@ -1313,11 +1331,13 @@ func TestSynchronizeWaitDisconnectedNode(t *testing.T) {
have := []node.Process{
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 12,
- Mem: 5,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 12,
+ Mem: 5,
+ },
Runtime: 42,
UpdatedAt: now,
Config: &app.Config{
@@ -1397,11 +1417,13 @@ func TestSynchronizeWaitDisconnectedNodeNoWish(t *testing.T) {
have := []node.Process{
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 12,
- Mem: 5,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 12,
+ Mem: 5,
+ },
Runtime: 42,
UpdatedAt: now,
Config: &app.Config{
@@ -1493,11 +1515,13 @@ func TestSynchronizeWaitDisconnectedNodeUnrealisticWish(t *testing.T) {
have := []node.Process{
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 12,
- Mem: 5,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 12,
+ Mem: 5,
+ },
Runtime: 42,
UpdatedAt: now,
Config: &app.Config{
@@ -1589,11 +1613,13 @@ func TestSynchronizeTimeoutDisconnectedNode(t *testing.T) {
have := []node.Process{
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 12,
- Mem: 5,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 12,
+ Mem: 5,
+ },
Runtime: 42,
UpdatedAt: now,
Config: &app.Config{
@@ -1655,22 +1681,26 @@ func TestSynchronizeTimeoutDisconnectedNode(t *testing.T) {
func TestRebalanceNothingToDo(t *testing.T) {
processes := []node.Process{
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 35,
- Mem: 20,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 35,
+ Mem: 20,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar1",
},
},
{
- NodeID: "node2",
- Order: "start",
- State: "running",
- CPU: 12,
- Mem: 5,
+ NodeID: "node2",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 12,
+ Mem: 5,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar2",
@@ -1711,33 +1741,39 @@ func TestRebalanceNothingToDo(t *testing.T) {
func TestRebalanceOverload(t *testing.T) {
processes := []node.Process{
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 35,
- Mem: 20,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 35,
+ Mem: 20,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar1",
},
},
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 17,
- Mem: 31,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 17,
+ Mem: 31,
+ },
Runtime: 27,
Config: &app.Config{
ID: "foobar3",
},
},
{
- NodeID: "node2",
- Order: "start",
- State: "running",
- CPU: 12,
- Mem: 5,
+ NodeID: "node2",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 12,
+ Mem: 5,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar2",
@@ -1806,33 +1842,39 @@ func TestRebalanceOverload(t *testing.T) {
func TestRebalanceSkip(t *testing.T) {
processes := []node.Process{
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 35,
- Mem: 20,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 35,
+ Mem: 20,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar1",
},
},
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 17,
- Mem: 31,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 17,
+ Mem: 31,
+ },
Runtime: 27,
Config: &app.Config{
ID: "foobar3",
},
},
{
- NodeID: "node2",
- Order: "start",
- State: "running",
- CPU: 12,
- Mem: 5,
+ NodeID: "node2",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 12,
+ Mem: 5,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar2",
@@ -1908,22 +1950,26 @@ func TestRebalanceSkip(t *testing.T) {
func TestRebalanceReferenceAffinity(t *testing.T) {
processes := []node.Process{
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar1",
},
},
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 1,
Config: &app.Config{
ID: "foobar2",
@@ -1931,11 +1977,13 @@ func TestRebalanceReferenceAffinity(t *testing.T) {
},
},
{
- NodeID: "node2",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node2",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar3",
@@ -1943,11 +1991,13 @@ func TestRebalanceReferenceAffinity(t *testing.T) {
},
},
{
- NodeID: "node3",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node3",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar4",
@@ -1955,11 +2005,13 @@ func TestRebalanceReferenceAffinity(t *testing.T) {
},
},
{
- NodeID: "node3",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node3",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar5",
@@ -2048,33 +2100,39 @@ func TestRebalanceReferenceAffinity(t *testing.T) {
func TestRebalanceRelocateTarget(t *testing.T) {
processes := []node.Process{
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 35,
- Mem: 20,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 35,
+ Mem: 20,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar1",
},
},
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 17,
- Mem: 31,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 17,
+ Mem: 31,
+ },
Runtime: 27,
Config: &app.Config{
ID: "foobar3",
},
},
{
- NodeID: "node2",
- Order: "start",
- State: "running",
- CPU: 12,
- Mem: 5,
+ NodeID: "node2",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 12,
+ Mem: 5,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar2",
@@ -2165,33 +2223,39 @@ func TestRebalanceRelocateTarget(t *testing.T) {
func TestRebalanceRelocateAny(t *testing.T) {
processes := []node.Process{
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 35,
- Mem: 20,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 35,
+ Mem: 20,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar1",
},
},
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 17,
- Mem: 31,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 17,
+ Mem: 31,
+ },
Runtime: 27,
Config: &app.Config{
ID: "foobar3",
},
},
{
- NodeID: "node2",
- Order: "start",
- State: "running",
- CPU: 12,
- Mem: 5,
+ NodeID: "node2",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 12,
+ Mem: 5,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar2",
@@ -2319,7 +2383,10 @@ func TestFindBestNodesForProcess(t *testing.T) {
resources := NewResourcePlanner(nodes)
- list := resources.FindBestNodes(35, 20)
+ list := resources.FindBestNodes(Resources{
+ CPU: 35,
+ Mem: 20,
+ })
require.Equal(t, []string{"node3", "node2", "node1"}, list)
}
@@ -2433,7 +2500,10 @@ func TestFindBestNodesForProcess2(t *testing.T) {
},
}
- list := resources.FindBestNodes(4.0, 45*1024*1024)
+ list := resources.FindBestNodes(Resources{
+ CPU: 4.0,
+ Mem: 45 * 1024 * 1024,
+ })
require.Equal(t, []string{"node10", "node8", "node7", "node1", "node5", "node12", "node4", "node3", "node13", "node6", "node11", "node2"}, list)
}
@@ -2441,11 +2511,13 @@ func TestFindBestNodesForProcess2(t *testing.T) {
func TestCreateNodeProcessMap(t *testing.T) {
processes := []node.Process{
{
- NodeID: "node1",
- Order: "start",
- State: "finished",
- CPU: 1,
- Mem: 1,
+ NodeID: "node1",
+ Order: "start",
+ State: "finished",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 1,
Config: &app.Config{
ID: "foobar7",
@@ -2453,11 +2525,13 @@ func TestCreateNodeProcessMap(t *testing.T) {
},
},
{
- NodeID: "node1",
- Order: "start",
- State: "failed",
- CPU: 1,
- Mem: 1,
+ NodeID: "node1",
+ Order: "start",
+ State: "failed",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 1,
Config: &app.Config{
ID: "foobar8",
@@ -2465,22 +2539,26 @@ func TestCreateNodeProcessMap(t *testing.T) {
},
},
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar1",
},
},
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 1,
Config: &app.Config{
ID: "foobar2",
@@ -2488,11 +2566,13 @@ func TestCreateNodeProcessMap(t *testing.T) {
},
},
{
- NodeID: "node2",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node2",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 67,
Config: &app.Config{
ID: "foobar3",
@@ -2500,11 +2580,13 @@ func TestCreateNodeProcessMap(t *testing.T) {
},
},
{
- NodeID: "node2",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node2",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar6",
@@ -2512,11 +2594,13 @@ func TestCreateNodeProcessMap(t *testing.T) {
},
},
{
- NodeID: "node3",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node3",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 41,
Config: &app.Config{
ID: "foobar4",
@@ -2524,11 +2608,13 @@ func TestCreateNodeProcessMap(t *testing.T) {
},
},
{
- NodeID: "node3",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node3",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar5",
@@ -2542,11 +2628,13 @@ func TestCreateNodeProcessMap(t *testing.T) {
require.Equal(t, map[string][]node.Process{
"node1": {
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 1,
Config: &app.Config{
ID: "foobar2",
@@ -2554,11 +2642,13 @@ func TestCreateNodeProcessMap(t *testing.T) {
},
},
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar1",
@@ -2567,11 +2657,13 @@ func TestCreateNodeProcessMap(t *testing.T) {
},
"node2": {
{
- NodeID: "node2",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node2",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar6",
@@ -2579,11 +2671,13 @@ func TestCreateNodeProcessMap(t *testing.T) {
},
},
{
- NodeID: "node2",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node2",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 67,
Config: &app.Config{
ID: "foobar3",
@@ -2593,11 +2687,13 @@ func TestCreateNodeProcessMap(t *testing.T) {
},
"node3": {
{
- NodeID: "node3",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node3",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 41,
Config: &app.Config{
ID: "foobar4",
@@ -2605,11 +2701,13 @@ func TestCreateNodeProcessMap(t *testing.T) {
},
},
{
- NodeID: "node3",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node3",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar5",
@@ -2623,22 +2721,26 @@ func TestCreateNodeProcessMap(t *testing.T) {
func TestCreateReferenceAffinityNodeMap(t *testing.T) {
processes := []node.Process{
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar1",
},
},
{
- NodeID: "node1",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node1",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 1,
Config: &app.Config{
ID: "foobar2",
@@ -2646,11 +2748,13 @@ func TestCreateReferenceAffinityNodeMap(t *testing.T) {
},
},
{
- NodeID: "node2",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node2",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar3",
@@ -2658,11 +2762,13 @@ func TestCreateReferenceAffinityNodeMap(t *testing.T) {
},
},
{
- NodeID: "node2",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node2",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar3",
@@ -2670,11 +2776,13 @@ func TestCreateReferenceAffinityNodeMap(t *testing.T) {
},
},
{
- NodeID: "node3",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node3",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar4",
@@ -2682,11 +2790,13 @@ func TestCreateReferenceAffinityNodeMap(t *testing.T) {
},
},
{
- NodeID: "node3",
- Order: "start",
- State: "running",
- CPU: 1,
- Mem: 1,
+ NodeID: "node3",
+ Order: "start",
+ State: "running",
+ Resources: node.ProcessResources{
+ CPU: 1,
+ Mem: 1,
+ },
Runtime: 42,
Config: &app.Config{
ID: "foobar5",
diff --git a/cluster/node/cache.go b/cluster/node/cache.go
index bce3db5c..24041f23 100644
--- a/cluster/node/cache.go
+++ b/cluster/node/cache.go
@@ -4,17 +4,9 @@ import (
"errors"
"sync"
"time"
-)
-
-type TimeSource interface {
- Now() time.Time
-}
-type StdTimeSource struct{}
-
-func (s *StdTimeSource) Now() time.Time {
- return time.Now()
-}
+ timesrc "github.com/datarhei/core/v16/time"
+)
type CacheEntry[T any] struct {
value T
@@ -22,20 +14,20 @@ type CacheEntry[T any] struct {
}
type Cache[T any] struct {
- ts TimeSource
+ ts timesrc.Source
lock sync.Mutex
entries map[string]CacheEntry[T]
lastPurge time.Time
}
-func NewCache[T any](ts TimeSource) *Cache[T] {
+func NewCache[T any](ts timesrc.Source) *Cache[T] {
c := &Cache[T]{
ts: ts,
entries: map[string]CacheEntry[T]{},
}
if c.ts == nil {
- c.ts = &StdTimeSource{}
+ c.ts = &timesrc.StdSource{}
}
c.lastPurge = c.ts.Now()
diff --git a/cluster/node/cache_test.go b/cluster/node/cache_test.go
index 2c8d7a8f..444f60d8 100644
--- a/cluster/node/cache_test.go
+++ b/cluster/node/cache_test.go
@@ -4,20 +4,14 @@ import (
"testing"
"time"
+ timesrc "github.com/datarhei/core/v16/time"
+
"github.com/stretchr/testify/require"
)
-type testTimeSource struct {
- now time.Time
-}
-
-func (t *testTimeSource) Now() time.Time {
- return t.now
-}
-
func TestCache(t *testing.T) {
- ts := &testTimeSource{
- now: time.Unix(0, 0),
+ ts := &timesrc.TestSource{
+ N: time.Unix(0, 0),
}
c := NewCache[string](ts)
@@ -31,21 +25,21 @@ func TestCache(t *testing.T) {
require.NoError(t, err)
require.Equal(t, "bar", v)
- ts.now = time.Unix(10, 0)
+ ts.Set(10, 0)
v, err = c.Get("foo")
require.NoError(t, err)
require.Equal(t, "bar", v)
- ts.now = time.Unix(11, 0)
+ ts.Set(11, 0)
_, err = c.Get("foo")
require.Error(t, err)
}
func TestCachePurge(t *testing.T) {
- ts := &testTimeSource{
- now: time.Unix(0, 0),
+ ts := &timesrc.TestSource{
+ N: time.Unix(0, 0),
}
c := NewCache[string](ts)
@@ -56,14 +50,14 @@ func TestCachePurge(t *testing.T) {
require.NoError(t, err)
require.Equal(t, "bar", v)
- ts.now = time.Unix(59, 0)
+ ts.Set(59, 0)
c.Put("foz", "boz", 10*time.Second)
_, ok := c.entries["foo"]
require.True(t, ok)
- ts.now = time.Unix(61, 0)
+ ts.Set(61, 0)
c.Put("foz", "boz", 10*time.Second)
diff --git a/cluster/node/core.go b/cluster/node/core.go
index 2af52762..9dc87e87 100644
--- a/cluster/node/core.go
+++ b/cluster/node/core.go
@@ -164,8 +164,9 @@ func (n *Core) connect() error {
Address: u.String(),
Client: &http.Client{
Transport: tr,
- Timeout: 5 * time.Second,
+ Timeout: 0,
},
+ Timeout: 5 * time.Second,
})
if err != nil {
return fmt.Errorf("creating client failed (%s): %w", address, err)
@@ -267,7 +268,6 @@ type CoreVersion struct {
}
func (n *Core) About() (CoreAbout, error) {
-
n.lock.RLock()
client := n.client
n.lock.RUnlock()
@@ -747,16 +747,62 @@ func (n *Core) MediaGetInfo(prefix, path string) (int64, time.Time, error) {
}
type Process struct {
- NodeID string
- Order string
- State string
+ NodeID string
+ Order string
+ State string
+ Resources ProcessResources
+ Runtime time.Duration
+ UpdatedAt time.Time
+ Config *app.Config
+ Metadata map[string]interface{}
+}
+
+type ProcessResources struct {
CPU float64 // Current CPU load of this process, 0-100*ncpu
Mem uint64 // Currently consumed memory of this process in bytes
+ GPU ProcessGPUResources
Throttling bool
- Runtime time.Duration
- UpdatedAt time.Time
- Config *app.Config
- Metadata map[string]interface{}
+}
+
+type ProcessGPUResources struct {
+ Index int // GPU number
+ Usage float64 // Current GPU load, 0-100
+ Encoder float64 // Current GPU encoder load, 0-100
+ Decoder float64 // Current GPU decoder load, 0-100
+ Mem uint64 // Currently consumed GPU memory of this process in bytes
+}
+
+func (p *ProcessResources) Marshal(a *api.ProcessUsage) {
+ p.Throttling = a.CPU.IsThrottling
+
+ if x, err := a.CPU.Current.Float64(); err == nil {
+ p.CPU = x
+ } else {
+ p.CPU = 0
+ }
+
+ p.Mem = a.Memory.Current
+
+ if x, err := a.GPU.Usage.Current.Float64(); err == nil {
+ p.GPU.Usage = x
+ } else {
+ p.GPU.Usage = 0
+ }
+
+ if x, err := a.GPU.Encoder.Current.Float64(); err == nil {
+ p.GPU.Encoder = x
+ } else {
+ p.GPU.Encoder = 0
+ }
+
+ if x, err := a.GPU.Decoder.Current.Float64(); err == nil {
+ p.GPU.Decoder = x
+ } else {
+ p.GPU.Decoder = 0
+ }
+
+ p.GPU.Mem = a.GPU.Memory.Current
+ p.GPU.Index = a.GPU.Index
}
func (n *Core) ClusterProcessList() ([]Process, error) {
@@ -780,22 +826,16 @@ func (n *Core) ClusterProcessList() ([]Process, error) {
p.Config = &api.ProcessConfig{}
}
- cpu, err := p.State.Resources.CPU.Current.Float64()
- if err != nil {
- cpu = 0
- }
-
process := Process{
- NodeID: nodeid,
- Order: p.State.Order,
- State: p.State.State,
- Mem: p.State.Resources.Memory.Current,
- CPU: cpu,
- Throttling: p.State.Resources.CPU.IsThrottling,
- Runtime: time.Duration(p.State.Runtime) * time.Second,
- UpdatedAt: time.Unix(p.UpdatedAt, 0),
+ NodeID: nodeid,
+ Order: p.State.Order,
+ State: p.State.State,
+ Runtime: time.Duration(p.State.Runtime) * time.Second,
+ UpdatedAt: time.Unix(p.UpdatedAt, 0),
}
+ process.Resources.Marshal(&p.State.Resources)
+
config, _ := p.Config.Marshal()
process.Config = config
@@ -808,3 +848,15 @@ func (n *Core) ClusterProcessList() ([]Process, error) {
return processes, nil
}
+
+func (n *Core) Events(ctx context.Context, filters api.EventFilters) (<-chan api.Event, error) {
+ n.lock.RLock()
+ client := n.client
+ n.lock.RUnlock()
+
+ if client == nil {
+ return nil, ErrNoPeer
+ }
+
+ return client.Events(ctx, filters)
+}
diff --git a/cluster/node/manager.go b/cluster/node/manager.go
index 5518a59e..d9eefc3e 100644
--- a/cluster/node/manager.go
+++ b/cluster/node/manager.go
@@ -1,6 +1,7 @@
package node
import (
+ "context"
"errors"
"fmt"
"io"
@@ -154,7 +155,7 @@ func (p *Manager) NodeGet(id string) (*Node, error) {
node, ok := p.nodes[id]
if !ok {
- return nil, fmt.Errorf("node not found")
+ return nil, fmt.Errorf("node not found: %s", id)
}
return node, nil
@@ -538,7 +539,7 @@ func (p *Manager) ProcessList(options client.ProcessListOptions) []api.Process {
func (p *Manager) ProcessGet(nodeid string, id app.ProcessID, filter []string) (api.Process, error) {
node, err := p.NodeGet(nodeid)
if err != nil {
- return api.Process{}, fmt.Errorf("node not found: %w", err)
+ return api.Process{}, err
}
list, err := node.Core().ProcessList(client.ProcessListOptions{
@@ -550,13 +551,17 @@ func (p *Manager) ProcessGet(nodeid string, id app.ProcessID, filter []string) (
return api.Process{}, err
}
+ if len(list) == 0 {
+ return api.Process{}, fmt.Errorf("process not found")
+ }
+
return list[0], nil
}
func (p *Manager) ProcessAdd(nodeid string, config *app.Config, metadata map[string]interface{}) error {
node, err := p.NodeGet(nodeid)
if err != nil {
- return fmt.Errorf("node not found: %w", err)
+ return err
}
return node.Core().ProcessAdd(config, metadata)
@@ -565,7 +570,7 @@ func (p *Manager) ProcessAdd(nodeid string, config *app.Config, metadata map[str
func (p *Manager) ProcessDelete(nodeid string, id app.ProcessID) error {
node, err := p.NodeGet(nodeid)
if err != nil {
- return fmt.Errorf("node not found: %w", err)
+ return err
}
return node.Core().ProcessDelete(id)
@@ -574,7 +579,7 @@ func (p *Manager) ProcessDelete(nodeid string, id app.ProcessID) error {
func (p *Manager) ProcessUpdate(nodeid string, id app.ProcessID, config *app.Config, metadata map[string]interface{}) error {
node, err := p.NodeGet(nodeid)
if err != nil {
- return fmt.Errorf("node not found: %w", err)
+ return err
}
return node.Core().ProcessUpdate(id, config, metadata)
@@ -583,7 +588,7 @@ func (p *Manager) ProcessUpdate(nodeid string, id app.ProcessID, config *app.Con
func (p *Manager) ProcessReportSet(nodeid string, id app.ProcessID, report *app.Report) error {
node, err := p.NodeGet(nodeid)
if err != nil {
- return fmt.Errorf("node not found: %w", err)
+ return err
}
return node.Core().ProcessReportSet(id, report)
@@ -592,7 +597,7 @@ func (p *Manager) ProcessReportSet(nodeid string, id app.ProcessID, report *app.
func (p *Manager) ProcessCommand(nodeid string, id app.ProcessID, command string) error {
node, err := p.NodeGet(nodeid)
if err != nil {
- return fmt.Errorf("node not found: %w", err)
+ return err
}
return node.Core().ProcessCommand(id, command)
@@ -604,7 +609,7 @@ func (p *Manager) ProcessProbe(nodeid string, id app.ProcessID) (api.Probe, erro
probe := api.Probe{
Log: []string{fmt.Sprintf("the node %s where the process %s should reside on, doesn't exist", nodeid, id.String())},
}
- return probe, fmt.Errorf("node not found: %w", err)
+ return probe, err
}
return node.Core().ProcessProbe(id)
@@ -616,8 +621,30 @@ func (p *Manager) ProcessProbeConfig(nodeid string, config *app.Config) (api.Pro
probe := api.Probe{
Log: []string{fmt.Sprintf("the node %s where the process config should be probed on, doesn't exist", nodeid)},
}
- return probe, fmt.Errorf("node not found: %w", err)
+ return probe, err
}
return node.Core().ProcessProbeConfig(config)
}
+
+func (p *Manager) Events(ctx context.Context, filters api.EventFilters) (<-chan api.Event, error) {
+ eventChan := make(chan api.Event, 128)
+
+ p.lock.RLock()
+ for _, n := range p.nodes {
+ go func(node *Node, e chan<- api.Event) {
+ eventChan, err := node.Core().Events(ctx, filters)
+ if err != nil {
+ return
+ }
+
+ for event := range eventChan {
+ event.CoreID = node.id
+ e <- event
+ }
+ }(n, eventChan)
+ }
+ p.lock.RUnlock()
+
+ return eventChan, nil
+}
diff --git a/cluster/node/node.go b/cluster/node/node.go
index 91bf2085..a455d6d1 100644
--- a/cluster/node/node.go
+++ b/cluster/node/node.go
@@ -141,17 +141,28 @@ type About struct {
SpawnedAt time.Time
}
+type ResourcesGPU struct {
+ Mem uint64 // Currently used memory in bytes
+ MemLimit uint64 // Defined memory limit in bytes
+ MemTotal uint64 // Total available memory in bytes
+ Usage float64 // Current general usage, 0-100
+ UsageLimit float64 // Defined general usage limit, 0-100
+ Encoder float64 // Current encoder usage, 0-100
+ Decoder float64 // Current decoder usage, 0-100
+}
+
type Resources struct {
- IsThrottling bool // Whether this core is currently throttling
- NCPU float64 // Number of CPU on this node
- CPU float64 // Current CPU load, 0-100*ncpu
- CPULimit float64 // Defined CPU load limit, 0-100*ncpu
- CPUCore float64 // Current CPU load of the core itself, 0-100*ncpu
- Mem uint64 // Currently used memory in bytes
- MemLimit uint64 // Defined memory limit in bytes
- MemTotal uint64 // Total available memory in bytes
- MemCore uint64 // Current used memory of the core itself in bytes
- Error error // Last error
+ IsThrottling bool // Whether this core is currently throttling
+ NCPU float64 // Number of CPU on this node
+ CPU float64 // Current CPU load, 0-100*ncpu
+ CPULimit float64 // Defined CPU load limit, 0-100*ncpu
+ CPUCore float64 // Current CPU load of the core itself, 0-100*ncpu
+ Mem uint64 // Currently used memory in bytes
+ MemLimit uint64 // Defined memory limit in bytes
+ MemTotal uint64 // Total available memory in bytes
+ MemCore uint64 // Current used memory of the core itself in bytes
+ GPU []ResourcesGPU // Currently used GPU resources
+ Error error // Last error
}
func (n *Node) About() About {
@@ -518,6 +529,20 @@ func (n *Node) ping(ctx context.Context, interval time.Duration) {
Error: nil,
},
}
+
+ if len(about.Resources.GPU) != 0 {
+ n.nodeAbout.Resources.GPU = make([]ResourcesGPU, len(about.Resources.GPU))
+ for i, gpu := range about.Resources.GPU {
+ n.nodeAbout.Resources.GPU[i].Mem = gpu.Mem
+ n.nodeAbout.Resources.GPU[i].MemLimit = gpu.MemLimit
+ n.nodeAbout.Resources.GPU[i].MemTotal = gpu.MemTotal
+ n.nodeAbout.Resources.GPU[i].Usage = gpu.Usage
+ n.nodeAbout.Resources.GPU[i].UsageLimit = gpu.UsageLimit
+ n.nodeAbout.Resources.GPU[i].Encoder = gpu.Encoder
+ n.nodeAbout.Resources.GPU[i].Decoder = gpu.Decoder
+ }
+ }
+
if len(about.Resources.Error) != 0 {
n.nodeAbout.Resources.Error = errors.New(about.Resources.Error)
}
diff --git a/cluster/raft/raft.go b/cluster/raft/raft.go
index 72c19cf3..ac31729e 100644
--- a/cluster/raft/raft.go
+++ b/cluster/raft/raft.go
@@ -320,18 +320,6 @@ func (r *raft) LeadershipTransfer(id string) error {
return nil
}
-type readCloserWrapper struct {
- io.Reader
-}
-
-func (rcw *readCloserWrapper) Read(p []byte) (int, error) {
- return rcw.Reader.Read(p)
-}
-
-func (rcw *readCloserWrapper) Close() error {
- return nil
-}
-
type Snapshot struct {
Metadata *hcraft.SnapshotMeta
Data string
@@ -361,14 +349,14 @@ func (r *raft) Snapshot() (io.ReadCloser, error) {
Data: base64.StdEncoding.EncodeToString(data),
}
- buffer := bytes.Buffer{}
- enc := json.NewEncoder(&buffer)
+ buffer := &bytes.Buffer{}
+ enc := json.NewEncoder(buffer)
err = enc.Encode(snapshot)
if err != nil {
return nil, err
}
- return &readCloserWrapper{&buffer}, nil
+ return io.NopCloser(buffer), nil
}
func (r *raft) start(fsm hcraft.FSM, peers []Peer, inmem bool) error {
diff --git a/cluster/resources.go b/cluster/resources.go
index 2b5bb2c9..cc81b828 100644
--- a/cluster/resources.go
+++ b/cluster/resources.go
@@ -4,8 +4,69 @@ import (
"sort"
"github.com/datarhei/core/v16/cluster/node"
+ "github.com/datarhei/core/v16/restream/app"
)
+type Resources struct {
+ CPU float64 // CPU 0-100*ncpu
+ Mem uint64 // Memory in bytes
+ GPU ResourcesGPU // GPU resources
+}
+
+type ResourcesGPU struct {
+ Index int // GPU number
+ Usage float64 // GPU general, 0-100
+ Encoder float64 // GPU encoder, 0-100
+ Decoder float64 // GPU decoder, 0-100
+ Mem uint64 // GPU memory in bytes
+}
+
+func ResourcesFromConfig(c *app.Config) Resources {
+ r := Resources{}
+ r.MarshalConfig(c)
+ return r
+}
+
+func ResourcesFromProcess(c node.ProcessResources) Resources {
+ r := Resources{}
+ r.MarshalProcess(c)
+ return r
+}
+
+func (r *Resources) MarshalConfig(c *app.Config) {
+ r.CPU = c.LimitCPU
+ r.Mem = c.LimitMemory
+ r.GPU.Usage = c.LimitGPU.Usage
+ r.GPU.Encoder = c.LimitGPU.Encoder
+ r.GPU.Decoder = c.LimitGPU.Decoder
+ r.GPU.Index = -1
+}
+
+func (r *Resources) MarshalProcess(c node.ProcessResources) {
+ r.CPU = c.CPU
+ r.Mem = c.Mem
+ r.GPU.Usage = c.GPU.Usage
+ r.GPU.Encoder = c.GPU.Encoder
+ r.GPU.Decoder = c.GPU.Decoder
+ r.GPU.Index = c.GPU.Index
+}
+
+func (r *Resources) HasGPU() bool {
+ if r.GPU.Usage > 0 || r.GPU.Encoder > 0 || r.GPU.Decoder > 0 || r.GPU.Mem > 0 {
+ return true
+ }
+
+ return false
+}
+
+func (r *Resources) DoesFitGPU(g node.ResourcesGPU) bool {
+ if g.Usage+r.GPU.Usage < g.UsageLimit && g.Encoder+r.GPU.Encoder < g.UsageLimit && g.Decoder+r.GPU.Decoder < g.UsageLimit && g.Mem+r.GPU.Mem < g.MemLimit {
+ return true
+ }
+
+ return false
+}
+
type resourcePlanner struct {
nodes map[string]node.Resources
blocked map[string]struct{}
@@ -39,8 +100,8 @@ func (r *resourcePlanner) Throttling(nodeid string, throttling bool) {
}
// HasNodeEnough returns whether a node has enough resources available for the
-// requested cpu and memory consumption.
-func (r *resourcePlanner) HasNodeEnough(nodeid string, cpu float64, mem uint64) bool {
+// requested cpu, memory, and gpu consumption.
+func (r *resourcePlanner) HasNodeEnough(nodeid string, req Resources) bool {
res, hasNode := r.nodes[nodeid]
if !hasNode {
return false
@@ -50,20 +111,39 @@ func (r *resourcePlanner) HasNodeEnough(nodeid string, cpu float64, mem uint64)
return false
}
- if res.Error == nil && res.CPU+cpu < res.CPULimit && res.Mem+mem < res.MemLimit && !res.IsThrottling {
- return true
+ if res.Error != nil || res.IsThrottling {
+ return false
}
- return false
+ if res.CPU+req.CPU >= res.CPULimit || res.Mem+req.Mem >= res.MemLimit {
+ return false
+ }
+
+ if req.HasGPU() {
+ found := false
+
+ for _, g := range res.GPU {
+ if req.DoesFitGPU(g) {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return false
+ }
+ }
+
+ return true
}
-// FindBestNodes returns an array of nodeids that can fit the requested cpu and memory requirements. If no
+// FindBestNodes returns an array of nodeids that can fit the requested cpu, memory, and gpu requirements. If no
// such node is available, an empty array is returned. The array is sorted by the most suitable node first.
-func (r *resourcePlanner) FindBestNodes(cpu float64, mem uint64) []string {
+func (r *resourcePlanner) FindBestNodes(req Resources) []string {
nodes := []string{}
for id := range r.nodes {
- if r.HasNodeEnough(id, cpu, mem) {
+ if r.HasNodeEnough(id, req) {
nodes = append(nodes, id)
}
}
@@ -81,43 +161,72 @@ func (r *resourcePlanner) FindBestNodes(cpu float64, mem uint64) []string {
return nodes
}
-// Add adds the resources of the node according to the cpu and memory utilization.
-func (r *resourcePlanner) Add(nodeid string, cpu float64, mem uint64) {
+// Add adds the resources of the node according to the cpu, memory, and gpu utilization.
+func (r *resourcePlanner) Add(nodeid string, req Resources) {
res, hasRes := r.nodes[nodeid]
if !hasRes {
return
}
- res.CPU += cpu
- res.Mem += mem
+ res.CPU += req.CPU
+ res.Mem += req.Mem
+
+ if req.HasGPU() {
+ for i, g := range res.GPU {
+ if req.DoesFitGPU(g) {
+ g.Usage += req.GPU.Usage
+ g.Encoder += req.GPU.Encoder
+ g.Decoder += req.GPU.Decoder
+ g.Mem += req.GPU.Mem
+ res.GPU[i] = g
+ break
+ }
+ }
+ }
+
r.nodes[nodeid] = res
}
-// Remove subtracts the resources from the node according to the cpu and memory utilization.
-func (r *resourcePlanner) Remove(nodeid string, cpu float64, mem uint64) {
+// Remove subtracts the resources from the node according to the cpu, memory, and gpu utilization.
+func (r *resourcePlanner) Remove(nodeid string, req Resources) {
res, hasRes := r.nodes[nodeid]
if !hasRes {
return
}
- res.CPU -= cpu
- if res.CPU < 0 {
- res.CPU = 0
- }
- if mem >= res.Mem {
- res.Mem = 0
- } else {
- res.Mem -= mem
+ res.CPU -= min(res.CPU, req.CPU)
+ res.Mem -= min(res.Mem, req.Mem)
+
+ if req.HasGPU() {
+ if req.GPU.Index >= 0 && req.GPU.Index < len(res.GPU) {
+ gpu := res.GPU[req.GPU.Index]
+ gpu.Usage -= min(gpu.Usage, req.GPU.Usage)
+ gpu.Encoder -= min(gpu.Encoder, req.GPU.Encoder)
+ gpu.Decoder -= min(gpu.Decoder, req.GPU.Decoder)
+ gpu.Mem -= min(gpu.Mem, req.GPU.Mem)
+ res.GPU[req.GPU.Index] = gpu
+ }
}
+
r.nodes[nodeid] = res
}
// Move adjusts the resources from the target and source node according to the cpu and memory utilization.
-func (r *resourcePlanner) Move(target, source string, cpu float64, mem uint64) {
- r.Add(target, cpu, mem)
- r.Remove(source, cpu, mem)
+func (r *resourcePlanner) Move(target, source string, req Resources) {
+ r.Add(target, req)
+ r.Remove(source, req)
}
func (r *resourcePlanner) Map() map[string]node.Resources {
return r.nodes
}
+
+func (r *resourcePlanner) Blocked() []string {
+ nodes := []string{}
+
+ for nodeid := range r.blocked {
+ nodes = append(nodes, nodeid)
+ }
+
+ return nodes
+}
diff --git a/cluster/resources_test.go b/cluster/resources_test.go
new file mode 100644
index 00000000..2f938a31
--- /dev/null
+++ b/cluster/resources_test.go
@@ -0,0 +1,603 @@
+package cluster
+
+import (
+ "testing"
+
+ "github.com/datarhei/core/v16/cluster/node"
+ "github.com/stretchr/testify/require"
+)
+
+func TestResources(t *testing.T) {
+ r := Resources{
+ CPU: 1,
+ Mem: 1,
+ }
+
+ require.False(t, r.HasGPU())
+
+ r.GPU = ResourcesGPU{
+ Index: 0,
+ Usage: 1,
+ Encoder: 0,
+ Decoder: 0,
+ Mem: 1,
+ }
+
+ require.True(t, r.HasGPU())
+}
+
+func TestResourcePlanner(t *testing.T) {
+ nodes := map[string]node.About{
+ "node1": {
+ State: "online",
+ Resources: node.Resources{
+ NCPU: 1,
+ CPU: 7,
+ Mem: 35,
+ CPULimit: 90,
+ MemLimit: 90,
+ },
+ },
+ "node2": {
+ State: "online",
+ Resources: node.Resources{
+ NCPU: 1,
+ CPU: 85,
+ Mem: 11,
+ CPULimit: 90,
+ MemLimit: 90,
+ },
+ },
+ }
+
+ planner := NewResourcePlanner(nodes)
+
+ require.Equal(t, map[string]node.Resources{
+ "node1": {
+ NCPU: 1,
+ CPU: 7,
+ Mem: 35,
+ CPULimit: 90,
+ MemLimit: 90,
+ },
+ "node2": {
+ NCPU: 1,
+ CPU: 85,
+ Mem: 11,
+ CPULimit: 90,
+ MemLimit: 90,
+ },
+ }, planner.Map())
+}
+
+func TestResourcePlannerBlocked(t *testing.T) {
+ nodes := map[string]node.About{
+ "node1": {
+ State: "degraded",
+ Resources: node.Resources{
+ NCPU: 1,
+ CPU: 7,
+ Mem: 35,
+ CPULimit: 90,
+ MemLimit: 90,
+ },
+ },
+ "node2": {
+ State: "online",
+ Resources: node.Resources{
+ NCPU: 1,
+ CPU: 85,
+ Mem: 11,
+ CPULimit: 90,
+ MemLimit: 90,
+ },
+ },
+ }
+
+ planner := NewResourcePlanner(nodes)
+
+ require.Equal(t, []string{"node1"}, planner.Blocked())
+}
+
+func TestResourcePlannerThrottling(t *testing.T) {
+ nodes := map[string]node.About{
+ "node1": {
+ State: "online",
+ Resources: node.Resources{
+ NCPU: 1,
+ CPU: 7,
+ Mem: 35,
+ CPULimit: 90,
+ MemLimit: 90,
+ },
+ },
+ "node2": {
+ State: "online",
+ Resources: node.Resources{
+ NCPU: 1,
+ CPU: 85,
+ Mem: 11,
+ CPULimit: 90,
+ MemLimit: 90,
+ },
+ },
+ }
+
+ planner := NewResourcePlanner(nodes)
+
+ require.True(t, planner.HasNodeEnough("node1", Resources{
+ CPU: 30,
+ Mem: 5,
+ }))
+
+ planner.Throttling("node1", true)
+
+ require.False(t, planner.HasNodeEnough("node1", Resources{
+ CPU: 30,
+ Mem: 5,
+ }))
+
+ planner.Throttling("node1", false)
+
+ require.True(t, planner.HasNodeEnough("node1", Resources{
+ CPU: 30,
+ Mem: 5,
+ }))
+}
+
+func TestResourcePlannerHasNodeEnough(t *testing.T) {
+ nodes := map[string]node.About{
+ "node1": {
+ State: "online",
+ Resources: node.Resources{
+ NCPU: 1,
+ CPU: 7,
+ Mem: 35,
+ CPULimit: 90,
+ MemLimit: 90,
+ GPU: []node.ResourcesGPU{
+ {
+ Mem: 5,
+ MemLimit: 90,
+ Usage: 53,
+ UsageLimit: 90,
+ Encoder: 32,
+ Decoder: 26,
+ },
+ {
+ Mem: 85,
+ MemLimit: 90,
+ Usage: 64,
+ UsageLimit: 90,
+ Encoder: 43,
+ Decoder: 12,
+ },
+ },
+ },
+ },
+ "node2": {
+ State: "online",
+ Resources: node.Resources{
+ NCPU: 1,
+ CPU: 85,
+ Mem: 11,
+ CPULimit: 90,
+ MemLimit: 90,
+ GPU: []node.ResourcesGPU{
+ {
+ Mem: 5,
+ MemLimit: 90,
+ Usage: 53,
+ UsageLimit: 90,
+ Encoder: 32,
+ Decoder: 26,
+ },
+ },
+ },
+ },
+ }
+
+ planner := NewResourcePlanner(nodes)
+
+ require.True(t, planner.HasNodeEnough("node1", Resources{
+ CPU: 30,
+ Mem: 5,
+ }))
+
+ require.False(t, planner.HasNodeEnough("node2", Resources{
+ CPU: 30,
+ Mem: 5,
+ }))
+
+ require.True(t, planner.HasNodeEnough("node1", Resources{
+ CPU: 30,
+ Mem: 5,
+ GPU: ResourcesGPU{
+ Usage: 0,
+ Encoder: 0,
+ Decoder: 0,
+ Mem: 50,
+ },
+ }))
+
+ require.False(t, planner.HasNodeEnough("node1", Resources{
+ CPU: 30,
+ Mem: 5,
+ GPU: ResourcesGPU{
+ Usage: 0,
+ Encoder: 0,
+ Decoder: 0,
+ Mem: 86,
+ },
+ }))
+
+ require.True(t, planner.HasNodeEnough("node1", Resources{
+ CPU: 30,
+ Mem: 5,
+ GPU: ResourcesGPU{
+ Usage: 0,
+ Encoder: 50,
+ Decoder: 0,
+ Mem: 50,
+ },
+ }))
+}
+
+func TestResourcePlannerAdd(t *testing.T) {
+ nodes := map[string]node.About{
+ "node1": {
+ State: "online",
+ Resources: node.Resources{
+ NCPU: 1,
+ CPU: 7,
+ Mem: 35,
+ CPULimit: 90,
+ MemLimit: 90,
+ },
+ },
+ }
+
+ planner := NewResourcePlanner(nodes)
+
+ planner.Add("node1", Resources{
+ CPU: 42,
+ Mem: 33,
+ })
+
+ require.Equal(t, map[string]node.Resources{
+ "node1": {
+ NCPU: 1,
+ CPU: 49,
+ Mem: 68,
+ CPULimit: 90,
+ MemLimit: 90,
+ },
+ }, planner.Map())
+}
+
+func TestResourcePlannerNoGPUAddGPU(t *testing.T) {
+ nodes := map[string]node.About{
+ "node1": {
+ State: "online",
+ Resources: node.Resources{
+ NCPU: 1,
+ CPU: 7,
+ Mem: 35,
+ CPULimit: 90,
+ MemLimit: 90,
+ },
+ },
+ }
+
+ planner := NewResourcePlanner(nodes)
+
+ planner.Add("node1", Resources{
+ CPU: 42,
+ Mem: 33,
+ GPU: ResourcesGPU{
+ Index: 0,
+ Usage: 1,
+ Encoder: 2,
+ Decoder: 3,
+ Mem: 4,
+ },
+ })
+
+ require.Equal(t, map[string]node.Resources{
+ "node1": {
+ NCPU: 1,
+ CPU: 49,
+ Mem: 68,
+ CPULimit: 90,
+ MemLimit: 90,
+ },
+ }, planner.Map())
+}
+
+func TestResourcePlannerAddGPU(t *testing.T) {
+ nodes := map[string]node.About{
+ "node1": {
+ State: "online",
+ Resources: node.Resources{
+ NCPU: 1,
+ CPU: 7,
+ Mem: 35,
+ CPULimit: 90,
+ MemLimit: 90,
+ GPU: []node.ResourcesGPU{
+ {
+ Mem: 0,
+ MemLimit: 0,
+ Usage: 0,
+ UsageLimit: 0,
+ Encoder: 0,
+ Decoder: 0,
+ },
+ {
+ Mem: 0,
+ MemLimit: 100,
+ Usage: 0,
+ UsageLimit: 100,
+ Encoder: 0,
+ Decoder: 0,
+ },
+ },
+ },
+ },
+ }
+
+ planner := NewResourcePlanner(nodes)
+
+ planner.Add("node1", Resources{
+ CPU: 42,
+ Mem: 33,
+ GPU: ResourcesGPU{
+ Usage: 1,
+ Encoder: 2,
+ Decoder: 3,
+ Mem: 4,
+ },
+ })
+
+ require.Equal(t, map[string]node.Resources{
+ "node1": {
+ NCPU: 1,
+ CPU: 49,
+ Mem: 68,
+ CPULimit: 90,
+ MemLimit: 90,
+ GPU: []node.ResourcesGPU{
+ {
+ Mem: 0,
+ MemLimit: 0,
+ Usage: 0,
+ UsageLimit: 0,
+ Encoder: 0,
+ Decoder: 0,
+ },
+ {
+ Mem: 4,
+ MemLimit: 100,
+ Usage: 1,
+ UsageLimit: 100,
+ Encoder: 2,
+ Decoder: 3,
+ },
+ },
+ },
+ }, planner.Map())
+}
+
+func TestResourcePlannerRemove(t *testing.T) {
+ nodes := map[string]node.About{
+ "node1": {
+ State: "online",
+ Resources: node.Resources{
+ NCPU: 1,
+ CPU: 53,
+ Mem: 35,
+ CPULimit: 90,
+ MemLimit: 90,
+ },
+ },
+ }
+
+ planner := NewResourcePlanner(nodes)
+
+ planner.Remove("node1", Resources{
+ CPU: 13,
+ Mem: 20,
+ })
+
+ require.Equal(t, map[string]node.Resources{
+ "node1": {
+ NCPU: 1,
+ CPU: 40,
+ Mem: 15,
+ CPULimit: 90,
+ MemLimit: 90,
+ },
+ }, planner.Map())
+}
+
+func TestResourcePlannerRemoveTooMuch(t *testing.T) {
+ nodes := map[string]node.About{
+ "node1": {
+ State: "online",
+ Resources: node.Resources{
+ NCPU: 1,
+ CPU: 53,
+ Mem: 35,
+ CPULimit: 90,
+ MemLimit: 90,
+ },
+ },
+ }
+
+ planner := NewResourcePlanner(nodes)
+
+ planner.Remove("node1", Resources{
+ CPU: 100,
+ Mem: 100,
+ })
+
+ require.Equal(t, map[string]node.Resources{
+ "node1": {
+ NCPU: 1,
+ CPU: 0,
+ Mem: 0,
+ CPULimit: 90,
+ MemLimit: 90,
+ },
+ }, planner.Map())
+}
+
+func TestResourcePlannerRemoveGPU(t *testing.T) {
+ nodes := map[string]node.About{
+ "node1": {
+ State: "online",
+ Resources: node.Resources{
+ NCPU: 1,
+ CPU: 53,
+ Mem: 35,
+ CPULimit: 90,
+ MemLimit: 90,
+ GPU: []node.ResourcesGPU{
+ {
+ Mem: 4,
+ MemLimit: 100,
+ Usage: 1,
+ UsageLimit: 100,
+ Encoder: 2,
+ Decoder: 3,
+ },
+ {
+ Mem: 23,
+ MemLimit: 100,
+ Usage: 43,
+ UsageLimit: 100,
+ Encoder: 95,
+ Decoder: 12,
+ },
+ },
+ },
+ },
+ }
+
+ planner := NewResourcePlanner(nodes)
+
+ planner.Remove("node1", Resources{
+ CPU: 13,
+ Mem: 20,
+ GPU: ResourcesGPU{
+ Index: 1,
+ Usage: 3,
+ Encoder: 40,
+ Decoder: 0,
+ Mem: 5,
+ },
+ })
+
+ require.Equal(t, map[string]node.Resources{
+ "node1": {
+ NCPU: 1,
+ CPU: 40,
+ Mem: 15,
+ CPULimit: 90,
+ MemLimit: 90,
+ GPU: []node.ResourcesGPU{
+ {
+ Mem: 4,
+ MemLimit: 100,
+ Usage: 1,
+ UsageLimit: 100,
+ Encoder: 2,
+ Decoder: 3,
+ },
+ {
+ Mem: 18,
+ MemLimit: 100,
+ Usage: 40,
+ UsageLimit: 100,
+ Encoder: 55,
+ Decoder: 12,
+ },
+ },
+ },
+ }, planner.Map())
+}
+
+func TestResourcePlannerRemoveGPUTooMuch(t *testing.T) {
+ nodes := map[string]node.About{
+ "node1": {
+ State: "online",
+ Resources: node.Resources{
+ NCPU: 1,
+ CPU: 53,
+ Mem: 35,
+ CPULimit: 90,
+ MemLimit: 90,
+ GPU: []node.ResourcesGPU{
+ {
+ Mem: 4,
+ MemLimit: 100,
+ Usage: 1,
+ UsageLimit: 100,
+ Encoder: 2,
+ Decoder: 3,
+ },
+ {
+ Mem: 23,
+ MemLimit: 100,
+ Usage: 43,
+ UsageLimit: 100,
+ Encoder: 95,
+ Decoder: 12,
+ },
+ },
+ },
+ },
+ }
+
+ planner := NewResourcePlanner(nodes)
+
+ planner.Remove("node1", Resources{
+ CPU: 13,
+ Mem: 20,
+ GPU: ResourcesGPU{
+ Index: 1,
+ Usage: 100,
+ Encoder: 100,
+ Decoder: 100,
+ Mem: 100,
+ },
+ })
+
+ require.Equal(t, map[string]node.Resources{
+ "node1": {
+ NCPU: 1,
+ CPU: 40,
+ Mem: 15,
+ CPULimit: 90,
+ MemLimit: 90,
+ GPU: []node.ResourcesGPU{
+ {
+ Mem: 4,
+ MemLimit: 100,
+ Usage: 1,
+ UsageLimit: 100,
+ Encoder: 2,
+ Decoder: 3,
+ },
+ {
+ Mem: 0,
+ MemLimit: 100,
+ Usage: 0,
+ UsageLimit: 100,
+ Encoder: 0,
+ Decoder: 0,
+ },
+ },
+ },
+ }, planner.Map())
+}
diff --git a/config/config.go b/config/config.go
index 40968369..da707f3c 100644
--- a/config/config.go
+++ b/config/config.go
@@ -94,6 +94,7 @@ func (d *Config) Clone() *Config {
data.Log = d.Log
data.DB = d.DB
data.Host = d.Host
+ data.Compress = d.Compress
data.API = d.API
data.TLS = d.TLS
data.Storage = d.Storage
@@ -113,6 +114,9 @@ func (d *Config) Clone() *Config {
data.Host.Name = slices.Copy(d.Host.Name)
+ data.Compress.Encoding = slices.Copy(d.Compress.Encoding)
+ data.Compress.MimeTypes = slices.Copy(d.Compress.MimeTypes)
+
data.API.Access.HTTP.Allow = slices.Copy(d.API.Access.HTTP.Allow)
data.API.Access.HTTP.Block = slices.Copy(d.API.Access.HTTP.Block)
data.API.Access.HTTPS.Allow = slices.Copy(d.API.Access.HTTPS.Allow)
@@ -164,6 +168,21 @@ func (d *Config) init() {
d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false)
d.vars.Register(value.NewBool(&d.Host.Auto, true), "host.auto", "CORE_HOST_AUTO", nil, "Enable detection of public IP addresses", false, false)
+ d.vars.Register(value.NewStringList(&d.Compress.Encoding, []string{"gzip"}, ","), "compress.encoding", "CORE_COMPRESS_ENCODING", nil, "Comma separated list of content encodings", false, false)
+ d.vars.Register(value.NewStringList(&d.Compress.MimeTypes, []string{
+ "text/plain",
+ "text/html",
+ "text/css",
+ "text/javascript",
+ "application/json",
+ "application/x-mpegurl",
+ "application/vnd.apple.mpegurl",
+ "image/svg+xml",
+ "text/event-stream",
+ "application/x-json-stream",
+ }, ","), "compress.mimetypes", "CORE_COMPRESS_MIMETYPES", nil, "Comma separated list of mimetypes to compress", false, false)
+ d.vars.Register(value.NewInt(&d.Compress.MinLength, 1000), "compress.min_length", "CORE_COMPRESS_MIN_LENGTH", nil, "Minimum size before compression will be used", false, false)
+
// API
d.vars.Register(value.NewBool(&d.API.ReadOnly, false), "api.read_only", "CORE_API_READ_ONLY", nil, "Allow only ready only access to the API", false, false)
d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Allow, []string{}, ","), "api.access.http.allow", "CORE_API_ACCESS_HTTP_ALLOW", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
@@ -193,7 +212,7 @@ func (d *Config) init() {
d.vars.Register(value.NewFile(&d.TLS.KeyFile, "", d.fs), "tls.key_file", "CORE_TLS_KEY_FILE", []string{"CORE_TLS_KEYFILE"}, "Path to key file in PEM format", false, false)
// Storage
- d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
+ d.vars.Register(value.NewFile(&d.Storage.MimeTypesFile, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
// Storage (Disk)
d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data", d.fs), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
@@ -287,8 +306,10 @@ func (d *Config) init() {
d.vars.Register(value.NewDir(&d.Router.UIPath, "", d.fs), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
// Resources
- d.vars.Register(value.NewFloat(&d.Resources.MaxCPUUsage, 0), "resources.max_cpu_usage", "CORE_RESOURCES_MAX_CPU_USAGE", nil, "Maximum system CPU usage in percent, from 0 (no limit) to 100", false, false)
- d.vars.Register(value.NewFloat(&d.Resources.MaxMemoryUsage, 0), "resources.max_memory_usage", "CORE_RESOURCES_MAX_MEMORY_USAGE", nil, "Maximum system usage in percent, from 0 (no limit) to 100", false, false)
+ d.vars.Register(value.NewFloatRange(&d.Resources.MaxCPUUsage, 0, 0, 100), "resources.max_cpu_usage", "CORE_RESOURCES_MAX_CPU_USAGE", nil, "Maximum system CPU usage in percent, from 0 (no limit) to 100", false, false)
+ d.vars.Register(value.NewFloatRange(&d.Resources.MaxMemoryUsage, 0, 0, 100), "resources.max_memory_usage", "CORE_RESOURCES_MAX_MEMORY_USAGE", nil, "Maximum system memory usage in percent, from 0 (no limit) to 100", false, false)
+ d.vars.Register(value.NewFloatRange(&d.Resources.MaxGPUUsage, 0, 0, 100), "resources.max_gpu_usage", "CORE_RESOURCES_MAX_GPU_USAGE", nil, "Maximum general, encoder, and decoder GPU usage in percent per GPU, from 0 (no limit) to 100", false, false)
+ d.vars.Register(value.NewFloatRange(&d.Resources.MaxGPUMemoryUsage, 0, 0, 100), "resources.max_gpu_memory_usage", "CORE_RESOURCES_MAX_GPU_MEMORY_USAGE", nil, "Maximum GPU memory usage in percent per GPU, from 0 (no limit) to 100", false, false)
// Cluster
d.vars.Register(value.NewBool(&d.Cluster.Enable, false), "cluster.enable", "CORE_CLUSTER_ENABLE", nil, "Enable cluster mode", false, false)
@@ -476,17 +497,6 @@ func (d *Config) Validate(resetLogs bool) {
}
}
- // If resource limits are given, all values must be set
- if d.Resources.MaxCPUUsage > 0 || d.Resources.MaxMemoryUsage > 0 {
- if d.Resources.MaxCPUUsage <= 0 || d.Resources.MaxCPUUsage > 100 {
- d.vars.Log("error", "resources.max_cpu_usage", "must be greater than 0 and smaller or equal to 100")
- }
-
- if d.Resources.MaxMemoryUsage <= 0 {
- d.vars.Log("error", "resources.max_memory_usage", "must be greater than 0 and smaller or equal to 100")
- }
- }
-
// If cluster mode is enabled, a proper address must be provided
if d.Cluster.Enable {
if len(d.Cluster.Address) == 0 {
diff --git a/config/data.go b/config/data.go
index be3e4056..59461836 100644
--- a/config/data.go
+++ b/config/data.go
@@ -32,6 +32,11 @@ type Data struct {
Name []string `json:"name"`
Auto bool `json:"auto"`
} `json:"host"`
+ Compress struct {
+ Encoding []string `json:"encoding"`
+ MimeTypes []string `json:"mimetypes"`
+ MinLength int `json:"min_length" jsonschema:"minimum=0"`
+ } `json:"compress"`
API struct {
ReadOnly bool `json:"read_only"`
Access struct {
@@ -100,7 +105,7 @@ type Data struct {
CORS struct {
Origins []string `json:"origins"`
} `json:"cors"`
- MimeTypes string `json:"mimetypes_file"`
+ MimeTypesFile string `json:"mimetypes_file"`
} `json:"storage"`
RTMP struct {
Enable bool `json:"enable"`
@@ -179,8 +184,10 @@ type Data struct {
UIPath string `json:"ui_path"`
} `json:"router"`
Resources struct {
- MaxCPUUsage float64 `json:"max_cpu_usage"` // percent 0-100
- MaxMemoryUsage float64 `json:"max_memory_usage"` // percent 0-100
+ MaxCPUUsage float64 `json:"max_cpu_usage"` // percent 0-100
+ MaxMemoryUsage float64 `json:"max_memory_usage"` // percent 0-100
+ MaxGPUUsage float64 `json:"max_gpu_usage"` // percent 0-100
+ MaxGPUMemoryUsage float64 `json:"max_gpu_memory_usage"` // percent 0-100
} `json:"resources"`
Cluster struct {
Enable bool `json:"enable"`
@@ -260,7 +267,7 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) {
data.Router.BlockedPrefixes = slices.Copy(d.Router.BlockedPrefixes)
data.Router.Routes = copy.StringMap(d.Router.Routes)
- data.Storage.MimeTypes = d.Storage.MimeTypes
+ data.Storage.MimeTypesFile = d.Storage.MimeTypes
data.Storage.CORS = d.Storage.CORS
data.Storage.CORS.Origins = slices.Copy(d.Storage.CORS.Origins)
@@ -368,7 +375,7 @@ func DowngradeV3toV2(d *Data) (*v2.Data, error) {
data.TLS.CertFile = d.TLS.CertFile
data.TLS.KeyFile = d.TLS.KeyFile
- data.Storage.MimeTypes = d.Storage.MimeTypes
+ data.Storage.MimeTypes = d.Storage.MimeTypesFile
data.Storage.CORS = d.Storage.CORS
data.Storage.CORS.Origins = slices.Copy(d.Storage.CORS.Origins)
diff --git a/config/value/primitives.go b/config/value/primitives.go
index 4d1258fd..4c1ae54a 100644
--- a/config/value/primitives.go
+++ b/config/value/primitives.go
@@ -1,6 +1,7 @@
package value
import (
+ "fmt"
"sort"
"strconv"
"strings"
@@ -310,3 +311,56 @@ func (u *Float64) Validate() error {
func (u *Float64) IsEmpty() bool {
return float64(*u) == 0
}
+
+// float64 range
+
+type Float64Range struct {
+ p *float64
+ from float64
+ to float64
+}
+
+func NewFloatRange(p *float64, val, from, to float64) *Float64Range {
+ v := &Float64Range{
+ p: p,
+ from: from,
+ to: to,
+ }
+
+ *p = val
+
+ return v
+}
+
+func (s *Float64Range) Set(val string) error {
+ v, err := strconv.ParseFloat(val, 64)
+ if err != nil {
+ return err
+ }
+
+ *s.p = v
+
+ return nil
+}
+
+func (s *Float64Range) String() string {
+ if s.IsEmpty() {
+ return "(empty)"
+ }
+
+ return fmt.Sprintf("%.3f", *s.p)
+}
+
+func (s *Float64Range) Validate() error {
+ val := *s.p
+
+ if val < s.from || val > s.to {
+ return fmt.Errorf("value %f is not in range [%f, %f]", val, s.from, s.to)
+ }
+
+ return nil
+}
+
+func (s *Float64Range) IsEmpty() bool {
+ return *s.p == 0
+}
diff --git a/config/value/primitives_test.go b/config/value/primitives_test.go
index 4406d8b0..2ee865ff 100644
--- a/config/value/primitives_test.go
+++ b/config/value/primitives_test.go
@@ -165,3 +165,29 @@ func TestFloat64Value(t *testing.T) {
require.Equal(t, float64(77.7), x)
}
+
+func TestFloat64RangeValue(t *testing.T) {
+ var x float64
+
+ val := NewFloatRange(&x, 11.1, 0, 100)
+
+ require.Equal(t, "11.100", val.String())
+ require.NoError(t, val.Validate())
+ require.Equal(t, false, val.IsEmpty())
+
+ x = 42.5
+
+ require.Equal(t, "42.500", val.String())
+ require.NoError(t, val.Validate())
+ require.Equal(t, false, val.IsEmpty())
+
+ val.Set("77.7")
+
+ require.Equal(t, float64(77.7), x)
+
+ val.Set("101.9")
+
+ require.Equal(t, "101.900", val.String())
+ require.Error(t, val.Validate())
+ require.Equal(t, false, val.IsEmpty())
+}
diff --git a/docs/docs.go b/docs/docs.go
index ee197903..8f175707 100644
--- a/docs/docs.go
+++ b/docs/docs.go
@@ -1,5 +1,4 @@
-// Code generated by swaggo/swag. DO NOT EDIT.
-
+// Package docs Code generated by swaggo/swag. DO NOT EDIT
package docs
import "github.com/swaggo/swag"
@@ -404,6 +403,46 @@ const docTemplate = `{
}
}
},
+ "/api/v3/cluster/events": {
+ "post": {
+ "security": [
+ {
+ "ApiKeyAuth": []
+ }
+ ],
+ "description": "Stream of events of what's happening on each node in the cluster",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "text/event-stream",
+ "application/x-json-stream"
+ ],
+ "tags": [
+ "v16.?.?"
+ ],
+ "summary": "Stream of events",
+ "operationId": "cluster-3-events",
+ "parameters": [
+ {
+ "description": "Event filters",
+ "name": "filters",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/api.EventFilters"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/api.Event"
+ }
+ }
+ }
+ }
+ },
"/api/v3/cluster/fs/{storage}": {
"get": {
"security": [
@@ -1540,7 +1579,7 @@ const docTemplate = `{
"200": {
"description": "OK",
"schema": {
- "$ref": "#/definitions/api.Version"
+ "$ref": "#/definitions/api.AboutVersion"
}
},
"404": {
@@ -5039,6 +5078,14 @@ const docTemplate = `{
"type": "integer",
"format": "uint64"
},
+ "channels": {
+ "type": "integer",
+ "format": "uint64"
+ },
+ "codec": {
+ "description": "Codec parameter",
+ "type": "string"
+ },
"drop": {
"type": "integer",
"format": "uint64"
@@ -5057,9 +5104,19 @@ const docTemplate = `{
"gop": {
"type": "string"
},
+ "height": {
+ "type": "integer",
+ "format": "uint64"
+ },
"input": {
"$ref": "#/definitions/api.AVstreamIO"
},
+ "layout": {
+ "type": "string"
+ },
+ "level": {
+ "type": "integer"
+ },
"looping": {
"type": "boolean"
},
@@ -5073,9 +5130,26 @@ const docTemplate = `{
"output": {
"$ref": "#/definitions/api.AVstreamIO"
},
+ "pix_fmt": {
+ "type": "string"
+ },
+ "profile": {
+ "type": "integer"
+ },
"queue": {
"type": "integer",
"format": "uint64"
+ },
+ "sample_fmt": {
+ "type": "string"
+ },
+ "sampling_hz": {
+ "type": "integer",
+ "format": "uint64"
+ },
+ "width": {
+ "type": "integer",
+ "format": "uint64"
}
}
},
@@ -5123,11 +5197,119 @@ const docTemplate = `{
"name": {
"type": "string"
},
+ "resources": {
+ "$ref": "#/definitions/api.AboutResources"
+ },
"uptime_seconds": {
"type": "integer"
},
"version": {
- "$ref": "#/definitions/api.Version"
+ "$ref": "#/definitions/api.AboutVersion"
+ }
+ }
+ },
+ "api.AboutGPUResources": {
+ "type": "object",
+ "properties": {
+ "memory_limit_bytes": {
+ "description": "Defined memory limit in bytes",
+ "type": "integer"
+ },
+ "memory_total_bytes": {
+ "description": "Total available memory in bytes",
+ "type": "integer"
+ },
+ "memory_used_bytes": {
+ "description": "Currently used memory in bytes",
+ "type": "integer"
+ },
+ "usage_decoder": {
+ "description": "Current decoder usage, 0-100",
+ "type": "number"
+ },
+ "usage_encoder": {
+ "description": "Current encoder usage, 0-100",
+ "type": "number"
+ },
+ "usage_general": {
+ "description": "Current general usage, 0-100",
+ "type": "number"
+ },
+ "usage_limit": {
+ "description": "Defined general usage limit, 0-100",
+ "type": "number"
+ }
+ }
+ },
+ "api.AboutResources": {
+ "type": "object",
+ "properties": {
+ "cpu_core": {
+ "description": "Current CPU load of the core itself, 0-100*ncpu",
+ "type": "number"
+ },
+ "cpu_limit": {
+ "description": "Defined CPU load limit, 0-100*ncpu",
+ "type": "number"
+ },
+ "cpu_used": {
+ "description": "Current CPU load, 0-100*ncpu",
+ "type": "number"
+ },
+ "gpu": {
+ "description": "GPU resources",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/api.AboutGPUResources"
+ }
+ },
+ "is_throttling": {
+ "description": "Whether this core is currently throttling",
+ "type": "boolean"
+ },
+ "memory_core_bytes": {
+ "description": "Current used memory of the core itself in bytes",
+ "type": "integer"
+ },
+ "memory_limit_bytes": {
+ "description": "Defined memory limit in bytes",
+ "type": "integer"
+ },
+ "memory_total_bytes": {
+ "description": "Total available memory in bytes",
+ "type": "integer"
+ },
+ "memory_used_bytes": {
+ "description": "Currently used memory in bytes",
+ "type": "integer"
+ },
+ "ncpu": {
+ "description": "Number of CPU on this node",
+ "type": "number"
+ }
+ }
+ },
+ "api.AboutVersion": {
+ "type": "object",
+ "properties": {
+ "arch": {
+ "type": "string"
+ },
+ "build_date": {
+ "description": "RFC3339",
+ "type": "string"
+ },
+ "compiler": {
+ "type": "string"
+ },
+ "number": {
+ "type": "string"
+ },
+ "repository_branch": {
+ "type": "string"
+ },
+ "repository_commit": {
+ "type": "string"
}
}
},
@@ -5302,6 +5484,39 @@ const docTemplate = `{
}
}
},
+ "api.ClusterNodeGPUResources": {
+ "type": "object",
+ "properties": {
+ "memory_limit_bytes": {
+ "description": "Defined memory limit in bytes",
+ "type": "integer"
+ },
+ "memory_total_bytes": {
+ "description": "Total available memory in bytes",
+ "type": "integer"
+ },
+ "memory_used_bytes": {
+ "description": "Currently used memory in bytes",
+ "type": "integer"
+ },
+ "usage_decoder": {
+ "description": "Current decoder usage, 0-100",
+ "type": "number"
+ },
+ "usage_encoder": {
+ "description": "Current encoder usage, 0-100",
+ "type": "number"
+ },
+ "usage_general": {
+ "description": "Current general usage, 0-100",
+ "type": "number"
+ },
+ "usage_limit": {
+ "description": "Defined general usage limit, 0-100",
+ "type": "number"
+ }
+ }
+ },
"api.ClusterNodeID": {
"type": "object",
"properties": {
@@ -5313,6 +5528,10 @@ const docTemplate = `{
"api.ClusterNodeResources": {
"type": "object",
"properties": {
+ "cpu_core": {
+ "description": "percent 0-100*ncpu",
+ "type": "number"
+ },
"cpu_limit": {
"description": "percent 0-100*npcu",
"type": "number"
@@ -5324,13 +5543,28 @@ const docTemplate = `{
"error": {
"type": "string"
},
+ "gpu": {
+ "description": "GPU resources",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/api.ClusterNodeGPUResources"
+ }
+ },
"is_throttling": {
"type": "boolean"
},
+ "memory_core_bytes": {
+ "description": "bytes",
+ "type": "integer"
+ },
"memory_limit_bytes": {
"description": "bytes",
"type": "integer"
},
+ "memory_total_bytes": {
+ "description": "bytes",
+ "type": "integer"
+ },
"memory_used_bytes": {
"description": "bytes",
"type": "integer"
@@ -5572,6 +5806,26 @@ const docTemplate = `{
}
}
},
+ "compress": {
+ "type": "object",
+ "properties": {
+ "encoding": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "mimetypes": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "min_length": {
+ "type": "integer"
+ }
+ }
+ },
"created_at": {
"description": "When this config has been persisted",
"type": "string"
@@ -5764,6 +6018,14 @@ const docTemplate = `{
"description": "percent 0-100",
"type": "number"
},
+ "max_gpu_memory_usage": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
+ "max_gpu_usage": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
"max_memory_usage": {
"description": "percent 0-100",
"type": "number"
@@ -6089,6 +6351,9 @@ const docTemplate = `{
"caller": {
"type": "string"
},
+ "core_id": {
+ "type": "string"
+ },
"data": {
"type": "object",
"additionalProperties": {
@@ -6116,6 +6381,9 @@ const docTemplate = `{
"caller": {
"type": "string"
},
+ "core_id": {
+ "type": "string"
+ },
"data": {
"type": "object",
"additionalProperties": {
@@ -6894,13 +7162,33 @@ const docTemplate = `{
"type": "object",
"properties": {
"cpu_usage": {
+ "description": "percent 0-100*ncpu",
+ "type": "number"
+ },
+ "gpu_decoder": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
+ "gpu_encoder": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
+ "gpu_memory_mbytes": {
+ "description": "megabytes",
+ "type": "integer",
+ "format": "uint64"
+ },
+ "gpu_usage": {
+ "description": "percent 0-100",
"type": "number"
},
"memory_mbytes": {
+ "description": "megabytes",
"type": "integer",
"format": "uint64"
},
"waitfor_seconds": {
+ "description": "seconds",
"type": "integer",
"format": "uint64"
}
@@ -7070,6 +7358,9 @@ const docTemplate = `{
"cpu_usage": {
"$ref": "#/definitions/api.ProcessUsageCPU"
},
+ "gpu": {
+ "$ref": "#/definitions/api.ProcessUsageGPU"
+ },
"memory_bytes": {
"$ref": "#/definitions/api.ProcessUsageMemory"
}
@@ -7098,12 +7389,71 @@ const docTemplate = `{
}
}
},
- "api.ProcessUsageMemory": {
+ "api.ProcessUsageGPU": {
+ "type": "object",
+ "properties": {
+ "decoder": {
+ "$ref": "#/definitions/api.ProcessUsageGPUUsage"
+ },
+ "encoder": {
+ "$ref": "#/definitions/api.ProcessUsageGPUUsage"
+ },
+ "index": {
+ "type": "integer"
+ },
+ "memory_bytes": {
+ "$ref": "#/definitions/api.ProcessUsageGPUMemory"
+ },
+ "usage": {
+ "$ref": "#/definitions/api.ProcessUsageGPUUsage"
+ }
+ }
+ },
+ "api.ProcessUsageGPUMemory": {
+ "type": "object",
+ "properties": {
+ "avg": {
+ "type": "integer",
+ "format": "uint64"
+ },
+ "cur": {
+ "type": "integer",
+ "format": "uint64"
+ },
+ "limit": {
+ "type": "integer",
+ "format": "uint64"
+ },
+ "max": {
+ "type": "integer",
+ "format": "uint64"
+ }
+ }
+ },
+ "api.ProcessUsageGPUUsage": {
"type": "object",
"properties": {
"avg": {
"type": "number"
},
+ "cur": {
+ "type": "number"
+ },
+ "limit": {
+ "type": "number"
+ },
+ "max": {
+ "type": "number"
+ }
+ }
+ },
+ "api.ProcessUsageMemory": {
+ "type": "object",
+ "properties": {
+ "avg": {
+ "type": "integer",
+ "format": "uint64"
+ },
"cur": {
"type": "integer",
"format": "uint64"
@@ -7243,6 +7593,9 @@ const docTemplate = `{
"layout": {
"type": "string"
},
+ "level": {
+ "type": "integer"
+ },
"packet": {
"type": "integer",
"format": "uint64"
@@ -7254,11 +7607,17 @@ const docTemplate = `{
"pps": {
"type": "number"
},
+ "profile": {
+ "type": "integer"
+ },
"q": {
"type": "number"
},
- "sampling_hz": {
+ "sample_fmt": {
"description": "Audio",
+ "type": "string"
+ },
+ "sampling_hz": {
"type": "integer",
"format": "uint64"
},
@@ -7944,6 +8303,26 @@ const docTemplate = `{
}
}
},
+ "compress": {
+ "type": "object",
+ "properties": {
+ "encoding": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "mimetypes": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "min_length": {
+ "type": "integer"
+ }
+ }
+ },
"created_at": {
"description": "When this config has been persisted",
"type": "string"
@@ -8136,6 +8515,14 @@ const docTemplate = `{
"description": "percent 0-100",
"type": "number"
},
+ "max_gpu_memory_usage": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
+ "max_gpu_usage": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
"max_memory_usage": {
"description": "percent 0-100",
"type": "number"
@@ -8671,30 +9058,6 @@ const docTemplate = `{
}
}
},
- "api.Version": {
- "type": "object",
- "properties": {
- "arch": {
- "type": "string"
- },
- "build_date": {
- "description": "RFC3339",
- "type": "string"
- },
- "compiler": {
- "type": "string"
- },
- "number": {
- "type": "string"
- },
- "repository_branch": {
- "type": "string"
- },
- "repository_commit": {
- "type": "string"
- }
- }
- },
"api.WidgetProcess": {
"type": "object",
"properties": {
diff --git a/docs/swagger.json b/docs/swagger.json
index ba1fbbe4..448d8718 100644
--- a/docs/swagger.json
+++ b/docs/swagger.json
@@ -396,6 +396,46 @@
}
}
},
+ "/api/v3/cluster/events": {
+ "post": {
+ "security": [
+ {
+ "ApiKeyAuth": []
+ }
+ ],
+ "description": "Stream of events of what's happening on each node in the cluster",
+ "consumes": [
+ "application/json"
+ ],
+ "produces": [
+ "text/event-stream",
+ "application/x-json-stream"
+ ],
+ "tags": [
+ "v16.?.?"
+ ],
+ "summary": "Stream of events",
+ "operationId": "cluster-3-events",
+ "parameters": [
+ {
+ "description": "Event filters",
+ "name": "filters",
+ "in": "body",
+ "schema": {
+ "$ref": "#/definitions/api.EventFilters"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "schema": {
+ "$ref": "#/definitions/api.Event"
+ }
+ }
+ }
+ }
+ },
"/api/v3/cluster/fs/{storage}": {
"get": {
"security": [
@@ -1532,7 +1572,7 @@
"200": {
"description": "OK",
"schema": {
- "$ref": "#/definitions/api.Version"
+ "$ref": "#/definitions/api.AboutVersion"
}
},
"404": {
@@ -5031,6 +5071,14 @@
"type": "integer",
"format": "uint64"
},
+ "channels": {
+ "type": "integer",
+ "format": "uint64"
+ },
+ "codec": {
+ "description": "Codec parameter",
+ "type": "string"
+ },
"drop": {
"type": "integer",
"format": "uint64"
@@ -5049,9 +5097,19 @@
"gop": {
"type": "string"
},
+ "height": {
+ "type": "integer",
+ "format": "uint64"
+ },
"input": {
"$ref": "#/definitions/api.AVstreamIO"
},
+ "layout": {
+ "type": "string"
+ },
+ "level": {
+ "type": "integer"
+ },
"looping": {
"type": "boolean"
},
@@ -5065,9 +5123,26 @@
"output": {
"$ref": "#/definitions/api.AVstreamIO"
},
+ "pix_fmt": {
+ "type": "string"
+ },
+ "profile": {
+ "type": "integer"
+ },
"queue": {
"type": "integer",
"format": "uint64"
+ },
+ "sample_fmt": {
+ "type": "string"
+ },
+ "sampling_hz": {
+ "type": "integer",
+ "format": "uint64"
+ },
+ "width": {
+ "type": "integer",
+ "format": "uint64"
}
}
},
@@ -5115,11 +5190,119 @@
"name": {
"type": "string"
},
+ "resources": {
+ "$ref": "#/definitions/api.AboutResources"
+ },
"uptime_seconds": {
"type": "integer"
},
"version": {
- "$ref": "#/definitions/api.Version"
+ "$ref": "#/definitions/api.AboutVersion"
+ }
+ }
+ },
+ "api.AboutGPUResources": {
+ "type": "object",
+ "properties": {
+ "memory_limit_bytes": {
+ "description": "Defined memory limit in bytes",
+ "type": "integer"
+ },
+ "memory_total_bytes": {
+ "description": "Total available memory in bytes",
+ "type": "integer"
+ },
+ "memory_used_bytes": {
+ "description": "Currently used memory in bytes",
+ "type": "integer"
+ },
+ "usage_decoder": {
+ "description": "Current decoder usage, 0-100",
+ "type": "number"
+ },
+ "usage_encoder": {
+ "description": "Current encoder usage, 0-100",
+ "type": "number"
+ },
+ "usage_general": {
+ "description": "Current general usage, 0-100",
+ "type": "number"
+ },
+ "usage_limit": {
+ "description": "Defined general usage limit, 0-100",
+ "type": "number"
+ }
+ }
+ },
+ "api.AboutResources": {
+ "type": "object",
+ "properties": {
+ "cpu_core": {
+ "description": "Current CPU load of the core itself, 0-100*ncpu",
+ "type": "number"
+ },
+ "cpu_limit": {
+ "description": "Defined CPU load limit, 0-100*ncpu",
+ "type": "number"
+ },
+ "cpu_used": {
+ "description": "Current CPU load, 0-100*ncpu",
+ "type": "number"
+ },
+ "gpu": {
+ "description": "GPU resources",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/api.AboutGPUResources"
+ }
+ },
+ "is_throttling": {
+ "description": "Whether this core is currently throttling",
+ "type": "boolean"
+ },
+ "memory_core_bytes": {
+ "description": "Current used memory of the core itself in bytes",
+ "type": "integer"
+ },
+ "memory_limit_bytes": {
+ "description": "Defined memory limit in bytes",
+ "type": "integer"
+ },
+ "memory_total_bytes": {
+ "description": "Total available memory in bytes",
+ "type": "integer"
+ },
+ "memory_used_bytes": {
+ "description": "Currently used memory in bytes",
+ "type": "integer"
+ },
+ "ncpu": {
+ "description": "Number of CPU on this node",
+ "type": "number"
+ }
+ }
+ },
+ "api.AboutVersion": {
+ "type": "object",
+ "properties": {
+ "arch": {
+ "type": "string"
+ },
+ "build_date": {
+ "description": "RFC3339",
+ "type": "string"
+ },
+ "compiler": {
+ "type": "string"
+ },
+ "number": {
+ "type": "string"
+ },
+ "repository_branch": {
+ "type": "string"
+ },
+ "repository_commit": {
+ "type": "string"
}
}
},
@@ -5294,6 +5477,39 @@
}
}
},
+ "api.ClusterNodeGPUResources": {
+ "type": "object",
+ "properties": {
+ "memory_limit_bytes": {
+ "description": "Defined memory limit in bytes",
+ "type": "integer"
+ },
+ "memory_total_bytes": {
+ "description": "Total available memory in bytes",
+ "type": "integer"
+ },
+ "memory_used_bytes": {
+ "description": "Currently used memory in bytes",
+ "type": "integer"
+ },
+ "usage_decoder": {
+ "description": "Current decoder usage, 0-100",
+ "type": "number"
+ },
+ "usage_encoder": {
+ "description": "Current encoder usage, 0-100",
+ "type": "number"
+ },
+ "usage_general": {
+ "description": "Current general usage, 0-100",
+ "type": "number"
+ },
+ "usage_limit": {
+ "description": "Defined general usage limit, 0-100",
+ "type": "number"
+ }
+ }
+ },
"api.ClusterNodeID": {
"type": "object",
"properties": {
@@ -5305,6 +5521,10 @@
"api.ClusterNodeResources": {
"type": "object",
"properties": {
+ "cpu_core": {
+ "description": "percent 0-100*ncpu",
+ "type": "number"
+ },
"cpu_limit": {
"description": "percent 0-100*npcu",
"type": "number"
@@ -5316,13 +5536,28 @@
"error": {
"type": "string"
},
+ "gpu": {
+ "description": "GPU resources",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/api.ClusterNodeGPUResources"
+ }
+ },
"is_throttling": {
"type": "boolean"
},
+ "memory_core_bytes": {
+ "description": "bytes",
+ "type": "integer"
+ },
"memory_limit_bytes": {
"description": "bytes",
"type": "integer"
},
+ "memory_total_bytes": {
+ "description": "bytes",
+ "type": "integer"
+ },
"memory_used_bytes": {
"description": "bytes",
"type": "integer"
@@ -5564,6 +5799,26 @@
}
}
},
+ "compress": {
+ "type": "object",
+ "properties": {
+ "encoding": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "mimetypes": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "min_length": {
+ "type": "integer"
+ }
+ }
+ },
"created_at": {
"description": "When this config has been persisted",
"type": "string"
@@ -5756,6 +6011,14 @@
"description": "percent 0-100",
"type": "number"
},
+ "max_gpu_memory_usage": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
+ "max_gpu_usage": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
"max_memory_usage": {
"description": "percent 0-100",
"type": "number"
@@ -6081,6 +6344,9 @@
"caller": {
"type": "string"
},
+ "core_id": {
+ "type": "string"
+ },
"data": {
"type": "object",
"additionalProperties": {
@@ -6108,6 +6374,9 @@
"caller": {
"type": "string"
},
+ "core_id": {
+ "type": "string"
+ },
"data": {
"type": "object",
"additionalProperties": {
@@ -6886,13 +7155,33 @@
"type": "object",
"properties": {
"cpu_usage": {
+ "description": "percent 0-100*ncpu",
+ "type": "number"
+ },
+ "gpu_decoder": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
+ "gpu_encoder": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
+ "gpu_memory_mbytes": {
+ "description": "megabytes",
+ "type": "integer",
+ "format": "uint64"
+ },
+ "gpu_usage": {
+ "description": "percent 0-100",
"type": "number"
},
"memory_mbytes": {
+ "description": "megabytes",
"type": "integer",
"format": "uint64"
},
"waitfor_seconds": {
+ "description": "seconds",
"type": "integer",
"format": "uint64"
}
@@ -7062,6 +7351,9 @@
"cpu_usage": {
"$ref": "#/definitions/api.ProcessUsageCPU"
},
+ "gpu": {
+ "$ref": "#/definitions/api.ProcessUsageGPU"
+ },
"memory_bytes": {
"$ref": "#/definitions/api.ProcessUsageMemory"
}
@@ -7090,11 +7382,70 @@
}
}
},
- "api.ProcessUsageMemory": {
+ "api.ProcessUsageGPU": {
+ "type": "object",
+ "properties": {
+ "decoder": {
+ "$ref": "#/definitions/api.ProcessUsageGPUUsage"
+ },
+ "encoder": {
+ "$ref": "#/definitions/api.ProcessUsageGPUUsage"
+ },
+ "index": {
+ "type": "integer"
+ },
+ "memory_bytes": {
+ "$ref": "#/definitions/api.ProcessUsageGPUMemory"
+ },
+ "usage": {
+ "$ref": "#/definitions/api.ProcessUsageGPUUsage"
+ }
+ }
+ },
+ "api.ProcessUsageGPUMemory": {
"type": "object",
"properties": {
"avg": {
+ "type": "integer",
+ "format": "uint64"
+ },
+ "cur": {
+ "type": "integer",
+ "format": "uint64"
+ },
+ "limit": {
+ "type": "integer",
+ "format": "uint64"
+ },
+ "max": {
+ "type": "integer",
+ "format": "uint64"
+ }
+ }
+ },
+ "api.ProcessUsageGPUUsage": {
+ "type": "object",
+ "properties": {
+ "avg": {
+ "type": "number"
+ },
+ "cur": {
+ "type": "number"
+ },
+ "limit": {
+ "type": "number"
+ },
+ "max": {
"type": "number"
+ }
+ }
+ },
+ "api.ProcessUsageMemory": {
+ "type": "object",
+ "properties": {
+ "avg": {
+ "type": "integer",
+ "format": "uint64"
},
"cur": {
"type": "integer",
@@ -7235,6 +7586,9 @@
"layout": {
"type": "string"
},
+ "level": {
+ "type": "integer"
+ },
"packet": {
"type": "integer",
"format": "uint64"
@@ -7246,11 +7600,17 @@
"pps": {
"type": "number"
},
+ "profile": {
+ "type": "integer"
+ },
"q": {
"type": "number"
},
- "sampling_hz": {
+ "sample_fmt": {
"description": "Audio",
+ "type": "string"
+ },
+ "sampling_hz": {
"type": "integer",
"format": "uint64"
},
@@ -7936,6 +8296,26 @@
}
}
},
+ "compress": {
+ "type": "object",
+ "properties": {
+ "encoding": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "mimetypes": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "min_length": {
+ "type": "integer"
+ }
+ }
+ },
"created_at": {
"description": "When this config has been persisted",
"type": "string"
@@ -8128,6 +8508,14 @@
"description": "percent 0-100",
"type": "number"
},
+ "max_gpu_memory_usage": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
+ "max_gpu_usage": {
+ "description": "percent 0-100",
+ "type": "number"
+ },
"max_memory_usage": {
"description": "percent 0-100",
"type": "number"
@@ -8663,30 +9051,6 @@
}
}
},
- "api.Version": {
- "type": "object",
- "properties": {
- "arch": {
- "type": "string"
- },
- "build_date": {
- "description": "RFC3339",
- "type": "string"
- },
- "compiler": {
- "type": "string"
- },
- "number": {
- "type": "string"
- },
- "repository_branch": {
- "type": "string"
- },
- "repository_commit": {
- "type": "string"
- }
- }
- },
"api.WidgetProcess": {
"type": "object",
"properties": {
diff --git a/docs/swagger.yaml b/docs/swagger.yaml
index 61e9d8b2..5e3730cf 100644
--- a/docs/swagger.yaml
+++ b/docs/swagger.yaml
@@ -5,6 +5,12 @@ definitions:
aqueue:
format: uint64
type: integer
+ channels:
+ format: uint64
+ type: integer
+ codec:
+ description: Codec parameter
+ type: string
drop:
format: uint64
type: integer
@@ -18,8 +24,15 @@ definitions:
type: integer
gop:
type: string
+ height:
+ format: uint64
+ type: integer
input:
$ref: '#/definitions/api.AVstreamIO'
+ layout:
+ type: string
+ level:
+ type: integer
looping:
type: boolean
looping_runtime:
@@ -29,9 +42,21 @@ definitions:
type: string
output:
$ref: '#/definitions/api.AVstreamIO'
+ pix_fmt:
+ type: string
+ profile:
+ type: integer
queue:
format: uint64
type: integer
+ sample_fmt:
+ type: string
+ sampling_hz:
+ format: uint64
+ type: integer
+ width:
+ format: uint64
+ type: integer
type: object
api.AVstreamIO:
properties:
@@ -63,10 +88,87 @@ definitions:
type: string
name:
type: string
+ resources:
+ $ref: '#/definitions/api.AboutResources'
uptime_seconds:
type: integer
version:
- $ref: '#/definitions/api.Version'
+ $ref: '#/definitions/api.AboutVersion'
+ type: object
+ api.AboutGPUResources:
+ properties:
+ memory_limit_bytes:
+ description: Defined memory limit in bytes
+ type: integer
+ memory_total_bytes:
+ description: Total available memory in bytes
+ type: integer
+ memory_used_bytes:
+ description: Currently used memory in bytes
+ type: integer
+ usage_decoder:
+ description: Current decoder usage, 0-100
+ type: number
+ usage_encoder:
+ description: Current encoder usage, 0-100
+ type: number
+ usage_general:
+ description: Current general usage, 0-100
+ type: number
+ usage_limit:
+ description: Defined general usage limit, 0-100
+ type: number
+ type: object
+ api.AboutResources:
+ properties:
+ cpu_core:
+ description: Current CPU load of the core itself, 0-100*ncpu
+ type: number
+ cpu_limit:
+ description: Defined CPU load limit, 0-100*ncpu
+ type: number
+ cpu_used:
+ description: Current CPU load, 0-100*ncpu
+ type: number
+ gpu:
+ description: GPU resources
+ items:
+ $ref: '#/definitions/api.AboutGPUResources'
+ type: array
+ is_throttling:
+ description: Whether this core is currently throttling
+ type: boolean
+ memory_core_bytes:
+ description: Current used memory of the core itself in bytes
+ type: integer
+ memory_limit_bytes:
+ description: Defined memory limit in bytes
+ type: integer
+ memory_total_bytes:
+ description: Total available memory in bytes
+ type: integer
+ memory_used_bytes:
+ description: Currently used memory in bytes
+ type: integer
+ ncpu:
+ description: Number of CPU on this node
+ type: number
+ type: object
+ api.AboutVersion:
+ properties:
+ arch:
+ type: string
+ build_date:
+ description: RFC3339
+ type: string
+ compiler:
+ type: string
+ number:
+ type: string
+ repository_branch:
+ type: string
+ repository_commit:
+ type: string
type: object
api.ClusterAbout:
properties:
@@ -181,6 +283,30 @@ definitions:
description: unix timestamp
type: integer
type: object
+ api.ClusterNodeGPUResources:
+ properties:
+ memory_limit_bytes:
+ description: Defined memory limit in bytes
+ type: integer
+ memory_total_bytes:
+ description: Total available memory in bytes
+ type: integer
+ memory_used_bytes:
+ description: Currently used memory in bytes
+ type: integer
+ usage_decoder:
+ description: Current decoder usage, 0-100
+ type: number
+ usage_encoder:
+ description: Current encoder usage, 0-100
+ type: number
+ usage_general:
+ description: Current general usage, 0-100
+ type: number
+ usage_limit:
+ description: Defined general usage limit, 0-100
+ type: number
+ type: object
api.ClusterNodeID:
properties:
id:
@@ -188,6 +314,9 @@ definitions:
type: object
api.ClusterNodeResources:
properties:
+ cpu_core:
+ description: percent 0-100*ncpu
+ type: number
cpu_limit:
description: percent 0-100*npcu
type: number
@@ -196,11 +325,22 @@ definitions:
type: number
error:
type: string
+ gpu:
+ description: GPU resources
+ items:
+ $ref: '#/definitions/api.ClusterNodeGPUResources'
+ type: array
is_throttling:
type: boolean
+ memory_core_bytes:
+ description: bytes
+ type: integer
memory_limit_bytes:
description: bytes
type: integer
+ memory_total_bytes:
+ description: bytes
+ type: integer
memory_used_bytes:
description: bytes
type: integer
@@ -362,6 +502,19 @@ definitions:
format: int64
type: integer
type: object
+ compress:
+ properties:
+ encoding:
+ items:
+ type: string
+ type: array
+ mimetypes:
+ items:
+ type: string
+ type: array
+ min_length:
+ type: integer
+ type: object
created_at:
description: When this config has been persisted
type: string
@@ -493,6 +646,12 @@ definitions:
max_cpu_usage:
description: percent 0-100
type: number
+ max_gpu_memory_usage:
+ description: percent 0-100
+ type: number
+ max_gpu_usage:
+ description: percent 0-100
+ type: number
max_memory_usage:
description: percent 0-100
type: number
@@ -710,6 +869,8 @@ definitions:
properties:
caller:
type: string
+ core_id:
+ type: string
data:
additionalProperties:
type: string
@@ -728,6 +889,8 @@ definitions:
properties:
caller:
type: string
+ core_id:
+ type: string
data:
additionalProperties:
type: string
@@ -1252,11 +1415,27 @@ definitions:
api.ProcessConfigLimits:
properties:
cpu_usage:
+ description: percent 0-100*ncpu
+ type: number
+ gpu_decoder:
+ description: percent 0-100
+ type: number
+ gpu_encoder:
+ description: percent 0-100
+ type: number
+ gpu_memory_mbytes:
+ description: megabytes
+ format: uint64
+ type: integer
+ gpu_usage:
+ description: percent 0-100
type: number
memory_mbytes:
+ description: megabytes
format: uint64
type: integer
waitfor_seconds:
+ description: seconds
format: uint64
type: integer
type: object
@@ -1371,6 +1550,8 @@ definitions:
properties:
cpu_usage:
$ref: '#/definitions/api.ProcessUsageCPU'
+ gpu:
+ $ref: '#/definitions/api.ProcessUsageGPU'
memory_bytes:
$ref: '#/definitions/api.ProcessUsageMemory'
type: object
@@ -1389,10 +1570,50 @@ definitions:
throttling:
type: boolean
type: object
- api.ProcessUsageMemory:
+ api.ProcessUsageGPU:
+ properties:
+ decoder:
+ $ref: '#/definitions/api.ProcessUsageGPUUsage'
+ encoder:
+ $ref: '#/definitions/api.ProcessUsageGPUUsage'
+ index:
+ type: integer
+ memory_bytes:
+ $ref: '#/definitions/api.ProcessUsageGPUMemory'
+ usage:
+ $ref: '#/definitions/api.ProcessUsageGPUUsage'
+ type: object
+ api.ProcessUsageGPUMemory:
properties:
avg:
+ format: uint64
+ type: integer
+ cur:
+ format: uint64
+ type: integer
+ limit:
+ format: uint64
+ type: integer
+ max:
+ format: uint64
+ type: integer
+ type: object
+ api.ProcessUsageGPUUsage:
+ properties:
+ avg:
+ type: number
+ cur:
+ type: number
+ limit:
type: number
+ max:
+ type: number
+ type: object
+ api.ProcessUsageMemory:
+ properties:
+ avg:
+ format: uint64
+ type: integer
cur:
format: uint64
type: integer
@@ -1490,6 +1711,8 @@ definitions:
type: integer
layout:
type: string
+ level:
+ type: integer
packet:
format: uint64
type: integer
@@ -1498,10 +1721,14 @@ definitions:
type: string
pps:
type: number
+ profile:
+ type: integer
q:
type: number
- sampling_hz:
+ sample_fmt:
description: Audio
+ type: string
+ sampling_hz:
format: uint64
type: integer
size_kb:
@@ -2030,6 +2257,19 @@ definitions:
format: int64
type: integer
type: object
+ compress:
+ properties:
+ encoding:
+ items:
+ type: string
+ type: array
+ mimetypes:
+ items:
+ type: string
+ type: array
+ min_length:
+ type: integer
+ type: object
created_at:
description: When this config has been persisted
type: string
@@ -2161,6 +2401,12 @@ definitions:
max_cpu_usage:
description: percent 0-100
type: number
+ max_gpu_memory_usage:
+ description: percent 0-100
+ type: number
+ max_gpu_usage:
+ description: percent 0-100
+ type: number
max_memory_usage:
description: percent 0-100
type: number
@@ -2513,22 +2759,6 @@ definitions:
$ref: '#/definitions/api.GraphMapping'
type: array
type: object
- api.Version:
- properties:
- arch:
- type: string
- build_date:
- description: RFC3339
- type: string
- compiler:
- type: string
- number:
- type: string
- repository_branch:
- type: string
- repository_commit:
- type: string
- type: object
api.WidgetProcess:
properties:
current_sessions:
@@ -2838,6 +3068,31 @@ paths:
summary: List of identities in the cluster
tags:
- v16.?.?
+ /api/v3/cluster/events:
+ post:
+ consumes:
+ - application/json
+ description: Stream of events of what's happening on each node in the cluster
+ operationId: cluster-3-events
+ parameters:
+ - description: Event filters
+ in: body
+ name: filters
+ schema:
+ $ref: '#/definitions/api.EventFilters'
+ produces:
+ - text/event-stream
+ - application/x-json-stream
+ responses:
+ "200":
+ description: OK
+ schema:
+ $ref: '#/definitions/api.Event'
+ security:
+ - ApiKeyAuth: []
+ summary: Stream of events
+ tags:
+ - v16.?.?
/api/v3/cluster/fs/{storage}:
get:
description: List all files on a filesystem. The listing can be ordered by name,
@@ -3577,7 +3832,7 @@ paths:
"200":
description: OK
schema:
- $ref: '#/definitions/api.Version'
+ $ref: '#/definitions/api.AboutVersion'
"404":
description: Not Found
schema:
diff --git a/encoding/json/json.go b/encoding/json/json.go
index 12046c06..51d0f0e8 100644
--- a/encoding/json/json.go
+++ b/encoding/json/json.go
@@ -2,10 +2,9 @@
package json
import (
+ "encoding/json"
"fmt"
"io"
-
- "github.com/goccy/go-json"
)
type UnmarshalTypeError = json.UnmarshalTypeError
diff --git a/ffmpeg/ffmpeg.go b/ffmpeg/ffmpeg.go
index 3b1e9710..b3c4b445 100644
--- a/ffmpeg/ffmpeg.go
+++ b/ffmpeg/ffmpeg.go
@@ -12,6 +12,7 @@ import (
"github.com/datarhei/core/v16/log"
"github.com/datarhei/core/v16/net"
"github.com/datarhei/core/v16/process"
+ "github.com/datarhei/core/v16/resources"
"github.com/datarhei/core/v16/session"
)
@@ -29,23 +30,26 @@ type FFmpeg interface {
}
type ProcessConfig struct {
- Reconnect bool // Whether to reconnect
- ReconnectDelay time.Duration // Duration until next reconnect
- StaleTimeout time.Duration // Duration to wait until killing the process if there is no progress in the process
- Timeout time.Duration // Duration to wait until killing the process
- LimitCPU float64 // Kill the process if the CPU usage in percent is above this value.
- LimitMemory uint64 // Kill the process if the memory consumption in bytes is above this value.
- LimitDuration time.Duration // Kill the process if the limits are exceeded for this duration.
- LimitMode string // How to limit the process, "hard" or "soft"
- Scheduler string // A scheduler for starting the process, either a concrete date (RFC3339) or in crontab syntax
- Args []string // Arguments for the process
- Parser process.Parser // Parser for the process output
- Logger log.Logger // Logger
- OnArgs func([]string) []string // Callback before starting the process to retrieve new arguments
- OnBeforeStart func() error // Callback which is called before the process will be started. If error is non-nil, the start will be refused.
- OnStart func() // Callback called after process has been started
- OnExit func(state string) // Callback called after the process stopped with exit state as argument
- OnStateChange func(from, to string) // Callback called on state change
+ Reconnect bool // Whether to reconnect
+ ReconnectDelay time.Duration // Duration until next reconnect
+ StaleTimeout time.Duration // Duration to wait until killing the process if there is no progress in the process
+ Timeout time.Duration // Duration to wait until killing the process
+ LimitCPU float64 // Kill the process if the CPU usage in percent is above this value.
+ LimitMemory uint64 // Kill the process if the memory consumption in bytes is above this value.
+ LimitGPUUsage float64 // Kill the process if the GPU usage (general) in percent is above this value.
+ LimitGPUEncoder float64 // Kill the process if the GPU usage (encoder) in percent is above this value.
+ LimitGPUDecoder float64 // Kill the process if the GPU usage (decoder) in percent is above this value.
+ LimitGPUMemory uint64 // Kill the process if the GPU memory consumption in bytes is above this value.
+ LimitDuration time.Duration // Kill the process if the limits are exceeded for this duration.
+ LimitMode string // How to limit the process, "hard" or "soft"
+ Scheduler string // A scheduler for starting the process, either a concrete date (RFC3339) or in crontab syntax
+ Args []string // Arguments for the process
+ Parser process.Parser // Parser for the process output
+ Logger log.Logger // Logger
+ OnBeforeStart func([]string) ([]string, error) // Callback which is called before the process will be started. The string slice is the list of arguments which can be modified. If error is non-nil, the start will be refused.
+ OnStart func() // Callback called after process has been started
+ OnExit func(state string) // Callback called after the process stopped with exit state as argument
+ OnStateChange func(from, to string) // Callback called on state change
}
// Config is the configuration for ffmpeg that is part of the configuration
@@ -60,6 +64,7 @@ type Config struct {
ValidatorOutput Validator
Portrange net.Portranger
Collector session.Collector
+ Resource resources.Resources
}
type ffmpeg struct {
@@ -77,11 +82,19 @@ type ffmpeg struct {
states process.States
statesLock sync.RWMutex
+
+ resources resources.Resources
}
func New(config Config) (FFmpeg, error) {
f := &ffmpeg{}
+ if config.Resource == nil {
+ return nil, fmt.Errorf("resources are required")
+ }
+
+ f.resources = config.Resource
+
binary, err := exec.LookPath(config.Binary)
if err != nil {
return nil, fmt.Errorf("invalid ffmpeg binary given: %w", err)
@@ -138,23 +151,26 @@ func (f *ffmpeg) New(config ProcessConfig) (process.Process, error) {
}
ffmpeg, err := process.New(process.Config{
- Binary: f.binary,
- Args: config.Args,
- Reconnect: config.Reconnect,
- ReconnectDelay: config.ReconnectDelay,
- StaleTimeout: config.StaleTimeout,
- Timeout: config.Timeout,
- LimitCPU: config.LimitCPU,
- LimitMemory: config.LimitMemory,
- LimitDuration: config.LimitDuration,
- LimitMode: limitMode,
- Scheduler: scheduler,
- Parser: config.Parser,
- Logger: config.Logger,
- OnArgs: config.OnArgs,
- OnBeforeStart: config.OnBeforeStart,
- OnStart: config.OnStart,
- OnExit: config.OnExit,
+ Binary: f.binary,
+ Args: config.Args,
+ Reconnect: config.Reconnect,
+ ReconnectDelay: config.ReconnectDelay,
+ StaleTimeout: config.StaleTimeout,
+ Timeout: config.Timeout,
+ LimitCPU: config.LimitCPU,
+ LimitMemory: config.LimitMemory,
+ LimitGPUUsage: config.LimitGPUUsage,
+ LimitGPUEncoder: config.LimitGPUEncoder,
+ LimitGPUDecoder: config.LimitGPUDecoder,
+ LimitGPUMemory: config.LimitGPUMemory,
+ LimitDuration: config.LimitDuration,
+ LimitMode: limitMode,
+ Scheduler: scheduler,
+ Parser: config.Parser,
+ Logger: config.Logger,
+ OnBeforeStart: config.OnBeforeStart,
+ OnStart: config.OnStart,
+ OnExit: config.OnExit,
OnStateChange: func(from, to string) {
f.statesLock.Lock()
switch to {
@@ -178,6 +194,7 @@ func (f *ffmpeg) New(config ProcessConfig) (process.Process, error) {
config.OnStateChange(from, to)
}
},
+ Resources: f.resources,
})
return ffmpeg, err
diff --git a/ffmpeg/parse/average.go b/ffmpeg/parse/average.go
index 8f715ddd..3d633594 100644
--- a/ffmpeg/parse/average.go
+++ b/ffmpeg/parse/average.go
@@ -3,23 +3,23 @@ package parse
import (
"time"
- "github.com/prep/average"
+ "github.com/datarhei/core/v16/math/average"
)
type averager struct {
- fps *average.SlidingWindow
- pps *average.SlidingWindow
- bitrate *average.SlidingWindow
+ fps *average.SMA
+ pps *average.SMA
+ bitrate *average.SMA
}
func (a *averager) init(window, granularity time.Duration) {
- a.fps, _ = average.New(window, granularity)
- a.pps, _ = average.New(window, granularity)
- a.bitrate, _ = average.New(window, granularity)
+ a.fps, _ = average.NewSMA(window, granularity)
+ a.pps, _ = average.NewSMA(window, granularity)
+ a.bitrate, _ = average.NewSMA(window, granularity)
}
func (a *averager) stop() {
- a.fps.Stop()
- a.pps.Stop()
- a.bitrate.Stop()
+ a.fps.Reset()
+ a.pps.Reset()
+ a.bitrate.Reset()
}
diff --git a/ffmpeg/parse/parser.go b/ffmpeg/parse/parser.go
index a6bf5127..2259159c 100644
--- a/ffmpeg/parse/parser.go
+++ b/ffmpeg/parse/parser.go
@@ -410,23 +410,15 @@ func (p *parser) Parse(line []byte) uint64 {
}
}
- p.averager.main.fps.Add(int64(p.stats.main.diff.frame))
- p.averager.main.pps.Add(int64(p.stats.main.diff.packet))
- p.averager.main.bitrate.Add(int64(p.stats.main.diff.size) * 8)
-
- p.progress.ffmpeg.FPS = p.averager.main.fps.Average(p.averager.window)
- p.progress.ffmpeg.PPS = p.averager.main.pps.Average(p.averager.window)
- p.progress.ffmpeg.Bitrate = p.averager.main.bitrate.Average(p.averager.window)
+ p.progress.ffmpeg.FPS = p.averager.main.fps.AddAndAverage(float64(p.stats.main.diff.frame))
+ p.progress.ffmpeg.PPS = p.averager.main.pps.AddAndAverage(float64(p.stats.main.diff.packet))
+ p.progress.ffmpeg.Bitrate = p.averager.main.bitrate.AddAndAverage(float64(p.stats.main.diff.size) * 8)
if len(p.averager.input) != 0 && len(p.averager.input) == len(p.progress.ffmpeg.Input) {
for i := range p.progress.ffmpeg.Input {
- p.averager.input[i].fps.Add(int64(p.stats.input[i].diff.frame))
- p.averager.input[i].pps.Add(int64(p.stats.input[i].diff.packet))
- p.averager.input[i].bitrate.Add(int64(p.stats.input[i].diff.size) * 8)
-
- p.progress.ffmpeg.Input[i].FPS = p.averager.input[i].fps.Average(p.averager.window)
- p.progress.ffmpeg.Input[i].PPS = p.averager.input[i].pps.Average(p.averager.window)
- p.progress.ffmpeg.Input[i].Bitrate = p.averager.input[i].bitrate.Average(p.averager.window)
+ p.progress.ffmpeg.Input[i].FPS = p.averager.input[i].fps.AddAndAverage(float64(p.stats.input[i].diff.frame))
+ p.progress.ffmpeg.Input[i].PPS = p.averager.input[i].pps.AddAndAverage(float64(p.stats.input[i].diff.packet))
+ p.progress.ffmpeg.Input[i].Bitrate = p.averager.input[i].bitrate.AddAndAverage(float64(p.stats.input[i].diff.size) * 8)
if p.collector.IsCollectableIP(p.process.input[i].IP) {
p.collector.Activate("")
@@ -437,13 +429,9 @@ func (p *parser) Parse(line []byte) uint64 {
if len(p.averager.output) != 0 && len(p.averager.output) == len(p.progress.ffmpeg.Output) {
for i := range p.progress.ffmpeg.Output {
- p.averager.output[i].fps.Add(int64(p.stats.output[i].diff.frame))
- p.averager.output[i].pps.Add(int64(p.stats.output[i].diff.packet))
- p.averager.output[i].bitrate.Add(int64(p.stats.output[i].diff.size) * 8)
-
- p.progress.ffmpeg.Output[i].FPS = p.averager.output[i].fps.Average(p.averager.window)
- p.progress.ffmpeg.Output[i].PPS = p.averager.output[i].pps.Average(p.averager.window)
- p.progress.ffmpeg.Output[i].Bitrate = p.averager.output[i].bitrate.Average(p.averager.window)
+ p.progress.ffmpeg.Output[i].FPS = p.averager.output[i].fps.AddAndAverage(float64(p.stats.output[i].diff.frame))
+ p.progress.ffmpeg.Output[i].PPS = p.averager.output[i].pps.AddAndAverage(float64(p.stats.output[i].diff.packet))
+ p.progress.ffmpeg.Output[i].Bitrate = p.averager.output[i].bitrate.AddAndAverage(float64(p.stats.output[i].diff.size) * 8)
if p.collector.IsCollectableIP(p.process.output[i].IP) {
p.collector.Activate("")
@@ -631,7 +619,7 @@ func (p *parser) Stop(state string, pusage process.Usage) {
usage.CPU.Max = pusage.CPU.Max
usage.CPU.Limit = pusage.CPU.Limit
- usage.Memory.Average = pusage.Memory.Average
+ usage.Memory.Average = uint64(pusage.Memory.Average)
usage.Memory.Max = pusage.Memory.Max
usage.Memory.Limit = pusage.Memory.Limit
@@ -653,7 +641,7 @@ func (p *parser) Progress() Progress {
continue
}
- progress.Input[i].AVstream = av.export()
+ progress.Input[i].AVstream = av.export(io.Type)
}
progress.Started = p.stats.initialized
diff --git a/ffmpeg/parse/parser_test.go b/ffmpeg/parse/parser_test.go
index e041fc4f..3e351f48 100644
--- a/ffmpeg/parse/parser_test.go
+++ b/ffmpeg/parse/parser_test.go
@@ -184,11 +184,11 @@ func TestParserLogHistory(t *testing.T) {
require.Equal(t, Progress{
Started: true,
Frame: 5968,
- FPS: 0, // is calculated with averager
+ FPS: 5968. / 30, // is calculated with averager
Quantizer: 19.4,
Size: 453632,
Time: d.Seconds(),
- Bitrate: 0, // is calculated with averager
+ Bitrate: 443. * 1024 * 8 / 30, // is calculated with averager
Speed: 0.999,
Drop: 3522,
Dup: 87463,
@@ -245,11 +245,11 @@ func TestParserImportLogHistory(t *testing.T) {
require.Equal(t, Progress{
Started: true,
Frame: 42,
- FPS: 0, // is calculated with averager
+ FPS: 5968. / 30, // is calculated with averager
Quantizer: 19.4,
Size: 453632,
Time: d.Seconds(),
- Bitrate: 0, // is calculated with averager
+ Bitrate: 443. * 1024 * 8 / 30, // is calculated with averager
Speed: 0.999,
Drop: 3522,
Dup: 87463,
@@ -312,11 +312,11 @@ func TestParserLogMinimalHistoryLength(t *testing.T) {
require.Equal(t, Progress{
Started: true,
Frame: 5968,
- FPS: 0, // is calculated with averager
+ FPS: 5968. / 30, // is calculated with averager
Quantizer: 19.4,
Size: 453632,
Time: d.Seconds(),
- Bitrate: 0, // is calculated with averager
+ Bitrate: 443. * 1024 * 8 / 30, // is calculated with averager
Speed: 0.999,
Drop: 3522,
Dup: 87463,
@@ -330,11 +330,11 @@ func TestParserLogMinimalHistoryLength(t *testing.T) {
require.Equal(t, Progress{
Started: true,
Frame: 5968,
- FPS: 0, // is calculated with averager
+ FPS: 5968. / 30, // is calculated with averager
Quantizer: 19.4,
Size: 453632,
Time: d.Seconds(),
- Bitrate: 0, // is calculated with averager
+ Bitrate: 443. * 1024 * 8 / 30, // is calculated with averager
Speed: 0.999,
Drop: 3522,
Dup: 87463,
@@ -884,11 +884,11 @@ func TestParserProgressPlayout(t *testing.T) {
Coder: "h264",
Frame: 7,
Keyframe: 1,
- FPS: 0,
+ FPS: 7. / 30,
Packet: 11,
- PPS: 0,
+ PPS: 11. / 30,
Size: 42,
- Bitrate: 0,
+ Bitrate: 42. * 8 / 30,
Pixfmt: "yuvj420p",
Quantizer: 0,
Width: 1280,
@@ -938,11 +938,11 @@ func TestParserProgressPlayout(t *testing.T) {
Coder: "libx264",
Frame: 7,
Keyframe: 1,
- FPS: 0,
+ FPS: 7. / 30,
Packet: 0,
PPS: 0,
Size: 5,
- Bitrate: 0,
+ Bitrate: 5. * 8 / 30,
Extradata: 32,
Pixfmt: "yuvj420p",
Quantizer: 0,
@@ -962,11 +962,149 @@ func TestParserProgressPlayout(t *testing.T) {
Codec: "h264",
Coder: "copy",
Frame: 11,
- FPS: 0,
+ FPS: 11. / 30,
Packet: 11,
+ PPS: 11. / 30,
+ Size: 231424,
+ Bitrate: 231424. * 8 / 30,
+ Pixfmt: "yuvj420p",
+ Quantizer: -1,
+ Width: 1280,
+ Height: 720,
+ Sampling: 0,
+ Layout: "",
+ Channels: 0,
+ AVstream: nil,
+ },
+ },
+ Frame: 7,
+ Packet: 0,
+ FPS: 7. / 30,
+ PPS: 0,
+ Quantizer: 0,
+ Size: 231424,
+ Time: 0.56,
+ Bitrate: 231424. * 8 / 30,
+ Speed: 0.4,
+ Drop: 0,
+ Dup: 0,
+ }, progress)
+}
+
+func TestParserProgressPlayoutVideo(t *testing.T) {
+ parser := New(Config{
+ LogLines: 20,
+ }).(*parser)
+
+ parser.Parse([]byte(`ffmpeg.inputs:[{"url":"playout:https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk.m3u8","format":"playout","index":0,"stream":0,"type":"video","codec":"h264","coder":"h264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":20.666666,"pix_fmt":"yuvj420p","width":1280,"height":720}]`))
+ parser.Parse([]byte(`ffmpeg.outputs:[{"url":"/dev/null","format":"flv","index":0,"stream":0,"type":"video","codec":"h264","coder":"libx264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":25.000000,"pix_fmt":"yuvj420p","width":1280,"height":720},{"url":"/dev/null","format":"mp4","index":1,"stream":0,"type":"video","codec":"h264","coder":"copy","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","fps":20.666666,"pix_fmt":"yuvj420p","width":1280,"height":720}]`))
+ parser.Parse([]byte(`ffmpeg.progress:{"inputs":[{"index":0,"stream":0,"frame":7,"keyframe":1,"packet":11,"size_kb":226,"size_bytes":42}],"outputs":[{"index":0,"stream":0,"frame":7,"keyframe":1,"packet":0,"q":0.0,"size_kb":0,"size_bytes":5,"extradata_size_bytes":32},{"index":1,"stream":0,"frame":11,"packet":11,"q":-1.0,"size_kb":226}],"frame":7,"packet":0,"q":0.0,"size_kb":226,"time":"0h0m0.56s","speed":0.4,"dup":0,"drop":0}`))
+ parser.Parse([]byte(`avstream.progress:{"id":"playout:https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk.m3u8","url":"https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk.m3u8","stream":0,"queue":140,"aqueue":42,"dup":5,"drop":8,"enc":7,"looping":true,"duplicating":true,"gop":"key","mode":"live","input":{"state":"running","packet":148,"size_kb":1529,"time":5},"output":{"state":"running","packet":8,"size_kb":128,"time":1},"swap":{"url":"","status":"waiting","lasturl":"","lasterror":""},"video":{"queue":99,"dup":5,"drop":96,"enc":23,"input":{"state":"running","packet":248,"size_kb":1250,"time":8},"output":{"state":"running","packet":149,"size_kb":748,"time":5},"codec":"h264","profile":578,"level":31,"pix_fmt":"yuv420p","width":1280,"height":720},"audio":{"queue":175,"dup":0,"drop":0,"enc":1,"input":{"state":"running","packet":431,"size_kb":4,"time":8},"output":{"state":"running","packet":256,"size_kb":1,"time":5},"codec":"aac","profile":1,"level":-99,"sample_fmt":"fltp","sampling_hz":44100,"layout":"mono","channels":1}}`))
+
+ progress := parser.Progress()
+
+ require.Equal(t, Progress{
+ Started: true,
+ Input: []ProgressIO{
+ {
+ Address: "playout:https://cdn.livespotting.com/vpu/e9slfpe3/z60wzayk.m3u8",
+ Index: 0,
+ Stream: 0,
+ Format: "playout",
+ Type: "video",
+ Codec: "h264",
+ Coder: "h264",
+ Frame: 7,
+ Keyframe: 1,
+ FPS: 7. / 30,
+ Packet: 11,
+ PPS: 11. / 30,
+ Size: 42,
+ Bitrate: 42. * 8 / 30,
+ Pixfmt: "yuvj420p",
+ Quantizer: 0,
+ Width: 1280,
+ Height: 720,
+ Sampling: 0,
+ Layout: "",
+ Channels: 0,
+ AVstream: &AVstream{
+ Input: AVstreamIO{
+ State: "running",
+ Packet: 248,
+ Time: 8,
+ Size: 1250 * 1024,
+ },
+ Output: AVstreamIO{
+ State: "running",
+ Packet: 149,
+ Time: 5,
+ Size: 748 * 1024,
+ },
+ Aqueue: 0,
+ Queue: 99,
+ Dup: 5,
+ Drop: 96,
+ Enc: 23,
+ Looping: true,
+ Duplicating: true,
+ GOP: "key",
+ Mode: "live",
+ Swap: AVStreamSwap{
+ URL: "",
+ Status: "waiting",
+ LastURL: "",
+ LastError: "",
+ },
+ Codec: "h264",
+ Profile: 578,
+ Level: 31,
+ Pixfmt: "yuv420p",
+ Width: 1280,
+ Height: 720,
+ },
+ },
+ },
+ Output: []ProgressIO{
+ {
+ Address: "/dev/null",
+ Index: 0,
+ Stream: 0,
+ Format: "flv",
+ Type: "video",
+ Codec: "h264",
+ Coder: "libx264",
+ Frame: 7,
+ Keyframe: 1,
+ FPS: 7. / 30,
+ Packet: 0,
PPS: 0,
+ Size: 5,
+ Bitrate: 5. * 8 / 30,
+ Extradata: 32,
+ Pixfmt: "yuvj420p",
+ Quantizer: 0,
+ Width: 1280,
+ Height: 720,
+ Sampling: 0,
+ Layout: "",
+ Channels: 0,
+ AVstream: nil,
+ },
+ {
+ Address: "/dev/null",
+ Index: 1,
+ Stream: 0,
+ Format: "mp4",
+ Type: "video",
+ Codec: "h264",
+ Coder: "copy",
+ Frame: 11,
+ FPS: 11. / 30,
+ Packet: 11,
+ PPS: 11. / 30,
Size: 231424,
- Bitrate: 0,
+ Bitrate: 231424. * 8 / 30,
Pixfmt: "yuvj420p",
Quantizer: -1,
Width: 1280,
@@ -979,18 +1117,252 @@ func TestParserProgressPlayout(t *testing.T) {
},
Frame: 7,
Packet: 0,
- FPS: 0,
+ FPS: 7. / 30,
PPS: 0,
Quantizer: 0,
Size: 231424,
Time: 0.56,
- Bitrate: 0,
+ Bitrate: 231424. * 8 / 30,
Speed: 0.4,
Drop: 0,
Dup: 0,
}, progress)
}
+func TestParserProgressPlayoutAudioVideo(t *testing.T) {
+ parser := New(Config{
+ LogLines: 20,
+ }).(*parser)
+
+ parser.Parse([]byte(`ffmpeg.inputs:[{"url":"playout:http://192.168.1.220/memfs/source.m3u8","format":"playout","index":0,"stream":0,"type":"video","codec":"h264","coder":"h264","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","profile":578,"level":31,"fps":25.000000,"pix_fmt":"yuv420p","width":1280,"height":720},{"url":"playout:http://192.168.1.220/memfs/source.m3u8","format":"playout","index":0,"stream":1,"type":"audio","codec":"aac","coder":"aac","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","profile":1,"level":-99,"sample_fmt":"fltp","sampling_hz":44100,"layout":"mono","channels":1}]`))
+ parser.Parse([]byte(`ffmpeg.outputs:[{"url":"pipe:","format":"null","index":0,"stream":0,"type":"video","codec":"h264","coder":"copy","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","profile":578,"level":31,"fps":25.000000,"pix_fmt":"yuv420p","width":1280,"height":720},{"url":"pipe:","format":"null","index":0,"stream":1,"type":"audio","codec":"aac","coder":"copy","bitrate_kbps":0,"duration_sec":0.000000,"language":"und","profile":1,"level":-99,"sample_fmt":"fltp","sampling_hz":44100,"layout":"mono","channels":1}]`))
+ parser.Parse([]byte(`ffmpeg.progress:{"inputs":[{"index":0,"stream":0,"framerate":{"min":25.000,"max":25.000,"avg":25.000},"gop":{"min":50.000,"max":50.000,"avg":50.000},"frame":101,"keyframe":3,"packet":101,"size_kb":518,"size_bytes":530273},{"index":0,"stream":1,"framerate":{"min":43.083,"max":43.083,"avg":43.083},"gop":{"min":1.000,"max":1.000,"avg":1.000},"frame":174,"keyframe":174,"packet":174,"size_kb":1,"size_bytes":713}],"outputs":[{"index":0,"stream":0,"framerate":{"min":25.000,"max":25.000,"avg":25.000},"gop":{"min":50.000,"max":50.000,"avg":50.000},"frame":101,"keyframe":3,"packet":101,"q":-1.0,"size_kb":518,"size_bytes":530273,"extradata_size_bytes":0},{"index":0,"stream":1,"framerate":{"min":43.083,"max":43.083,"avg":43.083},"gop":{"min":1.000,"max":1.000,"avg":1.000},"frame":174,"keyframe":174,"packet":174,"size_kb":1,"size_bytes":713,"extradata_size_bytes":0}],"frame":101,"packet":101,"q":-1.0,"size_kb":519,"size_bytes":530986,"time":"0h0m4.3s","speed":1,"dup":0,"drop":0}`))
+ parser.Parse([]byte(`avstream.progress:{"id":"playout:http://192.168.1.220/memfs/source.m3u8","url":"http://192.168.1.220/memfs/source.m3u8","stream":0,"queue":124,"aqueue":218,"dup":0,"drop":0,"enc":0,"looping":true,"looping_runtime":42,"duplicating":true,"gop":"key","mode":"live","input":{"state":"running","packet":679,"size_kb":1255,"time":7},"output":{"state":"running","packet":337,"size_kb":628,"time":4},"video":{"queue":124,"dup":1,"drop":2,"enc":3,"input":{"state":"running","packet":248,"size_kb":1250,"time":7},"output":{"state":"running","packet":124,"size_kb":627,"time":4},"codec":"h264","profile":578,"level":31,"pix_fmt":"yuv420p","width":1280,"height":720},"audio":{"queue":218,"dup":5,"drop":6,"enc":1,"input":{"state":"running","packet":431,"size_kb":4,"time":7},"output":{"state":"running","packet":213,"size_kb":0,"time":4},"codec":"aac","profile":1,"level":-99,"sample_fmt":"fltp","sampling_hz":44100,"layout":"mono","channels":1},"swap":{"url":"","status":"waiting","lasturl":"","lasterror":""}}`))
+
+ progress := parser.Progress()
+
+ require.Equal(t, Progress{
+ Started: true,
+ Input: []ProgressIO{
+ {
+ Address: "playout:http://192.168.1.220/memfs/source.m3u8",
+ Index: 0,
+ Stream: 0,
+ Format: "playout",
+ Type: "video",
+ Codec: "h264",
+ Coder: "h264",
+ Profile: 578,
+ Level: 31,
+ Frame: 101,
+ Keyframe: 3,
+ Framerate: struct {
+ Min float64
+ Max float64
+ Average float64
+ }{25, 25, 25},
+ FPS: 101. / 30,
+ Packet: 101,
+ PPS: 101. / 30,
+ Size: 530273,
+ Bitrate: 530273. * 8 / 30,
+ Pixfmt: "yuv420p",
+ Quantizer: 0,
+ Width: 1280,
+ Height: 720,
+ Samplefmt: "",
+ Sampling: 0,
+ Layout: "",
+ Channels: 0,
+ AVstream: &AVstream{
+ Input: AVstreamIO{
+ State: "running",
+ Packet: 248,
+ Time: 7,
+ Size: 1250 * 1024,
+ },
+ Output: AVstreamIO{
+ State: "running",
+ Packet: 124,
+ Time: 4,
+ Size: 642048,
+ },
+ Aqueue: 0,
+ Queue: 124,
+ Dup: 1,
+ Drop: 2,
+ Enc: 3,
+ Looping: true,
+ LoopingRuntime: 42,
+ Duplicating: true,
+ GOP: "key",
+ Mode: "live",
+ Swap: AVStreamSwap{
+ URL: "",
+ Status: "waiting",
+ LastURL: "",
+ LastError: "",
+ },
+ Codec: "h264",
+ Profile: 578,
+ Level: 31,
+ Pixfmt: "yuv420p",
+ Width: 1280,
+ Height: 720,
+ },
+ },
+ {
+ Address: "playout:http://192.168.1.220/memfs/source.m3u8",
+ Index: 0,
+ Stream: 1,
+ Format: "playout",
+ Type: "audio",
+ Codec: "aac",
+ Coder: "aac",
+ Profile: 1,
+ Level: -99,
+ Frame: 174,
+ Keyframe: 174,
+ Framerate: struct {
+ Min float64
+ Max float64
+ Average float64
+ }{43.083, 43.083, 43.083},
+ FPS: 174. / 30,
+ Packet: 174,
+ PPS: 174. / 30,
+ Size: 713,
+ Bitrate: 713. * 8 / 30,
+ Pixfmt: "",
+ Quantizer: 0,
+ Width: 0,
+ Height: 0,
+ Samplefmt: "fltp",
+ Sampling: 44100,
+ Layout: "mono",
+ Channels: 1,
+ AVstream: &AVstream{
+ Input: AVstreamIO{
+ State: "running",
+ Packet: 431,
+ Time: 7,
+ Size: 4096,
+ },
+ Output: AVstreamIO{
+ State: "running",
+ Packet: 213,
+ Time: 4,
+ Size: 0,
+ },
+ Aqueue: 0,
+ Queue: 218,
+ Dup: 5,
+ Drop: 6,
+ Enc: 1,
+ Looping: true,
+ LoopingRuntime: 42,
+ Duplicating: true,
+ GOP: "key",
+ Mode: "live",
+ Swap: AVStreamSwap{
+ URL: "",
+ Status: "waiting",
+ LastURL: "",
+ LastError: "",
+ },
+ Codec: "aac",
+ Profile: 1,
+ Level: -99,
+ Pixfmt: "",
+ Width: 0,
+ Height: 0,
+ Samplefmt: "fltp",
+ Sampling: 44100,
+ Layout: "mono",
+ Channels: 1,
+ },
+ },
+ },
+ Output: []ProgressIO{
+ {
+ Address: "pipe:",
+ Index: 0,
+ Stream: 0,
+ Format: "null",
+ Type: "video",
+ Codec: "h264",
+ Coder: "copy",
+ Profile: 578,
+ Level: 31,
+ Frame: 101,
+ Keyframe: 3,
+ Framerate: struct {
+ Min float64
+ Max float64
+ Average float64
+ }{25, 25, 25},
+ FPS: 101. / 30,
+ Packet: 101,
+ PPS: 101. / 30,
+ Size: 530273,
+ Bitrate: 530273. * 8 / 30,
+ Extradata: 0,
+ Pixfmt: "yuv420p",
+ Quantizer: -1,
+ Width: 1280,
+ Height: 720,
+ Sampling: 0,
+ Layout: "",
+ Channels: 0,
+ AVstream: nil,
+ },
+ {
+ Address: "pipe:",
+ Index: 0,
+ Stream: 1,
+ Format: "null",
+ Type: "audio",
+ Codec: "aac",
+ Coder: "copy",
+ Profile: 1,
+ Level: -99,
+ Frame: 174,
+ Keyframe: 174,
+ Framerate: struct {
+ Min float64
+ Max float64
+ Average float64
+ }{43.083, 43.083, 43.083},
+ FPS: 174. / 30,
+ Packet: 174,
+ PPS: 174. / 30,
+ Size: 713,
+ Bitrate: 713. * 8 / 30,
+ Pixfmt: "",
+ Quantizer: 0,
+ Width: 0,
+ Height: 0,
+ Samplefmt: "fltp",
+ Sampling: 44100,
+ Layout: "mono",
+ Channels: 1,
+ AVstream: nil,
+ },
+ },
+ Frame: 101,
+ Packet: 101,
+ FPS: 101. / 30,
+ PPS: 101. / 30,
+ Quantizer: -1,
+ Size: 530986,
+ Time: 4.3,
+ Bitrate: 530986. * 8 / 30,
+ Speed: 1,
+ Drop: 0,
+ Dup: 0,
+ }, progress)
+}
+
func TestParserStreamMapping(t *testing.T) {
parser := New(Config{
LogLines: 20,
diff --git a/ffmpeg/parse/types.go b/ffmpeg/parse/types.go
index 01ffcc78..1c98f6e8 100644
--- a/ffmpeg/parse/types.go
+++ b/ffmpeg/parse/types.go
@@ -74,43 +74,97 @@ func (avswap *ffmpegAVStreamSwap) export() AVStreamSwap {
}
}
+type ffmpegAVStreamTrack struct {
+ Queue uint64 `json:"queue"`
+ Dup uint64 `json:"dup"`
+ Drop uint64 `json:"drop"`
+ Enc uint64 `json:"enc"`
+ Input ffmpegAVstreamIO `json:"input"`
+ Output ffmpegAVstreamIO `json:"output"`
+ Codec string `json:"codec"`
+ Profile int `json:"profile"`
+ Level int `json:"level"`
+ Pixfmt string `json:"pix_fmt"`
+ Width uint64 `json:"width"`
+ Height uint64 `json:"height"`
+ Samplefmt string `json:"sample_fmt"`
+ Sampling uint64 `json:"sampling_hz"`
+ Layout string `json:"layout"`
+ Channels uint64 `json:"channels"`
+}
+
type ffmpegAVstream struct {
- Input ffmpegAVstreamIO `json:"input"`
- Output ffmpegAVstreamIO `json:"output"`
- Address string `json:"id"`
- URL string `json:"url"`
- Stream uint64 `json:"stream"`
- Aqueue uint64 `json:"aqueue"`
- Queue uint64 `json:"queue"`
- Dup uint64 `json:"dup"`
- Drop uint64 `json:"drop"`
- Enc uint64 `json:"enc"`
- Looping bool `json:"looping"`
- LoopingRuntime uint64 `json:"looping_runtime"`
- Duplicating bool `json:"duplicating"`
- GOP string `json:"gop"`
- Mode string `json:"mode"`
- Debug interface{} `json:"debug"`
- Swap ffmpegAVStreamSwap `json:"swap"`
-}
-
-func (av *ffmpegAVstream) export() *AVstream {
- return &AVstream{
- Aqueue: av.Aqueue,
- Queue: av.Queue,
- Drop: av.Drop,
- Dup: av.Dup,
- Enc: av.Enc,
+ Input ffmpegAVstreamIO `json:"input"`
+ Output ffmpegAVstreamIO `json:"output"`
+ Audio ffmpegAVStreamTrack `json:"audio"`
+ Video ffmpegAVStreamTrack `json:"video"`
+ Address string `json:"id"`
+ URL string `json:"url"`
+ Stream uint64 `json:"stream"`
+ Aqueue uint64 `json:"aqueue"`
+ Queue uint64 `json:"queue"`
+ Dup uint64 `json:"dup"`
+ Drop uint64 `json:"drop"`
+ Enc uint64 `json:"enc"`
+ Looping bool `json:"looping"`
+ LoopingRuntime uint64 `json:"looping_runtime"`
+ Duplicating bool `json:"duplicating"`
+ GOP string `json:"gop"`
+ Mode string `json:"mode"`
+ Debug interface{} `json:"debug"`
+ Swap ffmpegAVStreamSwap `json:"swap"`
+}
+
+func (av *ffmpegAVstream) export(trackType string) *AVstream {
+ avs := &AVstream{
Looping: av.Looping,
LoopingRuntime: av.LoopingRuntime,
Duplicating: av.Duplicating,
GOP: av.GOP,
Mode: av.Mode,
- Input: av.Input.export(),
- Output: av.Output.export(),
Debug: av.Debug,
Swap: av.Swap.export(),
}
+
+ hasTracks := len(av.Video.Codec) != 0
+
+ if hasTracks {
+ var track *ffmpegAVStreamTrack = nil
+
+ if trackType == "audio" {
+ track = &av.Audio
+ } else {
+ track = &av.Video
+ }
+
+ avs.Queue = track.Queue
+ avs.Drop = track.Drop
+ avs.Dup = track.Dup
+ avs.Enc = track.Enc
+ avs.Input = track.Input.export()
+ avs.Output = track.Output.export()
+
+ avs.Codec = track.Codec
+ avs.Profile = track.Profile
+ avs.Level = track.Level
+ avs.Pixfmt = track.Pixfmt
+ avs.Width = track.Width
+ avs.Height = track.Height
+ avs.Samplefmt = track.Samplefmt
+ avs.Sampling = track.Sampling
+ avs.Layout = track.Layout
+ avs.Channels = track.Channels
+ } else {
+ avs.Queue = av.Queue
+ avs.Aqueue = av.Aqueue
+ avs.Drop = av.Drop
+ avs.Dup = av.Dup
+ avs.Enc = av.Enc
+ avs.Input = av.Input.export()
+ avs.Output = av.Output.export()
+ }
+
+ return avs
}
type ffmpegProgressIO struct {
@@ -218,6 +272,8 @@ type ffmpegProcessIO struct {
Type string `json:"type"`
Codec string `json:"codec"`
Coder string `json:"coder"`
+ Profile int `json:"profile"`
+ Level int `json:"level"`
// video
Pixfmt string `json:"pix_fmt"`
@@ -225,26 +281,30 @@ type ffmpegProcessIO struct {
Height uint64 `json:"height"`
// audio
- Sampling uint64 `json:"sampling_hz"`
- Layout string `json:"layout"`
- Channels uint64 `json:"channels"`
+ Samplefmt string `json:"sample_fmt"`
+ Sampling uint64 `json:"sampling_hz"`
+ Layout string `json:"layout"`
+ Channels uint64 `json:"channels"`
}
func (io *ffmpegProcessIO) export() ProgressIO {
return ProgressIO{
- Address: io.Address,
- Format: io.Format,
- Index: io.Index,
- Stream: io.Stream,
- Type: io.Type,
- Codec: io.Codec,
- Coder: io.Coder,
- Pixfmt: io.Pixfmt,
- Width: io.Width,
- Height: io.Height,
- Sampling: io.Sampling,
- Layout: io.Layout,
- Channels: io.Channels,
+ Address: io.Address,
+ Format: io.Format,
+ Index: io.Index,
+ Stream: io.Stream,
+ Type: io.Type,
+ Codec: io.Codec,
+ Coder: io.Coder,
+ Profile: io.Profile,
+ Level: io.Level,
+ Pixfmt: io.Pixfmt,
+ Width: io.Width,
+ Height: io.Height,
+ Samplefmt: io.Samplefmt,
+ Sampling: io.Sampling,
+ Layout: io.Layout,
+ Channels: io.Channels,
}
}
@@ -422,6 +482,8 @@ type ProgressIO struct {
Type string
Codec string
Coder string
+ Profile int
+ Level int
Frame uint64
Keyframe uint64
Framerate struct {
@@ -443,9 +505,10 @@ type ProgressIO struct {
Height uint64
// Audio
- Sampling uint64
- Layout string
- Channels uint64
+ Samplefmt string
+ Sampling uint64 // Hz
+ Layout string // mono, stereo, ...
+ Channels uint64
// avstream
AVstream *AVstream
@@ -498,11 +561,22 @@ type AVstream struct {
Mode string
Debug interface{}
Swap AVStreamSwap
+ Codec string
+ Profile int
+ Level int
+ Pixfmt string
+ Width uint64
+ Height uint64
+ Samplefmt string
+ Sampling uint64
+ Layout string
+ Channels uint64
}
type Usage struct {
CPU UsageCPU
Memory UsageMemory
+ GPU UsageGPU
}
type UsageCPU struct {
@@ -513,7 +587,27 @@ type UsageCPU struct {
}
type UsageMemory struct {
+ Average uint64
+ Max uint64
+ Limit uint64
+}
+
+type UsageGPU struct {
+ Index int
+ Usage UsageGPUUsage
+ Encoder UsageGPUUsage
+ Decoder UsageGPUUsage
+ Memory UsageGPUMemory
+}
+
+type UsageGPUUsage struct {
Average float64
+ Max float64
+ Limit float64
+}
+
+type UsageGPUMemory struct {
+ Average uint64
Max uint64
Limit uint64
}
diff --git a/ffmpeg/skills/skills_test.go b/ffmpeg/skills/skills_test.go
index 0ee8b74e..f8f67bfd 100644
--- a/ffmpeg/skills/skills_test.go
+++ b/ffmpeg/skills/skills_test.go
@@ -35,7 +35,7 @@ func TestNewInvalidBinary(t *testing.T) {
}
func TestNew(t *testing.T) {
- binary, err := testhelper.BuildBinary("ffmpeg", "../../internal/testhelper")
+ binary, err := testhelper.BuildBinary("ffmpeg")
require.NoError(t, err, "Failed to build helper program")
skills, err := New(binary)
@@ -326,7 +326,7 @@ func TestEqualEmptySkills(t *testing.T) {
}
func TestEqualSkills(t *testing.T) {
- binary, err := testhelper.BuildBinary("ffmpeg", "../../internal/testhelper")
+ binary, err := testhelper.BuildBinary("ffmpeg")
require.NoError(t, err, "Failed to build helper program")
s1, err := New(binary)
diff --git a/go.mod b/go.mod
index a19cd943..47f22d5c 100644
--- a/go.mod
+++ b/go.mod
@@ -1,60 +1,57 @@
module github.com/datarhei/core/v16
-go 1.22.0
+go 1.22.5
-toolchain go1.22.1
+toolchain go1.23.1
require (
- github.com/99designs/gqlgen v0.17.49
- github.com/Masterminds/semver/v3 v3.2.1
- github.com/adhocore/gronx v1.8.1
+ github.com/99designs/gqlgen v0.17.55
+ github.com/Masterminds/semver/v3 v3.3.0
+ github.com/adhocore/gronx v1.19.1
github.com/andybalholm/brotli v1.1.0
github.com/atrox/haikunatorgo/v2 v2.0.1
- github.com/caddyserver/certmagic v0.21.3
- github.com/datarhei/gosrt v0.6.0
+ github.com/caddyserver/certmagic v0.21.4
+ github.com/datarhei/gosrt v0.7.0
github.com/datarhei/joy4 v0.0.0-20240603190808-b1407345907e
+ github.com/dolthub/swiss v0.2.1
github.com/fujiwara/shapeio v1.0.0
- github.com/go-playground/validator/v10 v10.22.0
+ github.com/go-playground/validator/v10 v10.22.1
github.com/gobwas/glob v0.2.3
- github.com/goccy/go-json v0.10.3
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/golang-jwt/jwt/v5 v5.2.1
github.com/google/gops v0.3.28
github.com/google/uuid v1.6.0
github.com/hashicorp/go-hclog v1.6.3
- github.com/hashicorp/raft v1.7.0
+ github.com/hashicorp/raft v1.7.1
github.com/hashicorp/raft-boltdb/v2 v2.3.0
github.com/invopop/jsonschema v0.4.0
github.com/joho/godotenv v1.5.1
- github.com/klauspost/compress v1.17.9
+ github.com/klauspost/compress v1.17.10
github.com/klauspost/cpuid/v2 v2.2.8
github.com/labstack/echo/v4 v4.12.0
- github.com/lestrrat-go/strftime v1.0.6
+ github.com/lestrrat-go/strftime v1.1.0
github.com/lithammer/shortuuid/v4 v4.0.0
github.com/mattn/go-isatty v0.0.20
- github.com/minio/minio-go/v7 v7.0.74
- github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
- github.com/prometheus/client_golang v1.19.1
+ github.com/minio/minio-go/v7 v7.0.77
+ github.com/prometheus/client_golang v1.20.4
github.com/puzpuzpuz/xsync/v3 v3.4.0
github.com/shirou/gopsutil/v3 v3.24.5
github.com/stretchr/testify v1.9.0
github.com/swaggo/echo-swagger v1.4.1
github.com/swaggo/swag v1.16.3
github.com/tklauser/go-sysconf v0.3.14
- github.com/vektah/gqlparser/v2 v2.5.16
+ github.com/vektah/gqlparser/v2 v2.5.17
github.com/xeipuuv/gojsonschema v1.2.0
- go.etcd.io/bbolt v1.3.10
- go.uber.org/automaxprocs v1.5.3
+ go.etcd.io/bbolt v1.3.11
+ go.uber.org/automaxprocs v1.6.0
go.uber.org/zap v1.27.0
- golang.org/x/crypto v0.25.0
- golang.org/x/mod v0.19.0
+ golang.org/x/crypto v0.28.0
+ golang.org/x/mod v0.21.0
)
-//replace github.com/datarhei/core-client-go/v16 => ../core-client-go
-
require (
github.com/KyleBanks/depth v1.2.1 // indirect
- github.com/agnivade/levenshtein v1.1.1 // indirect
+ github.com/agnivade/levenshtein v1.2.0 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c // indirect
github.com/beorn7/perks v1.0.1 // indirect
@@ -63,9 +60,10 @@ require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/dolthub/maphash v0.1.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/fatih/color v1.17.0 // indirect
- github.com/gabriel-vasile/mimetype v1.4.4 // indirect
+ github.com/gabriel-vasile/mimetype v1.4.5 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
@@ -75,6 +73,7 @@ require (
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
+ github.com/goccy/go-json v0.10.3 // indirect
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
@@ -86,42 +85,41 @@ require (
github.com/labstack/gommon v0.4.2 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/libdns/libdns v0.2.2 // indirect
- github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae // indirect
+ github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
- github.com/mholt/acmez/v2 v2.0.1 // indirect
- github.com/miekg/dns v1.1.61 // indirect
+ github.com/mholt/acmez/v2 v2.0.3 // indirect
+ github.com/miekg/dns v1.1.62 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
- github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common v0.55.0 // indirect
+ github.com/prometheus/common v0.60.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
- github.com/rs/xid v1.5.0 // indirect
+ github.com/rs/xid v1.6.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sosodev/duration v1.3.1 // indirect
github.com/swaggo/files/v2 v2.0.1 // indirect
- github.com/tklauser/numcpus v0.8.0 // indirect
- github.com/urfave/cli/v2 v2.27.2 // indirect
+ github.com/tklauser/numcpus v0.9.0 // indirect
+ github.com/urfave/cli/v2 v2.27.4 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.2.2 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
- github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect
+ github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
- github.com/zeebo/blake3 v0.2.3 // indirect
+ github.com/zeebo/blake3 v0.2.4 // indirect
go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/net v0.27.0 // indirect
- golang.org/x/sync v0.7.0 // indirect
- golang.org/x/sys v0.22.0 // indirect
- golang.org/x/text v0.16.0 // indirect
- golang.org/x/time v0.5.0 // indirect
- golang.org/x/tools v0.23.0 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
+ golang.org/x/net v0.30.0 // indirect
+ golang.org/x/sync v0.8.0 // indirect
+ golang.org/x/sys v0.26.0 // indirect
+ golang.org/x/text v0.19.0 // indirect
+ golang.org/x/time v0.7.0 // indirect
+ golang.org/x/tools v0.26.0 // indirect
+ google.golang.org/protobuf v1.35.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/go.sum b/go.sum
index 4d098652..95462ea3 100644
--- a/go.sum
+++ b/go.sum
@@ -1,16 +1,16 @@
-github.com/99designs/gqlgen v0.17.49 h1:b3hNGexHd33fBSAd4NDT/c3NCcQzcAVkknhN9ym36YQ=
-github.com/99designs/gqlgen v0.17.49/go.mod h1:tC8YFVZMed81x7UJ7ORUwXF4Kn6SXuucFqQBhN8+BU0=
+github.com/99designs/gqlgen v0.17.55 h1:3vzrNWYyzSZjGDFo68e5j9sSauLxfKvLp+6ioRokVtM=
+github.com/99designs/gqlgen v0.17.55/go.mod h1:3Bq768f8hgVPGZxL8aY9MaYmbxa6llPM/qu1IGH1EJo=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
-github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
-github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
-github.com/PuerkitoBio/goquery v1.9.2 h1:4/wZksC3KgkQw7SQgkKotmKljk0M6V8TUvA8Wb4yPeE=
-github.com/PuerkitoBio/goquery v1.9.2/go.mod h1:GHPCaP0ODyyxqcNoFGYlAprUFH81NuRPd0GX3Zu2Mvk=
-github.com/adhocore/gronx v1.8.1 h1:F2mLTG5sB11z7vplwD4iydz3YCEjstSfYmCrdSm3t6A=
-github.com/adhocore/gronx v1.8.1/go.mod h1:7oUY1WAU8rEJWmAxXR2DN0JaO4gi9khSgKjiRypqteg=
-github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
-github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
+github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
+github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
+github.com/PuerkitoBio/goquery v1.9.3 h1:mpJr/ikUA9/GNJB/DBZcGeFDXUtosHRyRrwh7KGdTG0=
+github.com/PuerkitoBio/goquery v1.9.3/go.mod h1:1ndLHPdTz+DyQPICCWYlYQMPl0oXZj0G6D4LCYA6u4U=
+github.com/adhocore/gronx v1.19.1 h1:S4c3uVp5jPjnk00De0lslyTenGJ4nA3Ydbkj1SbdPVc=
+github.com/adhocore/gronx v1.19.1/go.mod h1:7oUY1WAU8rEJWmAxXR2DN0JaO4gi9khSgKjiRypqteg=
+github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY=
+github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -35,8 +35,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
-github.com/caddyserver/certmagic v0.21.3 h1:pqRRry3yuB4CWBVq9+cUqu+Y6E2z8TswbhNx1AZeYm0=
-github.com/caddyserver/certmagic v0.21.3/go.mod h1:Zq6pklO9nVRl3DIFUw9gVUfXKdpc/0qwTUAQMBlfgtI=
+github.com/caddyserver/certmagic v0.21.4 h1:e7VobB8rffHv8ZZpSiZtEwnLDHUwLVYLWzWSa1FfKI0=
+github.com/caddyserver/certmagic v0.21.4/go.mod h1:swUXjQ1T9ZtMv95qj7/InJvWLXURU85r+CfG0T+ZbDE=
github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA=
github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -46,15 +46,19 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/datarhei/gosrt v0.6.0 h1:HrrXAw90V78ok4WMIhX6se1aTHPCn82Sg2hj+PhdmGc=
-github.com/datarhei/gosrt v0.6.0/go.mod h1:fsOWdLSHUHShHjgi/46h6wjtdQrtnSdAQFnlas8ONxs=
+github.com/datarhei/gosrt v0.7.0 h1:1/IY66HVVgqGA9zkmL5l6jUFuI8t/76WkuamSkJqHqs=
+github.com/datarhei/gosrt v0.7.0/go.mod h1:wTDoyog1z4au8Fd/QJBQAndzvccuxjqUL/qMm0EyJxE=
github.com/datarhei/joy4 v0.0.0-20240603190808-b1407345907e h1:Qc/0D4xvXrazFkoi/4UGqO15yQ1JN5I8h7RwdzCLgTY=
github.com/datarhei/joy4 v0.0.0-20240603190808-b1407345907e/go.mod h1:Jcw/6jZDQQmPx8A7INEkXmuEF7E9jjBbSTfVSLwmiQw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g=
-github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
+github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo=
+github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
+github.com/dolthub/maphash v0.1.0 h1:bsQ7JsF4FkkWyrP3oCnFJgrCUAFbFf3kOl4L/QxPDyQ=
+github.com/dolthub/maphash v0.1.0/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9puGMV50a4=
+github.com/dolthub/swiss v0.2.1 h1:gs2osYs5SJkAaH5/ggVJqXQxRXtWshF6uE0lgR/Y3Gw=
+github.com/dolthub/swiss v0.2.1/go.mod h1:8AhKZZ1HK7g18j7v7k6c5cYIGEZJcPn0ARsai8cUrh0=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
@@ -63,8 +67,8 @@ github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
github.com/fujiwara/shapeio v1.0.0 h1:xG5D9oNqCSUUbryZ/jQV3cqe1v2suEjwPIcEg1gKM8M=
github.com/fujiwara/shapeio v1.0.0/go.mod h1:LmEmu6L/8jetyj1oewewFb7bZCNRwE7wLCUNzDLaLVA=
-github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy+R0LnH8I=
-github.com/gabriel-vasile/mimetype v1.4.4/go.mod h1:JwLei5XPtWdGiMFB5Pjle1oEeoSeEuJfJE+TtfvdB/s=
+github.com/gabriel-vasile/mimetype v1.4.5 h1:J7wGKdGu33ocBOhGy0z653k/lFKLFDPJMG8Gql0kxn4=
+github.com/gabriel-vasile/mimetype v1.4.5/go.mod h1:ibHel+/kbxn9x2407k1izTA1S81ku1z/DlgOW2QE0M4=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
@@ -90,8 +94,8 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
-github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao=
-github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
+github.com/go-playground/validator/v10 v10.22.1 h1:40JcKH+bBNGFczGuoBYgX4I6m/i27HYW8P9FDk5PbgA=
+github.com/go-playground/validator/v10 v10.22.1/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
@@ -137,8 +141,8 @@ github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iP
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
-github.com/hashicorp/raft v1.7.0 h1:4u24Qn6lQ6uwziM++UgsyiT64Q8GyRn43CV41qPiz1o=
-github.com/hashicorp/raft v1.7.0/go.mod h1:N1sKh6Vn47mrWvEArQgILTyng8GoDRNYlgKyK7PMjs0=
+github.com/hashicorp/raft v1.7.1 h1:ytxsNx4baHsRZrhUcbt3+79zc4ly8qm7pi0393pSchY=
+github.com/hashicorp/raft v1.7.1/go.mod h1:hUeiEwQQR/Nk2iKDD0dkEhklSsu3jcAcqvPzPoZSAEM=
github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702 h1:RLKEcCuKcZ+qp2VlaaZsYZfLOmIiuJNpEi48Rl8u9cQ=
github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702/go.mod h1:nTakvJ4XYq45UXtn0DbwR4aU9ZdjlnIenpbs6Cd+FM0=
github.com/hashicorp/raft-boltdb/v2 v2.3.0 h1:fPpQR1iGEVYjZ2OELvUHX600VAK5qmdnDEv3eXOwZUA=
@@ -155,10 +159,9 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0=
+github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -170,6 +173,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/labstack/echo/v4 v4.12.0 h1:IKpw49IMryVB2p1a4dzwlhP1O2Tf2E0Ir/450lH+kI0=
github.com/labstack/echo/v4 v4.12.0/go.mod h1:UP9Cr2DJXbOK3Kr9ONYzNowSh7HP0aG0ShAyycHSJvM=
github.com/labstack/gommon v0.4.2 h1:F8qTUNXgG1+6WQmqoUWnz8WiEU60mXVVw0P4ht1WRA0=
@@ -178,14 +183,14 @@ github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc h1:RKf14vYWi2ttpEmkA4aQ3j4u9dStX2t4M8UM6qqNsG8=
github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc/go.mod h1:kopuH9ugFRkIXf3YoqHKyrJ9YfUFsckUU9S7B+XP+is=
-github.com/lestrrat-go/strftime v1.0.6 h1:CFGsDEt1pOpFNU+TJB0nhz9jl+K0hZSLE205AhTIGQQ=
-github.com/lestrrat-go/strftime v1.0.6/go.mod h1:f7jQKgV5nnJpYgdEasS+/y7EsTb8ykN2z68n3TtcTaw=
+github.com/lestrrat-go/strftime v1.1.0 h1:gMESpZy44/4pXLO/m+sL0yBd1W6LjgjrrD4a68Gapyg=
+github.com/lestrrat-go/strftime v1.1.0/go.mod h1:uzeIB52CeUJenCo1syghlugshMysrqUT51HlxphXVeI=
github.com/libdns/libdns v0.2.2 h1:O6ws7bAfRPaBsgAYt8MDe2HcNBGC29hkZ9MX2eUSX3s=
github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
github.com/lithammer/shortuuid/v4 v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw7k08o4c=
github.com/lithammer/shortuuid/v4 v4.0.0/go.mod h1:Zs8puNcrvf2rV9rTH51ZLLcj7ZXqQI3lv67aw4KiB1Y=
-github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tAFlj1FYZl8ztUZ13bdq+PLY+NOfbyI=
-github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
+github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0=
+github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -198,14 +203,14 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mholt/acmez/v2 v2.0.1 h1:3/3N0u1pLjMK4sNEAFSI+bcvzbPhRpY383sy1kLHJ6k=
-github.com/mholt/acmez/v2 v2.0.1/go.mod h1:fX4c9r5jYwMyMsC+7tkYRxHibkOTgta5DIFGoe67e1U=
-github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs=
-github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ=
+github.com/mholt/acmez/v2 v2.0.3 h1:CgDBlEwg3QBp6s45tPQmFIBrkRIkBT4rW4orMM6p4sw=
+github.com/mholt/acmez/v2 v2.0.3/go.mod h1:pQ1ysaDeGrIMvJ9dfJMk5kJNkn7L2sb3UhyrX6Q91cw=
+github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
+github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.74 h1:fTo/XlPBTSpo3BAMshlwKL5RspXRv9us5UeHEGYCFe0=
-github.com/minio/minio-go/v7 v7.0.74/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8=
+github.com/minio/minio-go/v7 v7.0.77 h1:GaGghJRg9nwDVlNbwYjSDJT1rqltQkBFDsypWX1v3Bw=
+github.com/minio/minio-go/v7 v7.0.77/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -219,8 +224,6 @@ github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0Mw
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -228,13 +231,11 @@ github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
-github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 h1:Y7qCvg282QmlyrVQuL2fgGwebuw7zvfnRym09r+dUGc=
-github.com/prep/average v0.0.0-20200506183628-d26c465f48c3/go.mod h1:0ZE5gcyWKS151WBDIpmLshHY0l+3edpuKnBUWVVbWKk=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
+github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -242,8 +243,8 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
+github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
@@ -253,8 +254,8 @@ github.com/puzpuzpuz/xsync/v3 v3.4.0 h1:DuVBAdXuGFHv8adVXjWWZ63pJq+NRXOWVXlKDBZ+
github.com/puzpuzpuz/xsync/v3 v3.4.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
-github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
-github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
+github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
@@ -286,17 +287,17 @@ github.com/swaggo/swag v1.16.3 h1:PnCYjPCah8FK4I26l2F/KQ4yz3sILcVUN3cTlBFA9Pg=
github.com/swaggo/swag v1.16.3/go.mod h1:DImHIuOFXKpMFAQjcC7FG4m3Dg4+QuUgUzJmKjI/gRk=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
-github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
-github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
+github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo=
+github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
-github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI=
-github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM=
+github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8=
+github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
-github.com/vektah/gqlparser/v2 v2.5.16 h1:1gcmLTvs3JLKXckwCwlUagVn/IlV2bwqle0vJ0vy5p8=
-github.com/vektah/gqlparser/v2 v2.5.16/go.mod h1:1lz1OeCqgQbQepsGxPVywrjdBHW2T08PUS3pJqepRww=
+github.com/vektah/gqlparser/v2 v2.5.17 h1:9At7WblLV7/36nulgekUgIaqHZWn5hxqluxrxGUhOmI=
+github.com/vektah/gqlparser/v2 v2.5.17/go.mod h1:1lz1OeCqgQbQepsGxPVywrjdBHW2T08PUS3pJqepRww=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -304,20 +305,20 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw=
-github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk=
+github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4=
+github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY=
github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
-github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg=
-github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ=
+github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI=
+github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE=
github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
-go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
-go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
-go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
-go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
+go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
+go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
+go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -326,19 +327,19 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
-golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
-golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
-golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
+golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
+golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
+golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
-golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
+golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
+golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -355,19 +356,19 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
-golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
+golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
+golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
-golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
-golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
-golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
+golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
+golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
+golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/http/api/about.go b/http/api/about.go
index 0203ae1d..f660d6e5 100644
--- a/http/api/about.go
+++ b/http/api/about.go
@@ -2,17 +2,18 @@ package api
// About is some general information about the API
type About struct {
- App string `json:"app"`
- Auths []string `json:"auths"`
- Name string `json:"name"`
- ID string `json:"id"`
- CreatedAt string `json:"created_at"` // RFC3339
- Uptime uint64 `json:"uptime_seconds"`
- Version Version `json:"version"`
+ App string `json:"app"`
+ Auths []string `json:"auths"`
+ Name string `json:"name"`
+ ID string `json:"id"`
+ CreatedAt string `json:"created_at"` // RFC3339
+ Uptime uint64 `json:"uptime_seconds"`
+ Version AboutVersion `json:"version"`
+ Resources AboutResources `json:"resources"`
}
-// Version is some information about the binary
-type Version struct {
+// AboutVersion is some information about the binary
+type AboutVersion struct {
Number string `json:"number"`
Commit string `json:"repository_commit"`
Branch string `json:"repository_branch"`
@@ -21,13 +22,37 @@ type Version struct {
Compiler string `json:"compiler"`
}
+// AboutResources holds information about the current resource usage
+type AboutResources struct {
+ IsThrottling bool `json:"is_throttling"` // Whether this core is currently throttling
+ NCPU float64 `json:"ncpu"` // Number of CPU on this node
+ CPU float64 `json:"cpu_used"` // Current CPU load, 0-100*ncpu
+ CPULimit float64 `json:"cpu_limit"` // Defined CPU load limit, 0-100*ncpu
+ CPUCore float64 `json:"cpu_core"` // Current CPU load of the core itself, 0-100*ncpu
+ Mem uint64 `json:"memory_used_bytes"` // Currently used memory in bytes
+ MemLimit uint64 `json:"memory_limit_bytes"` // Defined memory limit in bytes
+ MemTotal uint64 `json:"memory_total_bytes"` // Total available memory in bytes
+ MemCore uint64 `json:"memory_core_bytes"` // Current used memory of the core itself in bytes
+ GPU []AboutGPUResources `json:"gpu"` // GPU resources
+}
+
+type AboutGPUResources struct {
+ Mem uint64 `json:"memory_used_bytes"` // Currently used memory in bytes
+ MemLimit uint64 `json:"memory_limit_bytes"` // Defined memory limit in bytes
+ MemTotal uint64 `json:"memory_total_bytes"` // Total available memory in bytes
+ Usage float64 `json:"usage_general"` // Current general usage, 0-100
+ UsageLimit float64 `json:"usage_limit"` // Defined general usage limit, 0-100
+ Encoder float64 `json:"usage_encoder"` // Current encoder usage, 0-100
+ Decoder float64 `json:"usage_decoder"` // Current decoder usage, 0-100
+}
+
// MinimalAbout is the minimal information about the API
type MinimalAbout struct {
- App string `json:"app"`
- Auths []string `json:"auths"`
- Version VersionMinimal `json:"version"`
+ App string `json:"app"`
+ Auths []string `json:"auths"`
+ Version AboutVersionMinimal `json:"version"`
}
-type VersionMinimal struct {
+type AboutVersionMinimal struct {
Number string `json:"number"`
}
diff --git a/http/api/avstream.go b/http/api/avstream.go
index 5af4e90d..1b68faf4 100644
--- a/http/api/avstream.go
+++ b/http/api/avstream.go
@@ -46,6 +46,18 @@ type AVstream struct {
Duplicating bool `json:"duplicating"`
GOP string `json:"gop"`
Mode string `json:"mode"`
+
+ // Codec parameter
+ Codec string `json:"codec"`
+ Profile int `json:"profile"`
+ Level int `json:"level"`
+ Pixfmt string `json:"pix_fmt,omitempty"`
+ Width uint64 `json:"width,omitempty" format:"uint64"`
+ Height uint64 `json:"height,omitempty" format:"uint64"`
+ Samplefmt string `json:"sample_fmt,omitempty"`
+ Sampling uint64 `json:"sampling_hz,omitempty" format:"uint64"`
+ Layout string `json:"layout,omitempty"`
+ Channels uint64 `json:"channels,omitempty" format:"uint64"`
}
func (a *AVstream) Unmarshal(av *app.AVstream) {
@@ -53,6 +65,9 @@ func (a *AVstream) Unmarshal(av *app.AVstream) {
return
}
+ a.Input.Unmarshal(&av.Input)
+ a.Output.Unmarshal(&av.Output)
+
a.Aqueue = av.Aqueue
a.Queue = av.Queue
a.Dup = av.Dup
@@ -64,8 +79,16 @@ func (a *AVstream) Unmarshal(av *app.AVstream) {
a.GOP = av.GOP
a.Mode = av.Mode
- a.Input.Unmarshal(&av.Input)
- a.Output.Unmarshal(&av.Output)
+ a.Codec = av.Codec
+ a.Profile = av.Profile
+ a.Level = av.Level
+ a.Pixfmt = av.Pixfmt
+ a.Width = av.Width
+ a.Height = av.Height
+ a.Samplefmt = av.Samplefmt
+ a.Sampling = av.Sampling
+ a.Layout = av.Layout
+ a.Channels = av.Channels
}
func (a *AVstream) Marshal() *app.AVstream {
@@ -82,6 +105,16 @@ func (a *AVstream) Marshal() *app.AVstream {
Duplicating: a.Duplicating,
GOP: a.GOP,
Mode: a.Mode,
+ Codec: a.Codec,
+ Profile: a.Profile,
+ Level: a.Level,
+ Pixfmt: a.Pixfmt,
+ Width: a.Width,
+ Height: a.Height,
+ Samplefmt: a.Samplefmt,
+ Sampling: a.Sampling,
+ Layout: a.Layout,
+ Channels: a.Channels,
}
return av
diff --git a/http/api/avstream_test.go b/http/api/avstream_test.go
index f8bb4d73..198a9153 100644
--- a/http/api/avstream_test.go
+++ b/http/api/avstream_test.go
@@ -47,6 +47,16 @@ func TestAVStream(t *testing.T) {
Duplicating: true,
GOP: "gop",
Mode: "mode",
+ Codec: "h264",
+ Profile: 858,
+ Level: 64,
+ Pixfmt: "yuv420p",
+ Width: 1920,
+ Height: 1080,
+ Samplefmt: "fltp",
+ Sampling: 44100,
+ Layout: "stereo",
+ Channels: 42,
}
p := AVstream{}
diff --git a/http/api/cluster.go b/http/api/cluster.go
index 1bfb45bc..eeed2b8b 100644
--- a/http/api/cluster.go
+++ b/http/api/cluster.go
@@ -39,16 +39,27 @@ type ClusterNodeCore struct {
}
type ClusterNodeResources struct {
- IsThrottling bool `json:"is_throttling"`
- NCPU float64 `json:"ncpu"`
- CPU float64 `json:"cpu_used"` // percent 0-100*npcu
- CPULimit float64 `json:"cpu_limit"` // percent 0-100*npcu
- CPUCore float64 `json:"cpu_core"` // percent 0-100*ncpu
- Mem uint64 `json:"memory_used_bytes"` // bytes
- MemLimit uint64 `json:"memory_limit_bytes"` // bytes
- MemTotal uint64 `json:"memory_total_bytes"` // bytes
- MemCore uint64 `json:"memory_core_bytes"` // bytes
- Error string `json:"error"`
+ IsThrottling bool `json:"is_throttling"`
+ NCPU float64 `json:"ncpu"`
+ CPU float64 `json:"cpu_used"` // percent 0-100*ncpu
+ CPULimit float64 `json:"cpu_limit"` // percent 0-100*ncpu
+ CPUCore float64 `json:"cpu_core"` // percent 0-100*ncpu
+ Mem uint64 `json:"memory_used_bytes"` // bytes
+ MemLimit uint64 `json:"memory_limit_bytes"` // bytes
+ MemTotal uint64 `json:"memory_total_bytes"` // bytes
+ MemCore uint64 `json:"memory_core_bytes"` // bytes
+ GPU []ClusterNodeGPUResources `json:"gpu"` // GPU resources
+ Error string `json:"error"`
+}
+
+type ClusterNodeGPUResources struct {
+ Mem uint64 `json:"memory_used_bytes"` // Currently used memory in bytes
+ MemLimit uint64 `json:"memory_limit_bytes"` // Defined memory limit in bytes
+ MemTotal uint64 `json:"memory_total_bytes"` // Total available memory in bytes
+ Usage float64 `json:"usage_general"` // Current general usage, 0-100
+ UsageLimit float64 `json:"usage_limit"` // Defined general usage limit, 0-100
+ Encoder float64 `json:"usage_encoder"` // Current encoder usage, 0-100
+ Decoder float64 `json:"usage_decoder"` // Current decoder usage, 0-100
}
type ClusterRaft struct {
diff --git a/http/api/event.go b/http/api/event.go
index 416e37d5..0685cb82 100644
--- a/http/api/event.go
+++ b/http/api/event.go
@@ -15,6 +15,7 @@ type Event struct {
Component string `json:"event"`
Message string `json:"message"`
Caller string `json:"caller"`
+ CoreID string `json:"core_id,omitempty"`
Data map[string]string `json:"data"`
}
@@ -66,12 +67,18 @@ func (e *Event) Filter(ef *EventFilter) bool {
}
}
- if ef.reCaller != nil {
+ if len(e.Caller) != 0 && ef.reCaller != nil {
if !ef.reCaller.MatchString(e.Caller) {
return false
}
}
+ if len(e.CoreID) != 0 && ef.reCoreID != nil {
+ if !ef.reCoreID.MatchString(e.CoreID) {
+ return false
+ }
+ }
+
for k, r := range ef.reData {
v, ok := e.Data[k]
if !ok {
@@ -91,11 +98,13 @@ type EventFilter struct {
Message string `json:"message"`
Level string `json:"level"`
Caller string `json:"caller"`
+ CoreID string `json:"core_id"`
Data map[string]string `json:"data"`
reMessage *regexp.Regexp
reLevel *regexp.Regexp
reCaller *regexp.Regexp
+ reCoreID *regexp.Regexp
reData map[string]*regexp.Regexp
}
@@ -131,6 +140,15 @@ func (ef *EventFilter) Compile() error {
ef.reCaller = r
}
+ if len(ef.CoreID) != 0 {
+ r, err := regexp.Compile("(?i)" + ef.CoreID)
+ if err != nil {
+ return err
+ }
+
+ ef.reCoreID = r
+ }
+
ef.reData = make(map[string]*regexp.Regexp)
for k, v := range ef.Data {
diff --git a/http/api/process.go b/http/api/process.go
index baf87707..c7cae209 100644
--- a/http/api/process.go
+++ b/http/api/process.go
@@ -155,9 +155,13 @@ type ProcessConfigIOCleanup struct {
}
type ProcessConfigLimits struct {
- CPU float64 `json:"cpu_usage" jsonschema:"minimum=0"`
- Memory uint64 `json:"memory_mbytes" jsonschema:"minimum=0" format:"uint64"`
- WaitFor uint64 `json:"waitfor_seconds" jsonschema:"minimum=0" format:"uint64"`
+ CPU float64 `json:"cpu_usage" jsonschema:"minimum=0"` // percent 0-100*ncpu
+ Memory uint64 `json:"memory_mbytes" jsonschema:"minimum=0" format:"uint64"` // megabytes
+ GPUUsage float64 `json:"gpu_usage" jsonschema:"minimum=0"` // percent 0-100
+ GPUEncoder float64 `json:"gpu_encoder" jsonschema:"minimum=0"` // percent 0-100
+ GPUDecoder float64 `json:"gpu_decoder" jsonschema:"minimum=0"` // percent 0-100
+ GPUMemory uint64 `json:"gpu_memory_mbytes" jsonschema:"minimum=0" format:"uint64"` // megabytes
+ WaitFor uint64 `json:"waitfor_seconds" jsonschema:"minimum=0" format:"uint64"` // seconds
}
// ProcessConfig represents the configuration of an ffmpeg process
@@ -197,7 +201,13 @@ func (cfg *ProcessConfig) Marshal() (*app.Config, map[string]interface{}) {
Scheduler: cfg.Scheduler,
LimitCPU: cfg.Limits.CPU,
LimitMemory: cfg.Limits.Memory * 1024 * 1024,
- LimitWaitFor: cfg.Limits.WaitFor,
+ LimitGPU: app.ConfigLimitGPU{
+ Usage: cfg.Limits.GPUUsage,
+ Encoder: cfg.Limits.GPUEncoder,
+ Decoder: cfg.Limits.GPUDecoder,
+ Memory: cfg.Limits.GPUMemory * 1024 * 1024,
+ },
+ LimitWaitFor: cfg.Limits.WaitFor,
}
cfg.generateInputOutputIDs(cfg.Input)
@@ -283,6 +293,10 @@ func (cfg *ProcessConfig) Unmarshal(c *app.Config, metadata map[string]interface
cfg.Scheduler = c.Scheduler
cfg.Limits.CPU = c.LimitCPU
cfg.Limits.Memory = c.LimitMemory / 1024 / 1024
+ cfg.Limits.GPUUsage = c.LimitGPU.Usage
+ cfg.Limits.GPUEncoder = c.LimitGPU.Encoder
+ cfg.Limits.GPUDecoder = c.LimitGPU.Decoder
+ cfg.Limits.GPUMemory = c.LimitGPU.Memory / 1024 / 1024
cfg.Limits.WaitFor = c.LimitWaitFor
cfg.Options = make([]string, len(c.Options))
@@ -364,20 +378,7 @@ func (s *ProcessState) Unmarshal(state *app.State) {
s.Memory = state.Memory
s.CPU = json.ToNumber(state.CPU)
s.LimitMode = state.LimitMode
- s.Resources.CPU = ProcessUsageCPU{
- NCPU: json.ToNumber(state.Resources.CPU.NCPU),
- Current: json.ToNumber(state.Resources.CPU.Current),
- Average: json.ToNumber(state.Resources.CPU.Average),
- Max: json.ToNumber(state.Resources.CPU.Max),
- Limit: json.ToNumber(state.Resources.CPU.Limit),
- IsThrottling: state.Resources.CPU.IsThrottling,
- }
- s.Resources.Memory = ProcessUsageMemory{
- Current: state.Resources.Memory.Current,
- Average: json.ToNumber(state.Resources.Memory.Average),
- Max: state.Resources.Memory.Max,
- Limit: state.Resources.Memory.Limit,
- }
+ s.Resources.Unmarshal(&state.Resources)
s.Command = state.Command
s.Progress.Unmarshal(&state.Progress)
@@ -430,15 +431,15 @@ func (p *ProcessUsageCPU) Marshal() app.ProcessUsageCPU {
}
type ProcessUsageMemory struct {
- Current uint64 `json:"cur" format:"uint64"`
- Average json.Number `json:"avg" swaggertype:"number" jsonschema:"type=number"`
- Max uint64 `json:"max" format:"uint64"`
- Limit uint64 `json:"limit" format:"uint64"`
+ Current uint64 `json:"cur" format:"uint64"`
+ Average uint64 `json:"avg" format:"uint64"`
+ Max uint64 `json:"max" format:"uint64"`
+ Limit uint64 `json:"limit" format:"uint64"`
}
func (p *ProcessUsageMemory) Unmarshal(pp *app.ProcessUsageMemory) {
p.Current = pp.Current
- p.Average = json.ToNumber(pp.Average)
+ p.Average = pp.Average
p.Max = pp.Max
p.Limit = pp.Limit
}
@@ -446,31 +447,120 @@ func (p *ProcessUsageMemory) Unmarshal(pp *app.ProcessUsageMemory) {
func (p *ProcessUsageMemory) Marshal() app.ProcessUsageMemory {
pp := app.ProcessUsageMemory{
Current: p.Current,
+ Average: p.Average,
Max: p.Max,
Limit: p.Limit,
}
+ return pp
+}
+
+type ProcessUsageGPUMemory struct {
+ Current uint64 `json:"cur" format:"uint64"`
+ Average uint64 `json:"avg" format:"uint64"`
+ Max uint64 `json:"max" format:"uint64"`
+ Limit uint64 `json:"limit" format:"uint64"`
+}
+
+func (p *ProcessUsageGPUMemory) Unmarshal(pp *app.ProcessUsageGPUMemory) {
+ p.Current = pp.Current
+ p.Average = pp.Average
+ p.Max = pp.Max
+ p.Limit = pp.Limit
+}
+
+func (p *ProcessUsageGPUMemory) Marshal() app.ProcessUsageGPUMemory {
+ pp := app.ProcessUsageGPUMemory{
+ Current: p.Current,
+ Average: p.Average,
+ Max: p.Max,
+ Limit: p.Limit,
+ }
+
+ return pp
+}
+
+type ProcessUsageGPUUsage struct {
+ Current json.Number `json:"cur" swaggertype:"number" jsonschema:"type=number"`
+ Average json.Number `json:"avg" swaggertype:"number" jsonschema:"type=number"`
+ Max json.Number `json:"max" swaggertype:"number" jsonschema:"type=number"`
+ Limit json.Number `json:"limit" swaggertype:"number" jsonschema:"type=number"`
+}
+
+func (p *ProcessUsageGPUUsage) Unmarshal(pp *app.ProcessUsageGPUUsage) {
+ p.Current = json.ToNumber(pp.Current)
+ p.Average = json.ToNumber(pp.Average)
+ p.Max = json.ToNumber(pp.Max)
+ p.Limit = json.ToNumber(pp.Limit)
+}
+
+func (p *ProcessUsageGPUUsage) Marshal() app.ProcessUsageGPUUsage {
+ pp := app.ProcessUsageGPUUsage{}
+
+ if x, err := p.Current.Float64(); err == nil {
+ pp.Current = x
+ }
+
if x, err := p.Average.Float64(); err == nil {
pp.Average = x
}
+ if x, err := p.Max.Float64(); err == nil {
+ pp.Max = x
+ }
+
+ if x, err := p.Limit.Float64(); err == nil {
+ pp.Limit = x
+ }
+
+ return pp
+}
+
+type ProcessUsageGPU struct {
+ Index int `json:"index"`
+ Memory ProcessUsageGPUMemory `json:"memory_bytes"`
+ Usage ProcessUsageGPUUsage `json:"usage"`
+ Encoder ProcessUsageGPUUsage `json:"encoder"`
+ Decoder ProcessUsageGPUUsage `json:"decoder"`
+}
+
+func (p *ProcessUsageGPU) Unmarshal(pp *app.ProcessUsageGPU) {
+ p.Index = pp.Index
+ p.Memory.Unmarshal(&pp.Memory)
+ p.Usage.Unmarshal(&pp.Usage)
+ p.Encoder.Unmarshal(&pp.Encoder)
+ p.Decoder.Unmarshal(&pp.Decoder)
+}
+
+func (p *ProcessUsageGPU) Marshal() app.ProcessUsageGPU {
+ pp := app.ProcessUsageGPU{
+ Index: p.Index,
+ Memory: p.Memory.Marshal(),
+ Usage: p.Usage.Marshal(),
+ Encoder: p.Encoder.Marshal(),
+ Decoder: p.Decoder.Marshal(),
+ }
+
return pp
}
type ProcessUsage struct {
CPU ProcessUsageCPU `json:"cpu_usage"`
Memory ProcessUsageMemory `json:"memory_bytes"`
+ GPU ProcessUsageGPU `json:"gpu"`
}
func (p *ProcessUsage) Unmarshal(pp *app.ProcessUsage) {
p.CPU.Unmarshal(&pp.CPU)
p.Memory.Unmarshal(&pp.Memory)
+ p.GPU.Unmarshal(&pp.GPU)
}
func (p *ProcessUsage) Marshal() app.ProcessUsage {
pp := app.ProcessUsage{
CPU: p.CPU.Marshal(),
Memory: p.Memory.Marshal(),
+ GPU: p.GPU.Marshal(),
}
return pp
diff --git a/http/api/process_test.go b/http/api/process_test.go
index 6dddce39..ddbdfbf8 100644
--- a/http/api/process_test.go
+++ b/http/api/process_test.go
@@ -56,6 +56,33 @@ func TestProcessUsage(t *testing.T) {
Max: 150,
Limit: 200,
},
+ GPU: app.ProcessUsageGPU{
+ Index: 3,
+ Memory: app.ProcessUsageGPUMemory{
+ Current: 48,
+ Average: 43,
+ Max: 88,
+ Limit: 34,
+ },
+ Usage: app.ProcessUsageGPUUsage{
+ Current: 47,
+ Average: 22,
+ Max: 90,
+ Limit: 80,
+ },
+ Encoder: app.ProcessUsageGPUUsage{
+ Current: 48,
+ Average: 46,
+ Max: 74,
+ Limit: 46,
+ },
+ Decoder: app.ProcessUsageGPUUsage{
+ Current: 21,
+ Average: 42,
+ Max: 30,
+ Limit: 99,
+ },
+ },
}
p := ProcessUsage{}
@@ -103,7 +130,13 @@ func TestProcessConfig(t *testing.T) {
LogPatterns: []string{"bla", "blubb"},
LimitCPU: 10,
LimitMemory: 100 * 1024 * 1024,
- LimitWaitFor: 20,
+ LimitGPU: app.ConfigLimitGPU{
+ Usage: 50,
+ Encoder: 90,
+ Decoder: 80,
+ Memory: 24 * 1024 * 1024 * 1024,
+ },
+ LimitWaitFor: 20,
}
p := ProcessConfig{}
diff --git a/http/api/progress.go b/http/api/progress.go
index 93671b57..05976060 100644
--- a/http/api/progress.go
+++ b/http/api/progress.go
@@ -23,6 +23,8 @@ type ProgressIO struct {
Type string `json:"type"`
Codec string `json:"codec"`
Coder string `json:"coder"`
+ Profile int `json:"profile"`
+ Level int `json:"level"`
Frame uint64 `json:"frame" format:"uint64"`
Keyframe uint64 `json:"keyframe" format:"uint64"`
Framerate ProgressIOFramerate `json:"framerate"`
@@ -40,9 +42,10 @@ type ProgressIO struct {
Height uint64 `json:"height,omitempty" format:"uint64"`
// Audio
- Sampling uint64 `json:"sampling_hz,omitempty" format:"uint64"`
- Layout string `json:"layout,omitempty"`
- Channels uint64 `json:"channels,omitempty" format:"uint64"`
+ Samplefmt string `json:"sample_fmt,omitempty"`
+ Sampling uint64 `json:"sampling_hz,omitempty" format:"uint64"`
+ Layout string `json:"layout,omitempty"`
+ Channels uint64 `json:"channels,omitempty" format:"uint64"`
// avstream
AVstream *AVstream `json:"avstream" jsonschema:"anyof_type=null;object"`
@@ -62,6 +65,8 @@ func (i *ProgressIO) Unmarshal(io *app.ProgressIO) {
i.Type = io.Type
i.Codec = io.Codec
i.Coder = io.Coder
+ i.Profile = io.Profile
+ i.Level = io.Level
i.Frame = io.Frame
i.Keyframe = io.Keyframe
i.Framerate.Min = json.ToNumber(io.Framerate.Min)
@@ -77,6 +82,7 @@ func (i *ProgressIO) Unmarshal(io *app.ProgressIO) {
i.Quantizer = json.ToNumber(io.Quantizer)
i.Width = io.Width
i.Height = io.Height
+ i.Samplefmt = io.Samplefmt
i.Sampling = io.Sampling
i.Layout = io.Layout
i.Channels = io.Channels
@@ -97,6 +103,8 @@ func (i *ProgressIO) Marshal() app.ProgressIO {
Type: i.Type,
Codec: i.Codec,
Coder: i.Coder,
+ Profile: i.Profile,
+ Level: i.Level,
Frame: i.Frame,
Keyframe: i.Keyframe,
Packet: i.Packet,
@@ -105,6 +113,7 @@ func (i *ProgressIO) Marshal() app.ProgressIO {
Pixfmt: i.Pixfmt,
Width: i.Width,
Height: i.Height,
+ Samplefmt: i.Samplefmt,
Sampling: i.Sampling,
Layout: i.Layout,
Channels: i.Channels,
diff --git a/http/api/progress_test.go b/http/api/progress_test.go
index 812e434c..3ccd7926 100644
--- a/http/api/progress_test.go
+++ b/http/api/progress_test.go
@@ -97,6 +97,8 @@ func TestProgressIO(t *testing.T) {
Type: "video",
Codec: "x",
Coder: "y",
+ Profile: 848,
+ Level: 48,
Frame: 133,
Keyframe: 39,
Framerate: app.ProgressIOFramerate{
@@ -114,6 +116,7 @@ func TestProgressIO(t *testing.T) {
Quantizer: 494.2,
Width: 10393,
Height: 4933,
+ Samplefmt: "fltp",
Sampling: 58483,
Layout: "atmos",
Channels: 4944,
@@ -137,6 +140,8 @@ func TestProgressIOAVstream(t *testing.T) {
Type: "video",
Codec: "x",
Coder: "y",
+ Profile: 848,
+ Level: 48,
Frame: 133,
Keyframe: 39,
Framerate: app.ProgressIOFramerate{
@@ -154,6 +159,7 @@ func TestProgressIOAVstream(t *testing.T) {
Quantizer: 494.2,
Width: 10393,
Height: 4933,
+ Samplefmt: "fltp",
Sampling: 58483,
Layout: "atmos",
Channels: 4944,
@@ -203,6 +209,8 @@ func TestProgress(t *testing.T) {
Type: "video",
Codec: "x",
Coder: "y",
+ Profile: 848,
+ Level: 48,
Frame: 133,
Keyframe: 39,
Framerate: app.ProgressIOFramerate{
@@ -220,6 +228,7 @@ func TestProgress(t *testing.T) {
Quantizer: 494.2,
Width: 10393,
Height: 4933,
+ Samplefmt: "fltp",
Sampling: 58483,
Layout: "atmos",
Channels: 4944,
@@ -259,6 +268,8 @@ func TestProgress(t *testing.T) {
Type: "video",
Codec: "x",
Coder: "y",
+ Profile: 848,
+ Level: 48,
Frame: 133,
Keyframe: 39,
Framerate: app.ProgressIOFramerate{
@@ -276,6 +287,7 @@ func TestProgress(t *testing.T) {
Quantizer: 494.2,
Width: 10393,
Height: 4933,
+ Samplefmt: "fltp",
Sampling: 58483,
Layout: "atmos",
Channels: 4944,
diff --git a/http/client/client.go b/http/client/client.go
index d73fab98..d877076b 100644
--- a/http/client/client.go
+++ b/http/client/client.go
@@ -1,7 +1,6 @@
package client
import (
- "bytes"
"context"
"fmt"
"io"
@@ -15,6 +14,7 @@ import (
"github.com/datarhei/core/v16/encoding/json"
"github.com/datarhei/core/v16/glob"
"github.com/datarhei/core/v16/http/api"
+ "github.com/datarhei/core/v16/mem"
"github.com/datarhei/core/v16/restream/app"
"github.com/Masterminds/semver/v3"
@@ -167,8 +167,13 @@ type Config struct {
// Auth0Token is a valid Auth0 token to authorize access to the API.
Auth0Token string
- // Client is a HTTPClient that will be used for the API calls. Optional.
+ // Client is a HTTPClient that will be used for the API calls. Optional. Don't
+ // set a timeout in the client if you want to use the timeout in this config.
Client HTTPClient
+
+ // Timeout is the timeout for the whole connection. Don't set a timeout in
+ // the optional HTTPClient as it will override this timeout.
+ Timeout time.Duration
}
type apiconstraint struct {
@@ -178,16 +183,17 @@ type apiconstraint struct {
// restclient implements the RestClient interface.
type restclient struct {
- address string
- prefix string
- accessToken Token
- refreshToken Token
- username string
- password string
- auth0Token string
- client HTTPClient
- about api.About
- aboutLock sync.RWMutex
+ address string
+ prefix string
+ accessToken Token
+ refreshToken Token
+ username string
+ password string
+ auth0Token string
+ client HTTPClient
+ clientTimeout time.Duration
+ about api.About
+ aboutLock sync.RWMutex
version struct {
connectedCore *semver.Version
@@ -199,12 +205,13 @@ type restclient struct {
// in case of an error.
func New(config Config) (RestClient, error) {
r := &restclient{
- address: config.Address,
- prefix: "/api",
- username: config.Username,
- password: config.Password,
- auth0Token: config.Auth0Token,
- client: config.Client,
+ address: config.Address,
+ prefix: "/api",
+ username: config.Username,
+ password: config.Password,
+ auth0Token: config.Auth0Token,
+ client: config.Client,
+ clientTimeout: config.Timeout,
}
if len(config.AccessToken) != 0 {
@@ -645,12 +652,13 @@ func (r *restclient) login() error {
login.Password = r.password
}
- var buf bytes.Buffer
+ buf := mem.Get()
+ defer mem.Put(buf)
- e := json.NewEncoder(&buf)
+ e := json.NewEncoder(buf)
e.Encode(login)
- req, err := http.NewRequest("POST", r.address+r.prefix+"/login", &buf)
+ req, err := http.NewRequest("POST", r.address+r.prefix+"/login", buf.Reader())
if err != nil {
return err
}
@@ -806,26 +814,11 @@ func (r *restclient) info() (api.About, error) {
}
func (r *restclient) request(req *http.Request) (int, io.ReadCloser, error) {
- /*
- fmt.Printf("%s %s\n", req.Method, req.URL)
- for key, value := range req.Header {
- for _, v := range value {
- fmt.Printf("%s: %s\n", key, v)
- }
- }
- fmt.Printf("\n")
- */
resp, err := r.client.Do(req)
if err != nil {
return -1, nil, err
}
- /*
- for key, value := range resp.Header {
- for _, v := range value {
- fmt.Printf("%s: %s\n", key, v)
- }
- }
- */
+
reader := resp.Body
contentEncoding := resp.Header.Get("Content-Encoding")
@@ -923,7 +916,7 @@ func (r *restclient) stream(ctx context.Context, method, path string, query *url
}
func (r *restclient) call(method, path string, query *url.Values, header http.Header, contentType string, data io.Reader) ([]byte, error) {
- ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+ ctx, cancel := context.WithTimeout(context.Background(), r.clientTimeout)
defer cancel()
body, err := r.stream(ctx, method, path, query, header, contentType, data)
diff --git a/http/client/events.go b/http/client/events.go
index 36e7063b..97909edb 100644
--- a/http/client/events.go
+++ b/http/client/events.go
@@ -1,25 +1,26 @@
package client
import (
- "bytes"
"context"
"io"
"net/http"
"github.com/datarhei/core/v16/encoding/json"
"github.com/datarhei/core/v16/http/api"
+ "github.com/datarhei/core/v16/mem"
)
func (r *restclient) Events(ctx context.Context, filters api.EventFilters) (<-chan api.Event, error) {
- var buf bytes.Buffer
+ buf := mem.Get()
+ defer mem.Put(buf)
- e := json.NewEncoder(&buf)
+ e := json.NewEncoder(buf)
e.Encode(filters)
header := make(http.Header)
header.Set("Accept", "application/x-json-stream")
- stream, err := r.stream(ctx, "POST", "/v3/events", nil, header, "application/json", &buf)
+ stream, err := r.stream(ctx, "POST", "/v3/events", nil, header, "application/json", buf.Reader())
if err != nil {
return nil, err
}
diff --git a/http/client/process.go b/http/client/process.go
index 38a41d18..8de34aa8 100644
--- a/http/client/process.go
+++ b/http/client/process.go
@@ -1,12 +1,12 @@
package client
import (
- "bytes"
"net/url"
"strings"
"github.com/datarhei/core/v16/encoding/json"
"github.com/datarhei/core/v16/http/api"
+ "github.com/datarhei/core/v16/mem"
"github.com/datarhei/core/v16/restream/app"
)
@@ -66,15 +66,16 @@ func (r *restclient) Process(id app.ProcessID, filter []string) (api.Process, er
}
func (r *restclient) ProcessAdd(p *app.Config, metadata map[string]interface{}) error {
- var buf bytes.Buffer
+ buf := mem.Get()
+ defer mem.Put(buf)
config := api.ProcessConfig{}
config.Unmarshal(p, metadata)
- e := json.NewEncoder(&buf)
+ e := json.NewEncoder(buf)
e.Encode(config)
- _, err := r.call("POST", "/v3/process", nil, nil, "application/json", &buf)
+ _, err := r.call("POST", "/v3/process", nil, nil, "application/json", buf.Reader())
if err != nil {
return err
}
@@ -83,18 +84,19 @@ func (r *restclient) ProcessAdd(p *app.Config, metadata map[string]interface{})
}
func (r *restclient) ProcessUpdate(id app.ProcessID, p *app.Config, metadata map[string]interface{}) error {
- var buf bytes.Buffer
+ buf := mem.Get()
+ defer mem.Put(buf)
config := api.ProcessConfig{}
config.Unmarshal(p, metadata)
- e := json.NewEncoder(&buf)
+ e := json.NewEncoder(buf)
e.Encode(config)
query := &url.Values{}
query.Set("domain", id.Domain)
- _, err := r.call("PUT", "/v3/process/"+url.PathEscape(id.ID), query, nil, "application/json", &buf)
+ _, err := r.call("PUT", "/v3/process/"+url.PathEscape(id.ID), query, nil, "application/json", buf.Reader())
if err != nil {
return err
}
@@ -103,18 +105,19 @@ func (r *restclient) ProcessUpdate(id app.ProcessID, p *app.Config, metadata map
}
func (r *restclient) ProcessReportSet(id app.ProcessID, report *app.Report) error {
- var buf bytes.Buffer
+ buf := mem.Get()
+ defer mem.Put(buf)
data := api.ProcessReport{}
data.Unmarshal(report)
- e := json.NewEncoder(&buf)
+ e := json.NewEncoder(buf)
e.Encode(data)
query := &url.Values{}
query.Set("domain", id.Domain)
- _, err := r.call("PUT", "/v3/process/"+url.PathEscape(id.ID)+"/report", query, nil, "application/json", &buf)
+ _, err := r.call("PUT", "/v3/process/"+url.PathEscape(id.ID)+"/report", query, nil, "application/json", buf.Reader())
if err != nil {
return err
}
@@ -132,9 +135,10 @@ func (r *restclient) ProcessDelete(id app.ProcessID) error {
}
func (r *restclient) ProcessCommand(id app.ProcessID, command string) error {
- var buf bytes.Buffer
+ buf := mem.Get()
+ defer mem.Put(buf)
- e := json.NewEncoder(&buf)
+ e := json.NewEncoder(buf)
e.Encode(api.Command{
Command: command,
})
@@ -142,7 +146,7 @@ func (r *restclient) ProcessCommand(id app.ProcessID, command string) error {
query := &url.Values{}
query.Set("domain", id.Domain)
- _, err := r.call("PUT", "/v3/process/"+url.PathEscape(id.ID)+"/command", query, nil, "application/json", &buf)
+ _, err := r.call("PUT", "/v3/process/"+url.PathEscape(id.ID)+"/command", query, nil, "application/json", buf.Reader())
return err
}
@@ -170,15 +174,16 @@ func (r *restclient) ProcessMetadata(id app.ProcessID, key string) (api.Metadata
}
func (r *restclient) ProcessMetadataSet(id app.ProcessID, key string, metadata api.Metadata) error {
- var buf bytes.Buffer
+ buf := mem.Get()
+ defer mem.Put(buf)
- e := json.NewEncoder(&buf)
+ e := json.NewEncoder(buf)
e.Encode(metadata)
query := &url.Values{}
query.Set("domain", id.Domain)
- _, err := r.call("PUT", "/v3/process/"+url.PathEscape(id.ID)+"/metadata/"+url.PathEscape(key), query, nil, "application/json", &buf)
+ _, err := r.call("PUT", "/v3/process/"+url.PathEscape(id.ID)+"/metadata/"+url.PathEscape(key), query, nil, "application/json", buf.Reader())
return err
}
@@ -201,15 +206,17 @@ func (r *restclient) ProcessProbe(id app.ProcessID) (api.Probe, error) {
func (r *restclient) ProcessProbeConfig(p *app.Config) (api.Probe, error) {
var probe api.Probe
- var buf bytes.Buffer
+
+ buf := mem.Get()
+ defer mem.Put(buf)
config := api.ProcessConfig{}
config.Unmarshal(p, nil)
- e := json.NewEncoder(&buf)
+ e := json.NewEncoder(buf)
e.Encode(config)
- data, err := r.call("POST", "/v3/process/probe", nil, nil, "application/json", &buf)
+ data, err := r.call("POST", "/v3/process/probe", nil, nil, "application/json", buf.Reader())
if err != nil {
return probe, err
}
diff --git a/http/fs/fs.go b/http/fs/fs.go
index 500ab733..d1f5cc74 100644
--- a/http/fs/fs.go
+++ b/http/fs/fs.go
@@ -17,7 +17,6 @@ type FS struct {
DefaultFile string
DefaultContentType string
- Gzip bool
Filesystem fs.Filesystem
diff --git a/http/handler/api/about.go b/http/handler/api/about.go
index a9add8ca..01b8d578 100644
--- a/http/handler/api/about.go
+++ b/http/handler/api/about.go
@@ -6,6 +6,7 @@ import (
"github.com/datarhei/core/v16/app"
"github.com/datarhei/core/v16/http/api"
+ "github.com/datarhei/core/v16/resources"
"github.com/datarhei/core/v16/restream"
"github.com/labstack/echo/v4"
@@ -14,15 +15,17 @@ import (
// The AboutHandler type provides handler functions for retrieving details
// about the API version and build infos.
type AboutHandler struct {
- restream restream.Restreamer
- auths func() []string
+ restream restream.Restreamer
+ resources resources.Resources
+ auths func() []string
}
// NewAbout returns a new About type
-func NewAbout(restream restream.Restreamer, auths func() []string) *AboutHandler {
+func NewAbout(restream restream.Restreamer, resources resources.Resources, auths func() []string) *AboutHandler {
return &AboutHandler{
- restream: restream,
- auths: auths,
+ restream: restream,
+ resources: resources,
+ auths: auths,
}
}
@@ -41,7 +44,7 @@ func (p *AboutHandler) About(c echo.Context) error {
return c.JSON(http.StatusOK, api.MinimalAbout{
App: app.Name,
Auths: p.auths(),
- Version: api.VersionMinimal{
+ Version: api.AboutVersionMinimal{
Number: app.Version.MajorString(),
},
})
@@ -56,7 +59,7 @@ func (p *AboutHandler) About(c echo.Context) error {
ID: p.restream.ID(),
CreatedAt: createdAt.Format(time.RFC3339),
Uptime: uint64(time.Since(createdAt).Seconds()),
- Version: api.Version{
+ Version: api.AboutVersion{
Number: app.Version.String(),
Commit: app.Commit,
Branch: app.Branch,
@@ -66,5 +69,32 @@ func (p *AboutHandler) About(c echo.Context) error {
},
}
+ if p.resources != nil {
+ res := p.resources.Info()
+
+ about.Resources.IsThrottling = res.CPU.Throttling
+ about.Resources.NCPU = res.CPU.NCPU
+ about.Resources.CPU = (100 - res.CPU.Idle) * res.CPU.NCPU
+ about.Resources.CPULimit = res.CPU.Limit * res.CPU.NCPU
+ about.Resources.CPUCore = res.CPU.Core * res.CPU.NCPU
+ about.Resources.Mem = res.Mem.Total - res.Mem.Available
+ about.Resources.MemLimit = res.Mem.Limit
+ about.Resources.MemTotal = res.Mem.Total
+ about.Resources.MemCore = res.Mem.Core
+
+ about.Resources.GPU = make([]api.AboutGPUResources, len(res.GPU.GPU))
+ for i, gpu := range res.GPU.GPU {
+ about.Resources.GPU[i] = api.AboutGPUResources{
+ Mem: gpu.MemoryUsed,
+ MemLimit: gpu.MemoryLimit,
+ MemTotal: gpu.MemoryTotal,
+ Usage: gpu.Usage,
+ UsageLimit: gpu.UsageLimit,
+ Encoder: gpu.Encoder,
+ Decoder: gpu.Decoder,
+ }
+ }
+ }
+
return c.JSON(http.StatusOK, about)
}
diff --git a/http/handler/api/about_test.go b/http/handler/api/about_test.go
index a2f039b5..5ef491fa 100644
--- a/http/handler/api/about_test.go
+++ b/http/handler/api/about_test.go
@@ -6,6 +6,8 @@ import (
"github.com/datarhei/core/v16/http/api"
"github.com/datarhei/core/v16/http/mock"
+ "github.com/datarhei/core/v16/internal/mock/resources"
+ "github.com/datarhei/core/v16/internal/mock/restream"
"github.com/stretchr/testify/require"
"github.com/labstack/echo/v4"
@@ -14,12 +16,12 @@ import (
func getDummyAboutRouter() (*echo.Echo, error) {
router := mock.DummyEcho()
- rs, err := mock.DummyRestreamer("../../mock")
+ rs, err := restream.New(nil, nil, nil, nil)
if err != nil {
return nil, err
}
- handler := NewAbout(rs, func() []string { return []string{} })
+ handler := NewAbout(rs, resources.New(), func() []string { return []string{} })
router.Add("GET", "/", handler.About)
diff --git a/http/handler/api/cluster.go b/http/handler/api/cluster.go
index 10841647..35185e96 100644
--- a/http/handler/api/cluster.go
+++ b/http/handler/api/cluster.go
@@ -123,9 +123,22 @@ func (h *ClusterHandler) marshalClusterNode(node cluster.ClusterNode) api.Cluste
MemLimit: node.Resources.MemLimit,
MemTotal: node.Resources.MemTotal,
MemCore: node.Resources.MemCore,
+ GPU: []api.ClusterNodeGPUResources{},
},
}
+ for _, gpu := range node.Resources.GPU {
+ n.Resources.GPU = append(n.Resources.GPU, api.ClusterNodeGPUResources{
+ Mem: gpu.Mem,
+ MemLimit: gpu.MemLimit,
+ MemTotal: gpu.MemTotal,
+ Usage: gpu.Usage,
+ UsageLimit: gpu.UsageLimit,
+ Encoder: gpu.Encoder,
+ Decoder: gpu.Decoder,
+ })
+ }
+
if node.Error != nil {
n.Error = node.Error.Error()
}
diff --git a/http/handler/api/cluster_events.go b/http/handler/api/cluster_events.go
new file mode 100644
index 00000000..7072a44c
--- /dev/null
+++ b/http/handler/api/cluster_events.go
@@ -0,0 +1,144 @@
+package api
+
+import (
+ "context"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/datarhei/core/v16/encoding/json"
+ "github.com/datarhei/core/v16/http/api"
+ "github.com/datarhei/core/v16/http/handler/util"
+
+ "github.com/labstack/echo/v4"
+)
+
+// Events returns a stream of events
+// @Summary Stream of events
+// @Description Stream of events of what's happening on each node in the cluster
+// @ID cluster-3-events
+// @Tags v16.?.?
+// @Accept json
+// @Produce text/event-stream
+// @Produce json-stream
+// @Param filters body api.EventFilters false "Event filters"
+// @Success 200 {object} api.Event
+// @Security ApiKeyAuth
+// @Router /api/v3/cluster/events [post]
+func (h *ClusterHandler) Events(c echo.Context) error {
+ filters := api.EventFilters{}
+
+ if err := util.ShouldBindJSON(c, &filters); err != nil {
+ return api.Err(http.StatusBadRequest, "", "invalid JSON: %s", err.Error())
+ }
+
+ filter := map[string]*api.EventFilter{}
+
+ for _, f := range filters.Filters {
+ f := f
+
+ if err := f.Compile(); err != nil {
+ return api.Err(http.StatusBadRequest, "", "invalid filter: %s: %s", f.Component, err.Error())
+ }
+
+ component := strings.ToLower(f.Component)
+ filter[component] = &f
+ }
+
+ ticker := time.NewTicker(5 * time.Second)
+ defer ticker.Stop()
+
+ req := c.Request()
+ reqctx := req.Context()
+
+ contentType := "text/event-stream"
+ accept := req.Header.Get(echo.HeaderAccept)
+ if strings.Contains(accept, "application/x-json-stream") {
+ contentType = "application/x-json-stream"
+ }
+
+ res := c.Response()
+
+ res.Header().Set(echo.HeaderContentType, contentType+"; charset=UTF-8")
+ res.Header().Set(echo.HeaderCacheControl, "no-store")
+ res.Header().Set(echo.HeaderConnection, "close")
+ res.WriteHeader(http.StatusOK)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ evts, err := h.proxy.Events(ctx, filters)
+ if err != nil {
+ return api.Err(http.StatusInternalServerError, "", "%s", err.Error())
+ }
+
+ enc := json.NewEncoder(res)
+ enc.SetIndent("", "")
+
+ done := make(chan error, 1)
+
+ filterEvent := func(event *api.Event) bool {
+ if len(filter) == 0 {
+ return true
+ }
+
+ f, ok := filter[event.Component]
+ if !ok {
+ return false
+ }
+
+ return event.Filter(f)
+ }
+
+ if contentType == "text/event-stream" {
+ res.Write([]byte(":keepalive\n\n"))
+ res.Flush()
+
+ for {
+ select {
+ case err := <-done:
+ return err
+ case <-reqctx.Done():
+ done <- nil
+ case <-ticker.C:
+ res.Write([]byte(":keepalive\n\n"))
+ res.Flush()
+ case event := <-evts:
+ if !filterEvent(&event) {
+ continue
+ }
+
+ res.Write([]byte("event: " + event.Component + "\ndata: "))
+ if err := enc.Encode(event); err != nil {
+ done <- err
+ }
+ res.Write([]byte("\n"))
+ res.Flush()
+ }
+ }
+ } else {
+ res.Write([]byte("{\"event\": \"keepalive\"}\n"))
+ res.Flush()
+
+ for {
+ select {
+ case err := <-done:
+ return err
+ case <-reqctx.Done():
+ done <- nil
+ case <-ticker.C:
+ res.Write([]byte("{\"event\": \"keepalive\"}\n"))
+ res.Flush()
+ case event := <-evts:
+ if !filterEvent(&event) {
+ continue
+ }
+
+ if err := enc.Encode(event); err != nil {
+ done <- err
+ }
+ res.Flush()
+ }
+ }
+ }
+}
diff --git a/http/handler/api/cluster_node.go b/http/handler/api/cluster_node.go
index c3bfee05..87fd2981 100644
--- a/http/handler/api/cluster_node.go
+++ b/http/handler/api/cluster_node.go
@@ -86,7 +86,7 @@ func (h *ClusterHandler) NodeGet(c echo.Context) error {
// @ID cluster-3-get-node-version
// @Produce json
// @Param id path string true "Node ID"
-// @Success 200 {object} api.Version
+// @Success 200 {object} api.AboutVersion
// @Failure 404 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/cluster/node/{id}/version [get]
@@ -100,7 +100,7 @@ func (h *ClusterHandler) NodeGetVersion(c echo.Context) error {
v := peer.CoreAbout()
- version := api.Version{
+ version := api.AboutVersion{
Number: v.Version.Number,
Commit: v.Version.Commit,
Branch: v.Version.Branch,
@@ -373,7 +373,7 @@ func (h *ClusterHandler) NodeListProcesses(c echo.Context) error {
processes := []api.Process{}
for _, p := range procs {
- if !h.iam.Enforce(ctxuser, domain, "process", p.Config.ID, "read") {
+ if !h.iam.Enforce(ctxuser, domain, "process", p.ID, "read") {
continue
}
diff --git a/http/handler/api/config_test.go b/http/handler/api/config_test.go
index 0757cfb0..69d67f9e 100644
--- a/http/handler/api/config_test.go
+++ b/http/handler/api/config_test.go
@@ -51,7 +51,7 @@ func TestConfigSetConflict(t *testing.T) {
router, _ := getDummyConfigRouter(t)
cfg := config.New(nil)
- cfg.Storage.MimeTypes = "/path/to/mime.types"
+ cfg.Storage.MimeTypesFile = "/path/to/mime.types"
var data bytes.Buffer
diff --git a/http/handler/api/process_test.go b/http/handler/api/process_test.go
index bf076af3..ff607405 100644
--- a/http/handler/api/process_test.go
+++ b/http/handler/api/process_test.go
@@ -14,6 +14,7 @@ import (
"github.com/datarhei/core/v16/iam"
"github.com/datarhei/core/v16/iam/identity"
"github.com/datarhei/core/v16/iam/policy"
+ "github.com/datarhei/core/v16/internal/mock/restream"
"github.com/datarhei/core/v16/io/fs"
"github.com/labstack/echo/v4"
@@ -27,7 +28,7 @@ type Response struct {
}
func getDummyRestreamHandler() (*ProcessHandler, error) {
- rs, err := mock.DummyRestreamer("../../mock")
+ rs, err := restream.New(nil, nil, nil, nil)
if err != nil {
return nil, err
}
diff --git a/http/handler/api/widget_test.go b/http/handler/api/widget_test.go
index 46ab913d..f3a42b12 100644
--- a/http/handler/api/widget_test.go
+++ b/http/handler/api/widget_test.go
@@ -8,10 +8,11 @@ import (
"github.com/datarhei/core/v16/encoding/json"
"github.com/datarhei/core/v16/http/api"
"github.com/datarhei/core/v16/http/mock"
+ mockrs "github.com/datarhei/core/v16/internal/mock/restream"
"github.com/datarhei/core/v16/restream"
- "github.com/stretchr/testify/require"
"github.com/labstack/echo/v4"
+ "github.com/stretchr/testify/require"
)
func getDummyWidgetHandler(rs restream.Restreamer) (*WidgetHandler, error) {
@@ -37,7 +38,7 @@ func getDummyWidgetRouter(rs restream.Restreamer) (*echo.Echo, error) {
}
func TestWidget(t *testing.T) {
- rs, err := mock.DummyRestreamer("../../mock")
+ rs, err := mockrs.New(nil, nil, nil, nil)
require.NoError(t, err)
router, err := getDummyWidgetRouter(rs)
diff --git a/http/json.go b/http/json.go
index 83367a4a..fd2c1205 100644
--- a/http/json.go
+++ b/http/json.go
@@ -14,11 +14,8 @@ type GoJSONSerializer struct{}
// Serialize converts an interface into a json and writes it to the response.
// You can optionally use the indent parameter to produce pretty JSONs.
-func (d GoJSONSerializer) Serialize(c echo.Context, i interface{}, indent string) error {
+func (d GoJSONSerializer) Serialize(c echo.Context, i interface{}, _ string) error {
enc := json.NewEncoder(c.Response())
- if indent != "" {
- enc.SetIndent("", indent)
- }
return enc.Encode(i)
}
diff --git a/http/middleware/cache/cache.go b/http/middleware/cache/cache.go
index 3b25fc17..f7d12aa9 100644
--- a/http/middleware/cache/cache.go
+++ b/http/middleware/cache/cache.go
@@ -2,13 +2,13 @@
package cache
import (
- "bytes"
"fmt"
"net/http"
"path"
"strings"
"github.com/datarhei/core/v16/http/cache"
+ "github.com/datarhei/core/v16/mem"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
@@ -78,6 +78,7 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
w := &cacheWriter{
header: writer.Header().Clone(),
+ body: mem.Get(),
}
res.Writer = w
@@ -170,7 +171,7 @@ type cacheObject struct {
type cacheWriter struct {
code int
header http.Header
- body bytes.Buffer
+ body *mem.Buffer
}
func (w *cacheWriter) Header() http.Header {
diff --git a/http/middleware/compress/brotli.go b/http/middleware/compress/brotli.go
index 1f692aa9..813657bc 100644
--- a/http/middleware/compress/brotli.go
+++ b/http/middleware/compress/brotli.go
@@ -15,7 +15,7 @@ func NewBrotli(level Level) Compression {
brotliLevel := brotli.DefaultCompression
if level == BestCompression {
brotliLevel = brotli.BestCompression
- } else {
+ } else if level == BestSpeed {
brotliLevel = brotli.BestSpeed
}
diff --git a/http/middleware/compress/compress.go b/http/middleware/compress/compress.go
index aa5017b2..a92f446b 100644
--- a/http/middleware/compress/compress.go
+++ b/http/middleware/compress/compress.go
@@ -2,14 +2,14 @@ package compress
import (
"bufio"
- "bytes"
"fmt"
"io"
"net"
"net/http"
"strings"
- "sync"
+ "github.com/datarhei/core/v16/mem"
+ "github.com/datarhei/core/v16/slices"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
)
@@ -27,8 +27,11 @@ type Config struct {
// is used. Optional. Default value 0
MinLength int
- // Schemes is a list of enabled compressiond. Optional. Default [GzipScheme, ZstdScheme]
- Schemes []Scheme
+ // Schemes is a list of enabled compression schemes. Optional. Default [gzip]
+ Schemes []string
+
+ // List of content type to compress. If empty, everything will be compressed
+ ContentTypes []string
}
type Compression interface {
@@ -46,28 +49,19 @@ type Compressor interface {
type compressResponseWriter struct {
Compressor
http.ResponseWriter
+ hasHeader bool
wroteHeader bool
wroteBody bool
minLength int
minLengthExceeded bool
- buffer *bytes.Buffer
+ buffer *mem.Buffer
code int
headerContentLength string
scheme string
+ contentTypes []string
+ passThrough bool
}
-type Scheme string
-
-func (s Scheme) String() string {
- return string(s)
-}
-
-const (
- GzipScheme Scheme = "gzip"
- BrotliScheme Scheme = "br"
- ZstdScheme Scheme = "zstd"
-)
-
type Level int
const (
@@ -78,33 +72,11 @@ const (
// DefaultConfig is the default Gzip middleware config.
var DefaultConfig = Config{
- Skipper: middleware.DefaultSkipper,
- Level: DefaultCompression,
- MinLength: 0,
- Schemes: []Scheme{GzipScheme, ZstdScheme},
-}
-
-// ContentTypesSkipper returns a Skipper based on the list of content types
-// that should be compressed. If the list is empty, all responses will be
-// compressed.
-func ContentTypeSkipper(contentTypes []string) middleware.Skipper {
- return func(c echo.Context) bool {
- // If no allowed content types are given, compress all
- if len(contentTypes) == 0 {
- return false
- }
-
- // Iterate through the allowed content types and don't skip if the content type matches
- responseContentType := c.Response().Header().Get(echo.HeaderContentType)
-
- for _, contentType := range contentTypes {
- if strings.Contains(responseContentType, contentType) {
- return false
- }
- }
-
- return true
- }
+ Skipper: middleware.DefaultSkipper,
+ Level: DefaultCompression,
+ MinLength: 0,
+ Schemes: []string{"gzip"},
+ ContentTypes: []string{},
}
// New returns a middleware which compresses HTTP response using a compression
@@ -133,39 +105,39 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
config.Schemes = DefaultConfig.Schemes
}
+ contentTypes := slices.Copy(config.ContentTypes)
+
gzipEnable := false
brotliEnable := false
zstdEnable := false
for _, s := range config.Schemes {
switch s {
- case GzipScheme:
+ case "gzip":
gzipEnable = true
- case BrotliScheme:
+ case "br":
brotliEnable = true
- case ZstdScheme:
+ case "zstd":
zstdEnable = true
}
}
- var gzipPool Compression
- var brotliPool Compression
- var zstdPool Compression
+ var gzipCompressor Compression
+ var brotliCompressor Compression
+ var zstdCompressor Compression
if gzipEnable {
- gzipPool = NewGzip(config.Level)
+ gzipCompressor = NewGzip(config.Level)
}
if brotliEnable {
- brotliPool = NewBrotli(config.Level)
+ brotliCompressor = NewBrotli(config.Level)
}
if zstdEnable {
- zstdPool = NewZstd(config.Level)
+ zstdCompressor = NewZstd(config.Level)
}
- bpool := bufferPool()
-
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
if config.Skipper(c) {
@@ -173,62 +145,71 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
}
res := c.Response()
- res.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding)
encodings := c.Request().Header.Get(echo.HeaderAcceptEncoding)
- var pool Compression
- var scheme Scheme
-
- if zstdEnable && strings.Contains(encodings, ZstdScheme.String()) {
- pool = zstdPool
- scheme = ZstdScheme
- } else if brotliEnable && strings.Contains(encodings, BrotliScheme.String()) {
- pool = brotliPool
- scheme = BrotliScheme
- } else if gzipEnable && strings.Contains(encodings, GzipScheme.String()) {
- pool = gzipPool
- scheme = GzipScheme
+ var compress Compression
+ var scheme string
+
+ if zstdEnable && strings.Contains(encodings, "zstd") {
+ compress = zstdCompressor
+ scheme = "zstd"
+ } else if brotliEnable && strings.Contains(encodings, "br") {
+ compress = brotliCompressor
+ scheme = "br"
+ } else if gzipEnable && strings.Contains(encodings, "gzip") {
+ compress = gzipCompressor
+ scheme = "gzip"
}
- if pool != nil {
- w := pool.Acquire()
- if w == nil {
+ if compress != nil {
+ compressor := compress.Acquire()
+ if compressor == nil {
return echo.NewHTTPError(http.StatusInternalServerError, fmt.Errorf("failed to acquire compressor for %s", scheme))
}
rw := res.Writer
- w.Reset(rw)
+ compressor.Reset(rw)
- buf := bpool.Get().(*bytes.Buffer)
- buf.Reset()
+ buffer := mem.Get()
- grw := &compressResponseWriter{Compressor: w, ResponseWriter: rw, minLength: config.MinLength, buffer: buf, scheme: scheme.String()}
+ grw := &compressResponseWriter{
+ Compressor: compressor,
+ ResponseWriter: rw,
+ minLength: config.MinLength,
+ buffer: buffer,
+ scheme: scheme,
+ contentTypes: contentTypes,
+ }
defer func() {
- if !grw.wroteBody {
- if res.Header().Get(echo.HeaderContentEncoding) == scheme.String() {
- res.Header().Del(echo.HeaderContentEncoding)
- }
- // We have to reset response to it's pristine state when
- // nothing is written to body or error is returned.
- // See issue #424, #407.
- res.Writer = rw
- w.Reset(io.Discard)
- } else if !grw.minLengthExceeded {
- // If the minimum content length hasn't exceeded, write the uncompressed response
- res.Writer = rw
- if grw.wroteHeader {
- // Restore Content-Length header in case it was deleted
- if len(grw.headerContentLength) != 0 {
- grw.Header().Set(echo.HeaderContentLength, grw.headerContentLength)
+ if !grw.passThrough {
+ if !grw.wroteBody {
+ if res.Header().Get(echo.HeaderContentEncoding) == scheme {
+ res.Header().Del(echo.HeaderContentEncoding)
+ }
+ // We have to reset response to it's pristine state when
+ // nothing is written to body or error is returned.
+ // See issue #424, #407.
+ res.Writer = rw
+ compressor.Reset(io.Discard)
+ } else if !grw.minLengthExceeded {
+ // If the minimum content length hasn't exceeded, write the uncompressed response
+ res.Writer = rw
+ if grw.wroteHeader {
+ // Restore Content-Length header in case it was deleted
+ if len(grw.headerContentLength) != 0 {
+ grw.Header().Set(echo.HeaderContentLength, grw.headerContentLength)
+ }
+ grw.ResponseWriter.WriteHeader(grw.code)
}
- grw.ResponseWriter.WriteHeader(grw.code)
+ grw.buffer.WriteTo(rw)
+ compressor.Reset(io.Discard)
}
- grw.buffer.WriteTo(rw)
- w.Reset(io.Discard)
+ } else {
+ compressor.Reset(io.Discard)
}
- w.Close()
- bpool.Put(buf)
- pool.Release(w)
+ compressor.Close()
+ mem.Put(buffer)
+ compress.Release(compressor)
}()
res.Writer = grw
@@ -241,17 +222,37 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
func (w *compressResponseWriter) WriteHeader(code int) {
if code == http.StatusNoContent { // Issue #489
- w.ResponseWriter.Header().Del(echo.HeaderContentEncoding)
+ w.Header().Del(echo.HeaderContentEncoding)
}
w.headerContentLength = w.Header().Get(echo.HeaderContentLength)
w.Header().Del(echo.HeaderContentLength) // Issue #444
- w.wroteHeader = true
+ if !w.canCompress(w.Header().Get(echo.HeaderContentType)) {
+ w.passThrough = true
+ }
+
+ w.hasHeader = true
// Delay writing of the header until we know if we'll actually compress the response
w.code = code
}
+func (w *compressResponseWriter) canCompress(responseContentType string) bool {
+ // If no content types are given, compress all
+ if len(w.contentTypes) == 0 {
+ return true
+ }
+
+ // Iterate through the allowed content types and compress if the content type matches
+ for _, contentType := range w.contentTypes {
+ if strings.Contains(responseContentType, contentType) {
+ return true
+ }
+ }
+
+ return false
+}
+
func (w *compressResponseWriter) Write(b []byte) (int, error) {
if w.Header().Get(echo.HeaderContentType) == "" {
w.Header().Set(echo.HeaderContentType, http.DetectContentType(b))
@@ -259,6 +260,18 @@ func (w *compressResponseWriter) Write(b []byte) (int, error) {
w.wroteBody = true
+ if !w.hasHeader {
+ w.WriteHeader(http.StatusOK)
+ }
+
+ if w.passThrough {
+ if !w.wroteHeader {
+ w.ResponseWriter.WriteHeader(w.code)
+ w.wroteHeader = true
+ }
+ return w.ResponseWriter.Write(b)
+ }
+
if !w.minLengthExceeded {
n, err := w.buffer.Write(b)
@@ -267,8 +280,10 @@ func (w *compressResponseWriter) Write(b []byte) (int, error) {
// The minimum length is exceeded, add Content-Encoding header and write the header
w.Header().Set(echo.HeaderContentEncoding, w.scheme) // Issue #806
- if w.wroteHeader {
+ w.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding)
+ if w.hasHeader {
w.ResponseWriter.WriteHeader(w.code)
+ w.wroteHeader = true
}
return w.Compressor.Write(w.buffer.Bytes())
@@ -281,12 +296,31 @@ func (w *compressResponseWriter) Write(b []byte) (int, error) {
}
func (w *compressResponseWriter) Flush() {
+ if !w.hasHeader {
+ w.WriteHeader(http.StatusOK)
+ }
+
+ if w.passThrough {
+ if !w.wroteHeader {
+ w.ResponseWriter.WriteHeader(w.code)
+ w.wroteHeader = true
+ }
+
+ if flusher, ok := w.ResponseWriter.(http.Flusher); ok {
+ flusher.Flush()
+ }
+
+ return
+ }
+
if !w.minLengthExceeded {
// Enforce compression
w.minLengthExceeded = true
w.Header().Set(echo.HeaderContentEncoding, w.scheme) // Issue #806
- if w.wroteHeader {
+ w.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding)
+ if w.hasHeader {
w.ResponseWriter.WriteHeader(w.code)
+ w.wroteHeader = true
}
w.Compressor.Write(w.buffer.Bytes())
@@ -308,12 +342,3 @@ func (w *compressResponseWriter) Push(target string, opts *http.PushOptions) err
}
return http.ErrNotSupported
}
-
-func bufferPool() sync.Pool {
- return sync.Pool{
- New: func() interface{} {
- b := &bytes.Buffer{}
- return b
- },
- }
-}
diff --git a/http/middleware/compress/compress_test.go b/http/middleware/compress/compress_test.go
index 60888989..2d0d5b8b 100644
--- a/http/middleware/compress/compress_test.go
+++ b/http/middleware/compress/compress_test.go
@@ -58,15 +58,15 @@ func (rcr *nopReadCloseResetter) Reset(r io.Reader) error {
return resetter.Reset(r)
}
-func getTestcases() map[Scheme]func(r io.Reader) (ReadCloseResetter, error) {
- return map[Scheme]func(r io.Reader) (ReadCloseResetter, error){
- GzipScheme: func(r io.Reader) (ReadCloseResetter, error) {
+func getTestcases() map[string]func(r io.Reader) (ReadCloseResetter, error) {
+ return map[string]func(r io.Reader) (ReadCloseResetter, error){
+ "gzip": func(r io.Reader) (ReadCloseResetter, error) {
return gzip.NewReader(r)
},
- BrotliScheme: func(r io.Reader) (ReadCloseResetter, error) {
+ "br": func(r io.Reader) (ReadCloseResetter, error) {
return &nopReadCloseResetter{brotli.NewReader(r)}, nil
},
- ZstdScheme: func(r io.Reader) (ReadCloseResetter, error) {
+ "zstd": func(r io.Reader) (ReadCloseResetter, error) {
reader, err := zstd.NewReader(r)
return &nopReadCloseResetter{reader}, err
},
@@ -77,18 +77,18 @@ func TestCompress(t *testing.T) {
schemes := getTestcases()
for scheme, reader := range schemes {
- t.Run(scheme.String(), func(t *testing.T) {
+ t.Run(scheme, func(t *testing.T) {
e := echo.New()
req := httptest.NewRequest(http.MethodGet, "/", nil)
rec := httptest.NewRecorder()
- c := e.NewContext(req, rec)
+ ctx := e.NewContext(req, rec)
// Skip if no Accept-Encoding header
- h := NewWithConfig(Config{Schemes: []Scheme{scheme}})(func(c echo.Context) error {
+ handler := NewWithConfig(Config{Schemes: []string{scheme}})(func(c echo.Context) error {
c.Response().Write([]byte("test")) // For Content-Type sniffing
return nil
})
- h(c)
+ handler(ctx)
assert := assert.New(t)
@@ -96,15 +96,15 @@ func TestCompress(t *testing.T) {
// Compression
req = httptest.NewRequest(http.MethodGet, "/", nil)
- req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
+ req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec = httptest.NewRecorder()
- c = e.NewContext(req, rec)
- h(c)
- assert.Equal(scheme.String(), rec.Header().Get(echo.HeaderContentEncoding))
+ ctx = e.NewContext(req, rec)
+ handler(ctx)
+ assert.Equal(scheme, rec.Header().Get(echo.HeaderContentEncoding))
assert.Contains(rec.Header().Get(echo.HeaderContentType), echo.MIMETextPlain)
r, err := reader(rec.Body)
if assert.NoError(err) {
- buf := new(bytes.Buffer)
+ buf := &bytes.Buffer{}
defer r.Close()
buf.ReadFrom(r)
assert.Equal("test", buf.String())
@@ -112,11 +112,11 @@ func TestCompress(t *testing.T) {
// Gzip chunked
req = httptest.NewRequest(http.MethodGet, "/", nil)
- req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
+ req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec = httptest.NewRecorder()
- c = e.NewContext(req, rec)
- NewWithConfig(Config{Schemes: []Scheme{scheme}})(func(c echo.Context) error {
+ ctx = e.NewContext(req, rec)
+ NewWithConfig(Config{Schemes: []string{scheme}})(func(c echo.Context) error {
c.Response().Header().Set("Content-Type", "text/event-stream")
c.Response().Header().Set("Transfer-Encoding", "chunked")
@@ -126,7 +126,7 @@ func TestCompress(t *testing.T) {
// Read the first part of the data
assert.True(rec.Flushed)
- assert.Equal(scheme.String(), rec.Header().Get(echo.HeaderContentEncoding))
+ assert.Equal(scheme, rec.Header().Get(echo.HeaderContentEncoding))
// Write and flush the second part of the data
c.Response().Write([]byte("tost\n"))
@@ -135,7 +135,7 @@ func TestCompress(t *testing.T) {
// Write the final part of the data and return
c.Response().Write([]byte("tast"))
return nil
- })(c)
+ })(ctx)
buf := new(bytes.Buffer)
r.Reset(rec.Body)
@@ -146,14 +146,53 @@ func TestCompress(t *testing.T) {
}
}
+func TestCompressWithPassthrough(t *testing.T) {
+ schemes := getTestcases()
+
+ for scheme, reader := range schemes {
+ t.Run(scheme, func(t *testing.T) {
+ e := echo.New()
+ e.Use(NewWithConfig(Config{MinLength: 5, Schemes: []string{scheme}, ContentTypes: []string{"text/compress"}}))
+ e.GET("/plain", func(c echo.Context) error {
+ c.Response().Header().Set("Content-Type", "text/plain")
+ c.Response().Write([]byte("testtest"))
+ return nil
+ })
+ e.GET("/compress", func(c echo.Context) error {
+ c.Response().Header().Set("Content-Type", "text/compress")
+ c.Response().Write([]byte("testtest"))
+ return nil
+ })
+ req := httptest.NewRequest(http.MethodGet, "/plain", nil)
+ req.Header.Set(echo.HeaderAcceptEncoding, scheme)
+ rec := httptest.NewRecorder()
+ e.ServeHTTP(rec, req)
+ assert.Equal(t, "", rec.Header().Get(echo.HeaderContentEncoding))
+ assert.Equal(t, rec.Body.String(), "testtest")
+
+ req = httptest.NewRequest(http.MethodGet, "/compress", nil)
+ req.Header.Set(echo.HeaderAcceptEncoding, scheme)
+ rec = httptest.NewRecorder()
+ e.ServeHTTP(rec, req)
+ assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding))
+ r, err := reader(rec.Body)
+ if assert.NoError(t, err) {
+ buf := new(bytes.Buffer)
+ defer r.Close()
+ buf.ReadFrom(r)
+ assert.Equal(t, "testtest", buf.String())
+ }
+ })
+ }
+}
+
func TestCompressWithMinLength(t *testing.T) {
schemes := getTestcases()
for scheme, reader := range schemes {
- t.Run(scheme.String(), func(t *testing.T) {
+ t.Run(scheme, func(t *testing.T) {
e := echo.New()
- // Invalid level
- e.Use(NewWithConfig(Config{MinLength: 5, Schemes: []Scheme{scheme}}))
+ e.Use(NewWithConfig(Config{MinLength: 5, Schemes: []string{scheme}}))
e.GET("/", func(c echo.Context) error {
c.Response().Write([]byte("test"))
return nil
@@ -163,17 +202,17 @@ func TestCompressWithMinLength(t *testing.T) {
return nil
})
req := httptest.NewRequest(http.MethodGet, "/", nil)
- req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
+ req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec := httptest.NewRecorder()
e.ServeHTTP(rec, req)
assert.Equal(t, "", rec.Header().Get(echo.HeaderContentEncoding))
- assert.Contains(t, rec.Body.String(), "test")
+ assert.Equal(t, rec.Body.String(), "test")
req = httptest.NewRequest(http.MethodGet, "/foobar", nil)
- req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
+ req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec = httptest.NewRecorder()
e.ServeHTTP(rec, req)
- assert.Equal(t, scheme.String(), rec.Header().Get(echo.HeaderContentEncoding))
+ assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding))
r, err := reader(rec.Body)
if assert.NoError(t, err) {
buf := new(bytes.Buffer)
@@ -185,17 +224,60 @@ func TestCompressWithMinLength(t *testing.T) {
}
}
+func TestCompressWithAroundMinLength(t *testing.T) {
+ schemes := getTestcases()
+ minLength := 1000
+
+ for scheme, reader := range schemes {
+ for i := minLength - 64; i < minLength+64; i++ {
+ name := fmt.Sprintf("%s-%d", scheme, i)
+
+ t.Run(name, func(t *testing.T) {
+ data := rand.Bytes(i)
+ e := echo.New()
+ e.Use(NewWithConfig(Config{MinLength: minLength, Schemes: []string{scheme}}))
+ e.GET("/", func(c echo.Context) error {
+ c.Response().Write(data[:1])
+ c.Response().Write(data[1:])
+ return nil
+ })
+ req := httptest.NewRequest(http.MethodGet, "/", nil)
+ req.Header.Set(echo.HeaderAcceptEncoding, scheme)
+ rec := httptest.NewRecorder()
+ e.ServeHTTP(rec, req)
+
+ if i < minLength {
+ assert.Equal(t, "", rec.Header().Get(echo.HeaderContentEncoding))
+ res, err := io.ReadAll(rec.Body)
+ if assert.NoError(t, err) {
+ assert.Equal(t, data, res)
+ }
+ } else {
+ assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding))
+ r, err := reader(rec.Body)
+ if assert.NoError(t, err) {
+ buf := new(bytes.Buffer)
+ defer r.Close()
+ buf.ReadFrom(r)
+ assert.Equal(t, data, buf.Bytes())
+ }
+ }
+ })
+ }
+ }
+}
+
func TestCompressNoContent(t *testing.T) {
schemes := getTestcases()
for scheme := range schemes {
- t.Run(scheme.String(), func(t *testing.T) {
+ t.Run(scheme, func(t *testing.T) {
e := echo.New()
req := httptest.NewRequest(http.MethodGet, "/", nil)
- req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
+ req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec := httptest.NewRecorder()
c := e.NewContext(req, rec)
- h := NewWithConfig(Config{Schemes: []Scheme{scheme}})(func(c echo.Context) error {
+ h := NewWithConfig(Config{Schemes: []string{scheme}})(func(c echo.Context) error {
return c.NoContent(http.StatusNoContent)
})
if assert.NoError(t, h(c)) {
@@ -211,17 +293,17 @@ func TestCompressEmpty(t *testing.T) {
schemes := getTestcases()
for scheme, reader := range schemes {
- t.Run(scheme.String(), func(t *testing.T) {
+ t.Run(scheme, func(t *testing.T) {
e := echo.New()
req := httptest.NewRequest(http.MethodGet, "/", nil)
- req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
+ req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec := httptest.NewRecorder()
c := e.NewContext(req, rec)
- h := NewWithConfig(Config{Schemes: []Scheme{scheme}})(func(c echo.Context) error {
+ h := NewWithConfig(Config{Schemes: []string{scheme}})(func(c echo.Context) error {
return c.String(http.StatusOK, "")
})
if assert.NoError(t, h(c)) {
- assert.Equal(t, scheme.String(), rec.Header().Get(echo.HeaderContentEncoding))
+ assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding))
assert.Equal(t, "text/plain; charset=UTF-8", rec.Header().Get(echo.HeaderContentType))
r, err := reader(rec.Body)
if assert.NoError(t, err) {
@@ -238,14 +320,14 @@ func TestCompressErrorReturned(t *testing.T) {
schemes := getTestcases()
for scheme := range schemes {
- t.Run(scheme.String(), func(t *testing.T) {
+ t.Run(scheme, func(t *testing.T) {
e := echo.New()
- e.Use(NewWithConfig(Config{Schemes: []Scheme{scheme}}))
+ e.Use(NewWithConfig(Config{Schemes: []string{scheme}}))
e.GET("/", func(c echo.Context) error {
return echo.ErrNotFound
})
req := httptest.NewRequest(http.MethodGet, "/", nil)
- req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
+ req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec := httptest.NewRecorder()
e.ServeHTTP(rec, req)
assert.Equal(t, http.StatusNotFound, rec.Code)
@@ -259,12 +341,12 @@ func TestCompressWithStatic(t *testing.T) {
schemes := getTestcases()
for scheme, reader := range schemes {
- t.Run(scheme.String(), func(t *testing.T) {
+ t.Run(scheme, func(t *testing.T) {
e := echo.New()
- e.Use(NewWithConfig(Config{Schemes: []Scheme{scheme}}))
+ e.Use(NewWithConfig(Config{Schemes: []string{scheme}}))
e.Static("/test", "./")
req := httptest.NewRequest(http.MethodGet, "/test/compress.go", nil)
- req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
+ req.Header.Set(echo.HeaderAcceptEncoding, scheme)
rec := httptest.NewRecorder()
e.ServeHTTP(rec, req)
assert.Equal(t, http.StatusOK, rec.Code)
@@ -292,17 +374,17 @@ func BenchmarkCompress(b *testing.B) {
for i := 1; i <= 18; i++ {
datalen := 2 << i
- data := []byte(rand.String(datalen))
+ data := rand.Bytes(datalen)
for scheme := range schemes {
- name := fmt.Sprintf("%s-%d", scheme.String(), datalen)
+ name := fmt.Sprintf("%s-%d", scheme, datalen)
b.Run(name, func(b *testing.B) {
e := echo.New()
req := httptest.NewRequest(http.MethodGet, "/", nil)
- req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
+ req.Header.Set(echo.HeaderAcceptEncoding, scheme)
- h := NewWithConfig(Config{Level: BestSpeed, Schemes: []Scheme{scheme}})(func(c echo.Context) error {
+ h := NewWithConfig(Config{Level: BestSpeed, Schemes: []string{scheme}})(func(c echo.Context) error {
c.Response().Write(data)
return nil
})
@@ -327,13 +409,13 @@ func BenchmarkCompressJSON(b *testing.B) {
schemes := getTestcases()
for scheme := range schemes {
- b.Run(scheme.String(), func(b *testing.B) {
+ b.Run(scheme, func(b *testing.B) {
e := echo.New()
req := httptest.NewRequest(http.MethodGet, "/", nil)
- req.Header.Set(echo.HeaderAcceptEncoding, scheme.String())
+ req.Header.Set(echo.HeaderAcceptEncoding, scheme)
- h := NewWithConfig(Config{Level: BestSpeed, Schemes: []Scheme{scheme}})(func(c echo.Context) error {
+ h := NewWithConfig(Config{Level: BestSpeed, Schemes: []string{scheme}})(func(c echo.Context) error {
c.Response().Write(data)
return nil
})
diff --git a/http/middleware/compress/gogzip.go b/http/middleware/compress/gogzip.go
new file mode 100644
index 00000000..589fbb99
--- /dev/null
+++ b/http/middleware/compress/gogzip.go
@@ -0,0 +1,55 @@
+package compress
+
+import (
+ "compress/gzip"
+ "io"
+ "sync"
+)
+
+type gogzipImpl struct {
+ pool sync.Pool
+}
+
+func NewGoGzip(level Level) Compression {
+ gzipLevel := gzip.DefaultCompression
+ if level == BestCompression {
+ gzipLevel = gzip.BestCompression
+ } else if level == BestSpeed {
+ gzipLevel = gzip.BestSpeed
+ }
+
+ g := &gogzipImpl{
+ pool: sync.Pool{
+ New: func() interface{} {
+ w, err := gzip.NewWriterLevel(io.Discard, gzipLevel)
+ if err != nil {
+ return nil
+ }
+ return w
+ },
+ },
+ }
+
+ return g
+}
+
+func (g *gogzipImpl) Acquire() Compressor {
+ c := g.pool.Get()
+ if c == nil {
+ return nil
+ }
+
+ x, ok := c.(Compressor)
+ if !ok {
+ return nil
+ }
+
+ x.Reset(io.Discard)
+
+ return x
+}
+
+func (g *gogzipImpl) Release(c Compressor) {
+ c.Reset(io.Discard)
+ g.pool.Put(c)
+}
diff --git a/http/middleware/compress/gzip.go b/http/middleware/compress/gzip.go
index f2441fe1..2482d4d2 100644
--- a/http/middleware/compress/gzip.go
+++ b/http/middleware/compress/gzip.go
@@ -15,7 +15,7 @@ func NewGzip(level Level) Compression {
gzipLevel := gzip.DefaultCompression
if level == BestCompression {
gzipLevel = gzip.BestCompression
- } else {
+ } else if level == BestSpeed {
gzipLevel = gzip.BestSpeed
}
diff --git a/http/middleware/compress/zstd.go b/http/middleware/compress/zstd.go
index 3eaacb56..970732c6 100644
--- a/http/middleware/compress/zstd.go
+++ b/http/middleware/compress/zstd.go
@@ -15,7 +15,7 @@ func NewZstd(level Level) Compression {
zstdLevel := zstd.SpeedDefault
if level == BestCompression {
zstdLevel = zstd.SpeedBestCompression
- } else {
+ } else if level == BestSpeed {
zstdLevel = zstd.SpeedFastest
}
diff --git a/http/middleware/hlsrewrite/fixtures/data.txt b/http/middleware/hlsrewrite/fixtures/data.txt
new file mode 100644
index 00000000..78bccdda
--- /dev/null
+++ b/http/middleware/hlsrewrite/fixtures/data.txt
@@ -0,0 +1,29 @@
+#EXTM3U
+#EXT-X-VERSION:6
+#EXT-X-TARGETDURATION:2
+#EXT-X-MEDIA-SEQUENCE:303
+#EXT-X-INDEPENDENT-SEGMENTS
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:35.019+0200
+/path/to/foobar/test_0_0_0303.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:37.019+0200
+/path/to/foobar/test_0_0_0304.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:39.019+0200
+/path/to/foobar/test_0_0_0305.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:41.019+0200
+/path/to/foobar/test_0_0_0306.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:43.019+0200
+/path/to/foobar/test_0_0_0307.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:45.019+0200
+/path/to/foobar/test_0_0_0308.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:47.019+0200
+/path/to/foobar/test_0_0_0309.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:49.019+0200
+/path/to/foobar/test_0_0_0310.ts
diff --git a/http/middleware/hlsrewrite/fixtures/data_rewritten.txt b/http/middleware/hlsrewrite/fixtures/data_rewritten.txt
new file mode 100644
index 00000000..a4e2348c
--- /dev/null
+++ b/http/middleware/hlsrewrite/fixtures/data_rewritten.txt
@@ -0,0 +1,29 @@
+#EXTM3U
+#EXT-X-VERSION:6
+#EXT-X-TARGETDURATION:2
+#EXT-X-MEDIA-SEQUENCE:303
+#EXT-X-INDEPENDENT-SEGMENTS
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:35.019+0200
+test_0_0_0303.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:37.019+0200
+test_0_0_0304.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:39.019+0200
+test_0_0_0305.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:41.019+0200
+test_0_0_0306.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:43.019+0200
+test_0_0_0307.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:45.019+0200
+test_0_0_0308.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:47.019+0200
+test_0_0_0309.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:49.019+0200
+test_0_0_0310.ts
diff --git a/http/middleware/hlsrewrite/hlsrewrite.go b/http/middleware/hlsrewrite/hlsrewrite.go
index 674228bf..bfc6fd47 100644
--- a/http/middleware/hlsrewrite/hlsrewrite.go
+++ b/http/middleware/hlsrewrite/hlsrewrite.go
@@ -6,6 +6,7 @@ import (
"net/http"
"strings"
+ "github.com/datarhei/core/v16/mem"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
)
@@ -31,7 +32,7 @@ func NewHLSRewrite() echo.MiddlewareFunc {
}
type hlsrewrite struct {
- pathPrefix string
+ pathPrefix []byte
}
func NewHLSRewriteWithConfig(config HLSRewriteConfig) echo.MiddlewareFunc {
@@ -47,7 +48,7 @@ func NewHLSRewriteWithConfig(config HLSRewriteConfig) echo.MiddlewareFunc {
}
hls := hlsrewrite{
- pathPrefix: pathPrefix,
+ pathPrefix: []byte(pathPrefix),
}
return func(next echo.HandlerFunc) echo.HandlerFunc {
@@ -91,6 +92,7 @@ func (h *hlsrewrite) rewrite(c echo.Context, next echo.HandlerFunc) error {
// the data that we need to rewrite.
rewriter = &hlsRewriter{
ResponseWriter: res.Writer,
+ buffer: mem.Get(),
}
res.Writer = rewriter
@@ -104,16 +106,20 @@ func (h *hlsrewrite) rewrite(c echo.Context, next echo.HandlerFunc) error {
res.Writer = writer
if rewrite {
- if res.Status != 200 {
+ if res.Status == 200 {
+ // Rewrite the data before sending it to the client
+ buffer := mem.Get()
+ defer mem.Put(buffer)
+
+ rewriter.rewrite(h.pathPrefix, buffer)
+
+ res.Header().Set("Cache-Control", "private")
+ res.Write(buffer.Bytes())
+ } else {
res.Write(rewriter.buffer.Bytes())
- return nil
}
- // Rewrite the data befor sending it to the client
- rewriter.rewrite(h.pathPrefix)
-
- res.Header().Set("Cache-Control", "private")
- res.Write(rewriter.buffer.Bytes())
+ mem.Put(rewriter.buffer)
}
return nil
@@ -121,7 +127,7 @@ func (h *hlsrewrite) rewrite(c echo.Context, next echo.HandlerFunc) error {
type hlsRewriter struct {
http.ResponseWriter
- buffer bytes.Buffer
+ buffer *mem.Buffer
}
func (g *hlsRewriter) Write(data []byte) (int, error) {
@@ -131,34 +137,29 @@ func (g *hlsRewriter) Write(data []byte) (int, error) {
return w, err
}
-func (g *hlsRewriter) rewrite(pathPrefix string) {
- var buffer bytes.Buffer
-
+func (g *hlsRewriter) rewrite(pathPrefix []byte, buffer *mem.Buffer) {
// Find all URLS in the .m3u8 and add the session ID to the query string
- scanner := bufio.NewScanner(&g.buffer)
+ scanner := bufio.NewScanner(g.buffer.Reader())
for scanner.Scan() {
- line := scanner.Text()
+ line := scanner.Bytes()
// Write empty lines unmodified
if len(line) == 0 {
- buffer.WriteString(line + "\n")
+ buffer.Write(line)
+ buffer.WriteByte('\n')
continue
}
// Write comments unmodified
- if strings.HasPrefix(line, "#") {
- buffer.WriteString(line + "\n")
+ if line[0] == '#' {
+ buffer.Write(line)
+ buffer.WriteByte('\n')
continue
}
// Rewrite
- line = strings.TrimPrefix(line, pathPrefix)
- buffer.WriteString(line + "\n")
+ line = bytes.TrimPrefix(line, pathPrefix)
+ buffer.Write(line)
+ buffer.WriteByte('\n')
}
-
- if err := scanner.Err(); err != nil {
- return
- }
-
- g.buffer = buffer
}
diff --git a/http/middleware/hlsrewrite/hlsrewrite_test.go b/http/middleware/hlsrewrite/hlsrewrite_test.go
new file mode 100644
index 00000000..a149feaa
--- /dev/null
+++ b/http/middleware/hlsrewrite/hlsrewrite_test.go
@@ -0,0 +1,49 @@
+package hlsrewrite
+
+import (
+ "os"
+ "testing"
+
+ "github.com/datarhei/core/v16/mem"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRewrite(t *testing.T) {
+ data, err := os.ReadFile("./fixtures/data.txt")
+ require.NoError(t, err)
+
+ rewrittendata, err := os.ReadFile("./fixtures/data_rewritten.txt")
+ require.NoError(t, err)
+
+ r := &hlsRewriter{
+ buffer: &mem.Buffer{},
+ }
+
+ r.Write(data)
+
+ buffer := &mem.Buffer{}
+ prefix := []byte("/path/to/foobar/")
+ r.rewrite(prefix, buffer)
+
+ require.Equal(t, rewrittendata, buffer.Bytes())
+}
+
+func BenchmarkRewrite(b *testing.B) {
+ data, err := os.ReadFile("./fixtures/data.txt")
+ require.NoError(b, err)
+
+ r := &hlsRewriter{
+ buffer: &mem.Buffer{},
+ }
+
+ buffer := &mem.Buffer{}
+ prefix := []byte("/path/to/foobar/")
+
+ for i := 0; i < b.N; i++ {
+ r.buffer.Reset()
+ r.Write(data)
+
+ buffer.Reset()
+ r.rewrite(prefix, buffer)
+ }
+}
diff --git a/http/middleware/log/log.go b/http/middleware/log/log.go
index 2a225c27..3a78a6c9 100644
--- a/http/middleware/log/log.go
+++ b/http/middleware/log/log.go
@@ -16,7 +16,7 @@ type Config struct {
// Skipper defines a function to skip middleware.
Skipper middleware.Skipper
Logger log.Logger
- Status func(code int)
+ Status func(code int, method, path string, size int64, ttfb time.Duration)
}
var DefaultConfig = Config{
@@ -76,10 +76,15 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
res.Writer = writer
req.Body = reader
+ if w.ttfb.IsZero() {
+ w.ttfb = start
+ }
+
latency := time.Since(start)
+ ttfb := time.Since(w.ttfb)
if config.Status != nil {
- config.Status(res.Status)
+ config.Status(res.Status, req.Method, c.Path(), w.size, ttfb)
}
if raw != "" {
@@ -87,16 +92,17 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
}
logger := config.Logger.WithFields(log.Fields{
- "client": c.RealIP(),
- "method": req.Method,
- "path": path,
- "proto": req.Proto,
- "status": res.Status,
- "status_text": http.StatusText(res.Status),
- "tx_size_bytes": w.size,
- "rx_size_bytes": r.size,
- "latency_ms": latency.Milliseconds(),
- "user_agent": req.Header.Get("User-Agent"),
+ "client": c.RealIP(),
+ "method": req.Method,
+ "path": path,
+ "proto": req.Proto,
+ "status": res.Status,
+ "status_text": http.StatusText(res.Status),
+ "tx_size_bytes": w.size,
+ "rx_size_bytes": r.size,
+ "latency_ms": latency.Milliseconds(),
+ "latency_ttfb_ms": ttfb.Milliseconds(),
+ "user_agent": req.Header.Get("User-Agent"),
})
logger.Debug().Log("")
@@ -110,12 +116,16 @@ type sizeWriter struct {
http.ResponseWriter
size int64
+ ttfb time.Time
}
func (w *sizeWriter) Write(body []byte) (int, error) {
n, err := w.ResponseWriter.Write(body)
w.size += int64(n)
+ if w.ttfb.IsZero() {
+ w.ttfb = time.Now()
+ }
return n, err
}
diff --git a/http/middleware/session/HLS.go b/http/middleware/session/HLS.go
index 6e2ad5c4..fa03d4e2 100644
--- a/http/middleware/session/HLS.go
+++ b/http/middleware/session/HLS.go
@@ -3,7 +3,6 @@ package session
import (
"bufio"
- "bytes"
"io"
"net/http"
"net/url"
@@ -11,6 +10,7 @@ import (
"path/filepath"
"strings"
+ "github.com/datarhei/core/v16/mem"
"github.com/datarhei/core/v16/net"
"github.com/lithammer/shortuuid/v4"
@@ -29,7 +29,7 @@ func (h *handler) handleHLS(c echo.Context, ctxuser string, data map[string]inte
return next(c)
}
-func (h *handler) handleHLSIngress(c echo.Context, ctxuser string, data map[string]interface{}, next echo.HandlerFunc) error {
+func (h *handler) handleHLSIngress(c echo.Context, _ string, data map[string]interface{}, next echo.HandlerFunc) error {
req := c.Request()
path := req.URL.Path
@@ -37,8 +37,9 @@ func (h *handler) handleHLSIngress(c echo.Context, ctxuser string, data map[stri
// Read out the path of the .ts files and look them up in the ts-map.
// Add it as ingress for the respective "sessionId". The "sessionId" is the .m3u8 file name.
reader := req.Body
- r := &bodyReader{
+ r := &segmentReader{
reader: req.Body,
+ buffer: mem.Get(),
}
req.Body = r
@@ -46,6 +47,7 @@ func (h *handler) handleHLSIngress(c echo.Context, ctxuser string, data map[stri
req.Body = reader
if r.size == 0 {
+ mem.Put(r.buffer)
return
}
@@ -58,8 +60,10 @@ func (h *handler) handleHLSIngress(c echo.Context, ctxuser string, data map[stri
h.hlsIngressCollector.Extra(path, data)
}
- h.hlsIngressCollector.Ingress(path, headerSize(req.Header))
+ buffer := mem.Get()
+ h.hlsIngressCollector.Ingress(path, headerSize(req.Header, buffer))
h.hlsIngressCollector.Ingress(path, r.size)
+ mem.Put(buffer)
segments := r.getSegments(urlpath.Dir(path))
@@ -74,6 +78,8 @@ func (h *handler) handleHLSIngress(c echo.Context, ctxuser string, data map[stri
}
h.lock.Unlock()
}
+
+ mem.Put(r.buffer)
}()
} else if strings.HasSuffix(path, ".ts") {
// Get the size of the .ts file and store it in the ts-map for later use.
@@ -87,9 +93,11 @@ func (h *handler) handleHLSIngress(c echo.Context, ctxuser string, data map[stri
req.Body = reader
if r.size != 0 {
+ buffer := mem.Get()
h.lock.Lock()
- h.rxsegments[path] = r.size + headerSize(req.Header)
+ h.rxsegments[path] = r.size + headerSize(req.Header, buffer)
h.lock.Unlock()
+ mem.Put(buffer)
}
}()
}
@@ -97,7 +105,7 @@ func (h *handler) handleHLSIngress(c echo.Context, ctxuser string, data map[stri
return next(c)
}
-func (h *handler) handleHLSEgress(c echo.Context, ctxuser string, data map[string]interface{}, next echo.HandlerFunc) error {
+func (h *handler) handleHLSEgress(c echo.Context, _ string, data map[string]interface{}, next echo.HandlerFunc) error {
req := c.Request()
res := c.Response()
@@ -171,6 +179,7 @@ func (h *handler) handleHLSEgress(c echo.Context, ctxuser string, data map[strin
// the data that we need to rewrite.
rewriter = &sessionRewriter{
ResponseWriter: res.Writer,
+ buffer: mem.Get(),
}
res.Writer = rewriter
@@ -188,21 +197,29 @@ func (h *handler) handleHLSEgress(c echo.Context, ctxuser string, data map[strin
if rewrite {
if res.Status < 200 || res.Status >= 300 {
res.Write(rewriter.buffer.Bytes())
+ mem.Put(rewriter.buffer)
return nil
}
+ buffer := mem.Get()
+
+ // Rewrite the data before sending it to the client
- rewriter.rewriteHLS(sessionID, c.Request().URL)
+ rewriter.rewriteHLS(sessionID, c.Request().URL, buffer)
res.Header().Set("Cache-Control", "private")
- res.Write(rewriter.buffer.Bytes())
+ res.Write(buffer.Bytes())
+
+ mem.Put(buffer)
+ mem.Put(rewriter.buffer)
}
if isM3U8 || isTS {
if res.Status >= 200 && res.Status < 300 {
// Collect how many bytes we've written in this session
- h.hlsEgressCollector.Egress(sessionID, headerSize(res.Header()))
+ buffer := mem.Get()
+ h.hlsEgressCollector.Egress(sessionID, headerSize(res.Header(), buffer))
h.hlsEgressCollector.Egress(sessionID, res.Size)
+ mem.Put(buffer)
if isTS {
// Activate the session. If the session is already active, this is a noop
@@ -214,13 +231,13 @@ func (h *handler) handleHLSEgress(c echo.Context, ctxuser string, data map[strin
return nil
}
-type bodyReader struct {
+type segmentReader struct {
reader io.ReadCloser
- buffer bytes.Buffer
+ buffer *mem.Buffer
size int64
}
-func (r *bodyReader) Read(b []byte) (int, error) {
+func (r *segmentReader) Read(b []byte) (int, error) {
n, err := r.reader.Read(b)
if n > 0 {
r.buffer.Write(b[:n])
@@ -230,15 +247,15 @@ func (r *bodyReader) Read(b []byte) (int, error) {
return n, err
}
-func (r *bodyReader) Close() error {
+func (r *segmentReader) Close() error {
return r.reader.Close()
}
-func (r *bodyReader) getSegments(dir string) []string {
+func (r *segmentReader) getSegments(dir string) []string {
segments := []string{}
// Find all segment URLs in the .m3u8
- scanner := bufio.NewScanner(&r.buffer)
+ scanner := bufio.NewScanner(r.buffer.Reader())
for scanner.Scan() {
line := scanner.Text()
@@ -280,65 +297,49 @@ func (r *bodyReader) getSegments(dir string) []string {
return segments
}
-type bodysizeReader struct {
- reader io.ReadCloser
- size int64
-}
-
-func (r *bodysizeReader) Read(b []byte) (int, error) {
- n, err := r.reader.Read(b)
- r.size += int64(n)
-
- return n, err
-}
-
-func (r *bodysizeReader) Close() error {
- return r.reader.Close()
-}
-
type sessionRewriter struct {
http.ResponseWriter
- buffer bytes.Buffer
+ buffer *mem.Buffer
}
func (g *sessionRewriter) Write(data []byte) (int, error) {
// Write the data into internal buffer for later rewrite
- w, err := g.buffer.Write(data)
-
- return w, err
+ return g.buffer.Write(data)
}
-func (g *sessionRewriter) rewriteHLS(sessionID string, requestURL *url.URL) {
- var buffer bytes.Buffer
-
+func (g *sessionRewriter) rewriteHLS(sessionID string, requestURL *url.URL, buffer *mem.Buffer) {
isMaster := false
// Find all URLS in the .m3u8 and add the session ID to the query string
- scanner := bufio.NewScanner(&g.buffer)
+ scanner := bufio.NewScanner(g.buffer.Reader())
for scanner.Scan() {
- line := scanner.Text()
+ byteline := scanner.Bytes()
// Write empty lines unmodified
- if len(line) == 0 {
- buffer.WriteString(line + "\n")
+ if len(byteline) == 0 {
+ buffer.Write(byteline)
+ buffer.WriteByte('\n')
continue
}
// Write comments unmodified
- if strings.HasPrefix(line, "#") {
- buffer.WriteString(line + "\n")
+ if byteline[0] == '#' {
+ buffer.Write(byteline)
+ buffer.WriteByte('\n')
continue
}
- u, err := url.Parse(line)
+ u, err := url.Parse(string(byteline))
if err != nil {
- buffer.WriteString(line + "\n")
+ buffer.Write(byteline)
+ buffer.WriteByte('\n')
continue
}
// Write anything that doesn't end in .m3u8 or .ts unmodified
if !strings.HasSuffix(u.Path, ".m3u8") && !strings.HasSuffix(u.Path, ".ts") {
- buffer.WriteString(line + "\n")
+ buffer.Write(byteline)
+ buffer.WriteByte('\n')
continue
}
@@ -407,6 +408,4 @@ func (g *sessionRewriter) rewriteHLS(sessionID string, requestURL *url.URL) {
buffer.WriteString(urlpath.Base(requestURL.Path) + "?" + q.Encode())
}
-
- g.buffer = buffer
}
diff --git a/http/middleware/session/HLS_test.go b/http/middleware/session/HLS_test.go
new file mode 100644
index 00000000..5d27103b
--- /dev/null
+++ b/http/middleware/session/HLS_test.go
@@ -0,0 +1,108 @@
+package session
+
+import (
+ "bytes"
+ "io"
+ "net/url"
+ "os"
+ "testing"
+
+ "github.com/datarhei/core/v16/mem"
+ "github.com/stretchr/testify/require"
+)
+
+func TestHLSSegmentReader(t *testing.T) {
+ data, err := os.ReadFile("./fixtures/segments.txt")
+ require.NoError(t, err)
+
+ r := bytes.NewReader(data)
+
+ br := &segmentReader{
+ reader: io.NopCloser(r),
+ buffer: &mem.Buffer{},
+ }
+
+ _, err = io.ReadAll(br)
+ require.NoError(t, err)
+
+ segments := br.getSegments("/foobar")
+ require.Equal(t, []string{
+ "/foobar/test_0_0_0303.ts",
+ "/foobar/test_0_0_0304.ts",
+ "/foobar/test_0_0_0305.ts",
+ "/foobar/test_0_0_0306.ts",
+ "/foobar/test_0_0_0307.ts",
+ "/foobar/test_0_0_0308.ts",
+ "/foobar/test_0_0_0309.ts",
+ "/foobar/test_0_0_0310.ts",
+ }, segments)
+}
+
+func BenchmarkHLSSegmentReader(b *testing.B) {
+ data, err := os.ReadFile("./fixtures/segments.txt")
+ require.NoError(b, err)
+
+ rd := bytes.NewReader(data)
+ r := io.NopCloser(rd)
+
+ for i := 0; i < b.N; i++ {
+ rd.Reset(data)
+ br := &segmentReader{
+ reader: io.NopCloser(r),
+ buffer: mem.Get(),
+ }
+
+ _, err := io.ReadAll(br)
+ require.NoError(b, err)
+
+ mem.Put(br.buffer)
+ }
+}
+
+func TestHLSRewrite(t *testing.T) {
+ data, err := os.ReadFile("./fixtures/segments.txt")
+ require.NoError(t, err)
+
+ br := &sessionRewriter{
+ buffer: &mem.Buffer{},
+ }
+
+ _, err = br.Write(data)
+ require.NoError(t, err)
+
+ u, err := url.Parse("http://example.com/test.m3u8")
+ require.NoError(t, err)
+
+ buffer := &mem.Buffer{}
+
+ br.rewriteHLS("oT5GV8eWBbRAh4aib5egoK", u, buffer)
+
+ data, err = os.ReadFile("./fixtures/segments_with_session.txt")
+ require.NoError(t, err)
+
+ require.Equal(t, data, buffer.Bytes())
+}
+
+func BenchmarkHLSRewrite(b *testing.B) {
+ data, err := os.ReadFile("./fixtures/segments.txt")
+ require.NoError(b, err)
+
+ u, err := url.Parse("http://example.com/test.m3u8")
+ require.NoError(b, err)
+
+ for i := 0; i < b.N; i++ {
+ br := &sessionRewriter{
+ buffer: mem.Get(),
+ }
+
+ _, err = br.Write(data)
+ require.NoError(b, err)
+
+ buffer := mem.Get()
+
+ br.rewriteHLS("oT5GV8eWBbRAh4aib5egoK", u, buffer)
+
+ mem.Put(br.buffer)
+ mem.Put(buffer)
+ }
+}
diff --git a/http/middleware/session/HTTP.go b/http/middleware/session/HTTP.go
index 615b2058..ccd50127 100644
--- a/http/middleware/session/HTTP.go
+++ b/http/middleware/session/HTTP.go
@@ -3,11 +3,12 @@ package session
import (
"net/url"
+ "github.com/datarhei/core/v16/mem"
"github.com/labstack/echo/v4"
"github.com/lithammer/shortuuid/v4"
)
-func (h *handler) handleHTTP(c echo.Context, ctxuser string, data map[string]interface{}, next echo.HandlerFunc) error {
+func (h *handler) handleHTTP(c echo.Context, _ string, data map[string]interface{}, next echo.HandlerFunc) error {
req := c.Request()
res := c.Response()
@@ -30,13 +31,13 @@ func (h *handler) handleHTTP(c echo.Context, ctxuser string, data map[string]int
id := shortuuid.New()
reader := req.Body
- r := &fakeReader{
+ r := &bodysizeReader{
reader: req.Body,
}
req.Body = r
writer := res.Writer
- w := &fakeWriter{
+ w := &bodysizeWriter{
ResponseWriter: res.Writer,
}
res.Writer = w
@@ -44,19 +45,21 @@ func (h *handler) handleHTTP(c echo.Context, ctxuser string, data map[string]int
h.httpCollector.RegisterAndActivate(id, "", location, referrer)
h.httpCollector.Extra(id, data)
- defer h.httpCollector.Close(id)
-
defer func() {
+ buffer := mem.Get()
+
req.Body = reader
- h.httpCollector.Ingress(id, r.size+headerSize(req.Header))
- }()
+ h.httpCollector.Ingress(id, r.size+headerSize(req.Header, buffer))
- defer func() {
res.Writer = writer
- h.httpCollector.Egress(id, w.size+headerSize(res.Header()))
+ h.httpCollector.Egress(id, w.size+headerSize(res.Header(), buffer))
data["code"] = res.Status
h.httpCollector.Extra(id, data)
+
+ h.httpCollector.Close(id)
+
+ mem.Put(buffer)
}()
return next(c)
diff --git a/http/middleware/session/fixtures/segments.txt b/http/middleware/session/fixtures/segments.txt
new file mode 100644
index 00000000..a4e2348c
--- /dev/null
+++ b/http/middleware/session/fixtures/segments.txt
@@ -0,0 +1,29 @@
+#EXTM3U
+#EXT-X-VERSION:6
+#EXT-X-TARGETDURATION:2
+#EXT-X-MEDIA-SEQUENCE:303
+#EXT-X-INDEPENDENT-SEGMENTS
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:35.019+0200
+test_0_0_0303.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:37.019+0200
+test_0_0_0304.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:39.019+0200
+test_0_0_0305.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:41.019+0200
+test_0_0_0306.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:43.019+0200
+test_0_0_0307.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:45.019+0200
+test_0_0_0308.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:47.019+0200
+test_0_0_0309.ts
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:49.019+0200
+test_0_0_0310.ts
diff --git a/http/middleware/session/fixtures/segments_with_session.txt b/http/middleware/session/fixtures/segments_with_session.txt
new file mode 100644
index 00000000..f59ed305
--- /dev/null
+++ b/http/middleware/session/fixtures/segments_with_session.txt
@@ -0,0 +1,29 @@
+#EXTM3U
+#EXT-X-VERSION:6
+#EXT-X-TARGETDURATION:2
+#EXT-X-MEDIA-SEQUENCE:303
+#EXT-X-INDEPENDENT-SEGMENTS
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:35.019+0200
+test_0_0_0303.ts?session=oT5GV8eWBbRAh4aib5egoK
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:37.019+0200
+test_0_0_0304.ts?session=oT5GV8eWBbRAh4aib5egoK
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:39.019+0200
+test_0_0_0305.ts?session=oT5GV8eWBbRAh4aib5egoK
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:41.019+0200
+test_0_0_0306.ts?session=oT5GV8eWBbRAh4aib5egoK
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:43.019+0200
+test_0_0_0307.ts?session=oT5GV8eWBbRAh4aib5egoK
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:45.019+0200
+test_0_0_0308.ts?session=oT5GV8eWBbRAh4aib5egoK
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:47.019+0200
+test_0_0_0309.ts?session=oT5GV8eWBbRAh4aib5egoK
+#EXTINF:2.000000,
+#EXT-X-PROGRAM-DATE-TIME:2024-10-09T12:56:49.019+0200
+test_0_0_0310.ts?session=oT5GV8eWBbRAh4aib5egoK
diff --git a/http/middleware/session/session.go b/http/middleware/session/session.go
index 684aa4c6..9cd77955 100644
--- a/http/middleware/session/session.go
+++ b/http/middleware/session/session.go
@@ -1,7 +1,6 @@
package session
import (
- "bytes"
"fmt"
"io"
"net/http"
@@ -13,6 +12,7 @@ import (
"github.com/datarhei/core/v16/glob"
"github.com/datarhei/core/v16/http/api"
"github.com/datarhei/core/v16/http/handler/util"
+ "github.com/datarhei/core/v16/mem"
"github.com/datarhei/core/v16/net"
"github.com/datarhei/core/v16/session"
"github.com/lithammer/shortuuid/v4"
@@ -173,43 +173,42 @@ func verifySession(raw interface{}, path, referrer string) (map[string]interface
return data, nil
}
-func headerSize(header http.Header) int64 {
- var buffer bytes.Buffer
-
- header.Write(&buffer)
+func headerSize(header http.Header, buffer *mem.Buffer) int64 {
+ buffer.Reset()
+ header.Write(buffer)
return int64(buffer.Len())
}
-type fakeReader struct {
+type bodysizeReader struct {
reader io.ReadCloser
size int64
}
-func (r *fakeReader) Read(b []byte) (int, error) {
+func (r *bodysizeReader) Read(b []byte) (int, error) {
n, err := r.reader.Read(b)
r.size += int64(n)
return n, err
}
-func (r *fakeReader) Close() error {
+func (r *bodysizeReader) Close() error {
return r.reader.Close()
}
-type fakeWriter struct {
+type bodysizeWriter struct {
http.ResponseWriter
size int64
code int
}
-func (w *fakeWriter) WriteHeader(statusCode int) {
+func (w *bodysizeWriter) WriteHeader(statusCode int) {
w.ResponseWriter.WriteHeader(statusCode)
w.code = statusCode
}
-func (w *fakeWriter) Write(body []byte) (int, error) {
+func (w *bodysizeWriter) Write(body []byte) (int, error) {
n, err := w.ResponseWriter.Write(body)
w.size += int64(n)
@@ -217,7 +216,7 @@ func (w *fakeWriter) Write(body []byte) (int, error) {
return n, err
}
-func (w *fakeWriter) Flush() {
+func (w *bodysizeWriter) Flush() {
flusher, ok := w.ResponseWriter.(http.Flusher)
if ok {
flusher.Flush()
diff --git a/http/middleware/session/session_test.go b/http/middleware/session/session_test.go
index 77ef3839..f5c0c597 100644
--- a/http/middleware/session/session_test.go
+++ b/http/middleware/session/session_test.go
@@ -1,9 +1,11 @@
package session
import (
+ "net/http"
"testing"
"github.com/datarhei/core/v16/encoding/json"
+ "github.com/datarhei/core/v16/mem"
"github.com/stretchr/testify/require"
)
@@ -134,3 +136,29 @@ func TestVerifySessionMultipleRemote(t *testing.T) {
_, err = verifySession(rawdata, "/memfs/6faad99a-c440-4df1-9344-963869718d8d/main.m3u8", "http://bar.example.com")
require.Error(t, err)
}
+
+func TestHeaderSize(t *testing.T) {
+ header := http.Header{}
+
+ header.Add("Content-Type", "application/json")
+ header.Add("Content-Encoding", "gzip")
+
+ buffer := &mem.Buffer{}
+ size := headerSize(header, buffer)
+
+ require.Equal(t, "Content-Encoding: gzip\r\nContent-Type: application/json\r\n", buffer.String())
+ require.Equal(t, int64(56), size)
+}
+
+func BenchmarkHeaderSize(b *testing.B) {
+ header := http.Header{}
+
+ header.Add("Content-Type", "application/json")
+ header.Add("Content-Encoding", "gzip")
+
+ buffer := &mem.Buffer{}
+
+ for i := 0; i < b.N; i++ {
+ headerSize(header, buffer)
+ }
+}
diff --git a/http/mock/mock.go b/http/mock/mock.go
index 51e3b8a0..39605431 100644
--- a/http/mock/mock.go
+++ b/http/mock/mock.go
@@ -2,23 +2,16 @@ package mock
import (
"bytes"
- "fmt"
"io"
"net/http"
"net/http/httptest"
"os"
- "path/filepath"
"strings"
"github.com/datarhei/core/v16/encoding/json"
- "github.com/datarhei/core/v16/ffmpeg"
"github.com/datarhei/core/v16/http/api"
"github.com/datarhei/core/v16/http/errorhandler"
"github.com/datarhei/core/v16/http/validator"
- "github.com/datarhei/core/v16/internal/testhelper"
- "github.com/datarhei/core/v16/io/fs"
- "github.com/datarhei/core/v16/restream"
- jsonstore "github.com/datarhei/core/v16/restream/store/json"
"github.com/invopop/jsonschema"
"github.com/labstack/echo/v4"
@@ -26,45 +19,6 @@ import (
"github.com/xeipuuv/gojsonschema"
)
-func DummyRestreamer(pathPrefix string) (restream.Restreamer, error) {
- binary, err := testhelper.BuildBinary("ffmpeg", filepath.Join(pathPrefix, "../../internal/testhelper"))
- if err != nil {
- return nil, fmt.Errorf("failed to build helper program: %w", err)
- }
-
- memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
- if err != nil {
- return nil, fmt.Errorf("failed to create memory filesystem: %w", err)
- }
-
- store, err := jsonstore.New(jsonstore.Config{
- Filesystem: memfs,
- })
- if err != nil {
- return nil, err
- }
-
- ffmpeg, err := ffmpeg.New(ffmpeg.Config{
- Binary: binary,
- MaxLogLines: 100,
- LogHistoryLength: 3,
- })
- if err != nil {
- return nil, err
- }
-
- rs, err := restream.New(restream.Config{
- Store: store,
- FFmpeg: ffmpeg,
- Filesystems: []fs.Filesystem{memfs},
- })
- if err != nil {
- return nil, err
- }
-
- return rs, nil
-}
-
func DummyEcho() *echo.Echo {
router := echo.New()
router.HideBanner = true
diff --git a/http/server.go b/http/server.go
index adde251a..c7dc68ab 100644
--- a/http/server.go
+++ b/http/server.go
@@ -30,9 +30,11 @@ package http
import (
"fmt"
+ "maps"
"net/http"
"strings"
"sync"
+ "time"
"github.com/datarhei/core/v16/cluster"
cfgstore "github.com/datarhei/core/v16/config/store"
@@ -51,6 +53,7 @@ import (
"github.com/datarhei/core/v16/monitor"
"github.com/datarhei/core/v16/net"
"github.com/datarhei/core/v16/prometheus"
+ "github.com/datarhei/core/v16/resources"
"github.com/datarhei/core/v16/restream"
"github.com/datarhei/core/v16/rtmp"
"github.com/datarhei/core/v16/session"
@@ -100,12 +103,20 @@ type Config struct {
Cluster cluster.Cluster
IAM iam.IAM
IAMSkipper func(ip string) bool
+ Resources resources.Resources
+ Compress CompressConfig
}
type CorsConfig struct {
Origins []string
}
+type CompressConfig struct {
+ Encoding []string
+ MimeTypes []string
+ MinLength int
+}
+
type server struct {
logger log.Logger
@@ -141,8 +152,10 @@ type server struct {
iam echo.MiddlewareFunc
}
- gzip struct {
+ compress struct {
+ encoding []string
mimetypes []string
+ minLength int
}
filesystems map[string]*filesystem
@@ -155,7 +168,7 @@ type server struct {
metrics struct {
lock sync.Mutex
- status map[int]uint64
+ status map[string]uint64
}
}
@@ -175,7 +188,7 @@ func NewServer(config Config) (serverhandler.Server, error) {
readOnly: config.ReadOnly,
}
- s.metrics.status = map[int]uint64{}
+ s.metrics.status = map[string]uint64{}
s.filesystems = map[string]*filesystem{}
@@ -251,6 +264,7 @@ func NewServer(config Config) (serverhandler.Server, error) {
s.handler.about = api.NewAbout(
config.Restream,
+ config.Resources,
func() []string { return config.IAM.Validators() },
)
@@ -332,11 +346,13 @@ func NewServer(config Config) (serverhandler.Server, error) {
s.middleware.log = mwlog.NewWithConfig(mwlog.Config{
Logger: s.logger,
- Status: func(code int) {
+ Status: func(code int, method, path string, size int64, ttfb time.Duration) {
+ key := fmt.Sprintf("%d:%s:%s", code, method, path)
+
s.metrics.lock.Lock()
defer s.metrics.lock.Unlock()
- s.metrics.status[code]++
+ s.metrics.status[key]++
},
})
@@ -372,15 +388,9 @@ func NewServer(config Config) (serverhandler.Server, error) {
IAM: config.IAM,
}, "/api/graph/query")
- s.gzip.mimetypes = []string{
- "text/plain",
- "text/html",
- "text/javascript",
- "application/json",
- "application/x-mpegurl",
- "application/vnd.apple.mpegurl",
- "image/svg+xml",
- }
+ s.compress.encoding = config.Compress.Encoding
+ s.compress.mimetypes = config.Compress.MimeTypes
+ s.compress.minLength = config.Compress.MinLength
s.router = echo.New()
s.router.JSONSerializer = &GoJSONSerializer{}
@@ -406,6 +416,13 @@ func NewServer(config Config) (serverhandler.Server, error) {
s.router.Use(s.middleware.iam)
+ s.router.Use(mwcompress.NewWithConfig(mwcompress.Config{
+ Level: mwcompress.BestSpeed,
+ MinLength: config.Compress.MinLength,
+ Schemes: config.Compress.Encoding,
+ ContentTypes: config.Compress.MimeTypes,
+ }))
+
s.router.Use(mwsession.NewWithConfig(mwsession.Config{
HLSIngressCollector: config.Sessions.Collector("hlsingress"),
HLSEgressCollector: config.Sessions.Collector("hls"),
@@ -470,26 +487,18 @@ func (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
s.router.ServeHTTP(w, r)
}
-func (s *server) HTTPStatus() map[int]uint64 {
- status := map[int]uint64{}
+func (s *server) HTTPStatus() map[string]uint64 {
+ status := map[string]uint64{}
s.metrics.lock.Lock()
defer s.metrics.lock.Unlock()
- for code, value := range s.metrics.status {
- status[code] = value
- }
+ maps.Copy(status, s.metrics.status)
return status
}
func (s *server) setRoutes() {
- gzipMiddleware := mwcompress.NewWithConfig(mwcompress.Config{
- Level: mwcompress.BestSpeed,
- MinLength: 1000,
- Skipper: mwcompress.ContentTypeSkipper(nil),
- })
-
// API router grouo
api := s.router.Group("/api")
@@ -505,7 +514,6 @@ func (s *server) setRoutes() {
// Swagger API documentation router group
doc := s.router.Group("/api/swagger/*")
- doc.Use(gzipMiddleware)
doc.GET("", echoSwagger.WrapHandler)
// Mount filesystems
@@ -524,14 +532,6 @@ func (s *server) setRoutes() {
DefaultContentType: filesystem.DefaultContentType,
}))
- if filesystem.Gzip {
- fs.Use(mwcompress.NewWithConfig(mwcompress.Config{
- Skipper: mwcompress.ContentTypeSkipper(s.gzip.mimetypes),
- Level: mwcompress.BestSpeed,
- MinLength: 1000,
- }))
- }
-
if filesystem.Cache != nil {
mwcache := mwcache.NewWithConfig(mwcache.Config{
Cache: filesystem.Cache,
@@ -585,7 +585,7 @@ func (s *server) setRoutes() {
// GraphQL
graphql := api.Group("/graph")
- graphql.Use(gzipMiddleware)
+ //graphql.Use(gzipMiddleware)
graphql.GET("", s.handler.graph.Playground)
graphql.POST("/query", s.handler.graph.Query)
@@ -593,7 +593,7 @@ func (s *server) setRoutes() {
// APIv3 router group
v3 := api.Group("/v3")
- v3.Use(gzipMiddleware)
+ //v3.Use(gzipMiddleware)
s.setRoutesV3(v3)
}
@@ -759,6 +759,8 @@ func (s *server) setRoutesV3(v3 *echo.Group) {
v3.GET("/cluster/fs/:storage", s.v3handler.cluster.FilesystemListFiles)
+ v3.POST("/cluster/events", s.v3handler.cluster.Events)
+
if !s.readOnly {
v3.PUT("/cluster/transfer/:id", s.v3handler.cluster.TransferLeadership)
v3.PUT("/cluster/leave", s.v3handler.cluster.Leave)
diff --git a/http/server/server.go b/http/server/server.go
index 893bf462..d46ea92e 100644
--- a/http/server/server.go
+++ b/http/server/server.go
@@ -4,5 +4,5 @@ import "net/http"
type Server interface {
ServeHTTP(w http.ResponseWriter, r *http.Request)
- HTTPStatus() map[int]uint64
+ HTTPStatus() map[string]uint64
}
diff --git a/internal/.gitignore b/internal/.gitignore
index 9872bd8c..ad8efa9c 100644
--- a/internal/.gitignore
+++ b/internal/.gitignore
@@ -2,4 +2,5 @@ testhelper/ignoresigint/ignoresigint
testhelper/sigint/sigint
testhelper/sigintwait/sigintwait
testhelper/sigpropagate/sigpropagate
-testhelper/ffmpeg/ffmpeg
\ No newline at end of file
+testhelper/ffmpeg/ffmpeg
+testhelper/nvidia-smi/nvidia-smi
\ No newline at end of file
diff --git a/internal/mock/psutil/psutil.go b/internal/mock/psutil/psutil.go
new file mode 100644
index 00000000..e03af2a0
--- /dev/null
+++ b/internal/mock/psutil/psutil.go
@@ -0,0 +1,123 @@
+package psutil
+
+import (
+ "sync"
+
+ "github.com/datarhei/core/v16/resources/psutil"
+)
+
+type MockPSUtil struct {
+ Lock sync.Mutex
+
+ CPUInfo psutil.CPUInfo
+ MemInfo psutil.MemoryInfo
+ GPUInfo []psutil.GPUInfo
+}
+
+func New(ngpu int) *MockPSUtil {
+ u := &MockPSUtil{
+ CPUInfo: psutil.CPUInfo{
+ System: 10,
+ User: 50,
+ Idle: 35,
+ Other: 5,
+ },
+ MemInfo: psutil.MemoryInfo{
+ Total: 200,
+ Available: 40,
+ Used: 160,
+ },
+ }
+
+ for i := 0; i < ngpu; i++ {
+ u.GPUInfo = append(u.GPUInfo, psutil.GPUInfo{
+ Index: i,
+ ID: "00000000:01:00.0",
+ Name: "L4",
+ MemoryTotal: 24 * 1024 * 1024 * 1024,
+ MemoryUsed: uint64(12+i) * 1024 * 1024 * 1024,
+ Usage: 50 - float64((i+1)*5),
+ Encoder: 50 - float64((i+1)*10),
+ Decoder: 50 - float64((i+1)*3),
+ })
+ }
+
+ return u
+}
+
+func (u *MockPSUtil) Start() {}
+func (u *MockPSUtil) Cancel() {}
+
+func (u *MockPSUtil) CPUCounts() (float64, error) {
+ return 2, nil
+}
+
+func (u *MockPSUtil) CPU() (*psutil.CPUInfo, error) {
+ u.Lock.Lock()
+ defer u.Lock.Unlock()
+
+ cpu := u.CPUInfo
+
+ return &cpu, nil
+}
+
+func (u *MockPSUtil) Disk(path string) (*psutil.DiskInfo, error) {
+ return &psutil.DiskInfo{}, nil
+}
+
+func (u *MockPSUtil) Memory() (*psutil.MemoryInfo, error) {
+ u.Lock.Lock()
+ defer u.Lock.Unlock()
+
+ mem := u.MemInfo
+
+ return &mem, nil
+}
+
+func (u *MockPSUtil) Network() ([]psutil.NetworkInfo, error) {
+ return nil, nil
+}
+
+func (u *MockPSUtil) GPU() ([]psutil.GPUInfo, error) {
+ u.Lock.Lock()
+ defer u.Lock.Unlock()
+
+ gpu := []psutil.GPUInfo{}
+
+ gpu = append(gpu, u.GPUInfo...)
+
+ return gpu, nil
+}
+
+func (u *MockPSUtil) Process(pid int32) (psutil.Process, error) {
+ return &mockPSUtilProcess{}, nil
+}
+
+type mockPSUtilProcess struct{}
+
+func (p *mockPSUtilProcess) CPU() (*psutil.CPUInfo, error) {
+ s := &psutil.CPUInfo{
+ System: 1,
+ User: 2,
+ Idle: 0,
+ Other: 3,
+ }
+
+ return s, nil
+}
+
+func (p *mockPSUtilProcess) Memory() (uint64, error) { return 42, nil }
+func (p *mockPSUtilProcess) GPU() (*psutil.GPUInfo, error) {
+ return &psutil.GPUInfo{
+ Index: 0,
+ Name: "L4",
+ MemoryTotal: 128,
+ MemoryUsed: 42,
+ Usage: 5,
+ Encoder: 9,
+ Decoder: 11,
+ }, nil
+}
+func (p *mockPSUtilProcess) Cancel() {}
+func (p *mockPSUtilProcess) Suspend() error { return nil }
+func (p *mockPSUtilProcess) Resume() error { return nil }
diff --git a/internal/mock/resources/resources.go b/internal/mock/resources/resources.go
new file mode 100644
index 00000000..127e870d
--- /dev/null
+++ b/internal/mock/resources/resources.go
@@ -0,0 +1,26 @@
+package resources
+
+import (
+ "github.com/datarhei/core/v16/internal/mock/psutil"
+ "github.com/datarhei/core/v16/resources"
+)
+
+func New() resources.Resources {
+ res, _ := resources.New(resources.Config{
+ PSUtil: psutil.New(1),
+ })
+
+ return res
+}
+
+func NewWithLimits() resources.Resources {
+ res, _ := resources.New(resources.Config{
+ MaxCPU: 100,
+ MaxMemory: 100,
+ MaxGPU: 100,
+ MaxGPUMemory: 100,
+ PSUtil: psutil.New(1),
+ })
+
+ return res
+}
diff --git a/internal/mock/restream/restream.go b/internal/mock/restream/restream.go
new file mode 100644
index 00000000..af9a9876
--- /dev/null
+++ b/internal/mock/restream/restream.go
@@ -0,0 +1,99 @@
+package restream
+
+import (
+ "fmt"
+
+ "github.com/datarhei/core/v16/ffmpeg"
+ "github.com/datarhei/core/v16/iam"
+ iamidentity "github.com/datarhei/core/v16/iam/identity"
+ "github.com/datarhei/core/v16/iam/policy"
+ "github.com/datarhei/core/v16/internal/mock/resources"
+ "github.com/datarhei/core/v16/internal/testhelper"
+ "github.com/datarhei/core/v16/io/fs"
+ "github.com/datarhei/core/v16/net"
+ "github.com/datarhei/core/v16/restream"
+ "github.com/datarhei/core/v16/restream/replace"
+ "github.com/datarhei/core/v16/restream/rewrite"
+ jsonstore "github.com/datarhei/core/v16/restream/store/json"
+)
+
+func New(portrange net.Portranger, validatorIn, validatorOut ffmpeg.Validator, replacer replace.Replacer) (restream.Restreamer, error) {
+ binary, err := testhelper.BuildBinary("ffmpeg")
+ if err != nil {
+ return nil, fmt.Errorf("failed to build helper program: %w", err)
+ }
+
+ resources := resources.New()
+
+ ffmpeg, err := ffmpeg.New(ffmpeg.Config{
+ Binary: binary,
+ LogHistoryLength: 3,
+ MaxLogLines: 100,
+ Portrange: portrange,
+ ValidatorInput: validatorIn,
+ ValidatorOutput: validatorOut,
+ Resource: resources,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
+ if err != nil {
+ return nil, err
+ }
+
+ store, err := jsonstore.New(jsonstore.Config{
+ Filesystem: memfs,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ policyAdapter, err := policy.NewJSONAdapter(memfs, "./policy.json", nil)
+ if err != nil {
+ return nil, err
+ }
+
+ identityAdapter, err := iamidentity.NewJSONAdapter(memfs, "./users.json", nil)
+ if err != nil {
+ return nil, err
+ }
+
+ iam, err := iam.New(iam.Config{
+ PolicyAdapter: policyAdapter,
+ IdentityAdapter: identityAdapter,
+ Superuser: iamidentity.User{
+ Name: "foobar",
+ },
+ JWTRealm: "",
+ JWTSecret: "",
+ Logger: nil,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ iam.AddPolicy("$anon", "$none", []string{"process"}, "*", []string{"CREATE", "GET", "DELETE", "UPDATE", "COMMAND", "PROBE", "METADATA", "PLAYOUT"})
+
+ rewriter, err := rewrite.New(rewrite.Config{
+ IAM: iam,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ rs, err := restream.New(restream.Config{
+ Store: store,
+ FFmpeg: ffmpeg,
+ Replace: replacer,
+ Filesystems: []fs.Filesystem{memfs},
+ Rewrite: rewriter,
+ Resources: resources,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return rs, nil
+}
diff --git a/internal/testhelper/nvidia-smi/nvidia-smi.go b/internal/testhelper/nvidia-smi/nvidia-smi.go
new file mode 100644
index 00000000..7bab74a3
--- /dev/null
+++ b/internal/testhelper/nvidia-smi/nvidia-smi.go
@@ -0,0 +1,986 @@
+package main
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/signal"
+ "slices"
+ "time"
+)
+
+var pmondata = `# gpu pid type sm mem enc dec fb command
+# Idx # C/G % % % % MB name
+ 0 7372 C 2 0 2 - 136 ffmpeg
+ 0 12176 C 5 2 3 7 782 ffmpeg
+ 1 20035 C 8 2 4 1 1145 ffmpeg
+ 1 20141 C 2 1 1 3 429 ffmpeg
+ 0 29591 C 2 1 - 2 435 ffmpeg `
+
+var querydata = `
+
+
+ Mon Jul 15 13:41:56 2024
+ 555.42.06
+ 12.5
+ 2
+
+ NVIDIA L4
+ NVIDIA
+ Ada Lovelace
+ Enabled
+ Disabled
+ Disabled
+ None
+
+ N/A
+ N/A
+
+
+ None
+
+ Disabled
+ 4000
+
+ N/A
+ N/A
+
+ 1654523003308
+ GPU-c5533cd4-5a60-059e-348d-b6d7466932e4
+ 1
+ 95.04.29.00.06
+ No
+ 0x100
+ 900-2G193-0000-001
+ 27B8-895-A1
+ N/A
+ 1
+
+ G193.0200.00.01
+ 2.1
+ 6.16
+ N/A
+
+
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+ N/A
+
+ None
+ N/A
+ N/A
+
+
+ No
+ N/A
+
+ 555.42.06
+
+ N/A
+
+
+ 01
+ 00
+ 0000
+ 3
+ 2
+ 27B810DE
+ 00000000:01:00.0
+ 16CA10DE
+
+
+ 4
+ 4
+ 4
+ 4
+ 5
+
+
+ 16x
+ 16x
+
+
+
+ N/A
+ N/A
+
+ 0
+ 0
+ 0 KB/s
+ 0 KB/s
+ N/A
+ N/A
+
+ N/A
+ P0
+
+ Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+
+ N/A
+
+ 23034 MiB
+ 434 MiB
+ 1 MiB
+ 22601 MiB
+
+
+ 32768 MiB
+ 1 MiB
+ 32767 MiB
+
+
+ 0 MiB
+ 0 MiB
+ 0 MiB
+
+ Default
+
+ 2 %
+ 0 %
+ 0 %
+ 0 %
+ 0 %
+ 0 %
+
+
+ 0
+ 0
+ 0
+
+
+ 0
+ 0
+ 0
+
+
+ Enabled
+ Enabled
+
+
+
+ 0
+ 0
+ 0
+ 0
+ 0
+
+
+ 0
+ 0
+ 0
+ 0
+ 0
+ No
+
+
+ 0
+ 0
+ 0
+ 0
+ 0
+
+
+
+
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+ N/A
+ N/A
+
+
+ 0
+ 0
+ No
+ No
+
+ 96 bank(s)
+ 0 bank(s)
+ 0 bank(s)
+ 0 bank(s)
+ 0 bank(s)
+
+
+
+ 45 C
+ 39 C
+ -5 C
+ -2 C
+ 0 C
+ N/A
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+
+ P0
+ 27.22 W
+ 72.00 W
+ 72.00 W
+ 72.00 W
+ 40.00 W
+ 72.00 W
+
+
+ N/A
+
+
+ P0
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+ 2040 MHz
+ 2040 MHz
+ 6250 MHz
+ 1770 MHz
+
+
+ 2040 MHz
+ 6251 MHz
+
+
+ 2040 MHz
+ 6251 MHz
+
+
+ N/A
+
+
+ 2040 MHz
+ 2040 MHz
+ 6251 MHz
+ 1770 MHz
+
+
+ 2040 MHz
+
+
+ N/A
+ N/A
+
+
+ 885.000 mV
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+
+ N/A
+
+
+
+
+ 6251 MHz
+ 2040 MHz
+ 2025 MHz
+ 2010 MHz
+ 1995 MHz
+ 1980 MHz
+ 1965 MHz
+ 1950 MHz
+ 1935 MHz
+ 1920 MHz
+ 1905 MHz
+ 1890 MHz
+ 1875 MHz
+ 1860 MHz
+ 1845 MHz
+ 1830 MHz
+ 1815 MHz
+ 1800 MHz
+ 1785 MHz
+ 1770 MHz
+ 1755 MHz
+ 1740 MHz
+ 1725 MHz
+ 1710 MHz
+ 1695 MHz
+ 1680 MHz
+ 1665 MHz
+ 1650 MHz
+ 1635 MHz
+ 1620 MHz
+ 1605 MHz
+ 1590 MHz
+ 1575 MHz
+ 1560 MHz
+ 1545 MHz
+ 1530 MHz
+ 1515 MHz
+ 1500 MHz
+ 1485 MHz
+ 1470 MHz
+ 1455 MHz
+ 1440 MHz
+ 1425 MHz
+ 1410 MHz
+ 1395 MHz
+ 1380 MHz
+ 1365 MHz
+ 1350 MHz
+ 1335 MHz
+ 1320 MHz
+ 1305 MHz
+ 1290 MHz
+ 1275 MHz
+ 1260 MHz
+ 1245 MHz
+ 1230 MHz
+ 1215 MHz
+ 1200 MHz
+ 1185 MHz
+ 1170 MHz
+ 1155 MHz
+ 1140 MHz
+ 1125 MHz
+ 1110 MHz
+ 1095 MHz
+ 1080 MHz
+ 1065 MHz
+ 1050 MHz
+ 1035 MHz
+ 1020 MHz
+ 1005 MHz
+ 990 MHz
+ 975 MHz
+ 960 MHz
+ 945 MHz
+ 930 MHz
+ 915 MHz
+ 900 MHz
+ 885 MHz
+ 870 MHz
+ 855 MHz
+ 840 MHz
+ 825 MHz
+ 810 MHz
+ 795 MHz
+ 780 MHz
+ 765 MHz
+ 750 MHz
+ 735 MHz
+ 720 MHz
+ 705 MHz
+ 690 MHz
+ 675 MHz
+ 660 MHz
+ 645 MHz
+ 630 MHz
+ 615 MHz
+ 600 MHz
+ 585 MHz
+ 570 MHz
+ 555 MHz
+ 540 MHz
+ 525 MHz
+ 510 MHz
+ 495 MHz
+ 480 MHz
+ 465 MHz
+ 450 MHz
+ 435 MHz
+ 420 MHz
+ 405 MHz
+ 390 MHz
+ 375 MHz
+ 360 MHz
+ 345 MHz
+ 330 MHz
+ 315 MHz
+ 300 MHz
+ 285 MHz
+ 270 MHz
+ 255 MHz
+ 240 MHz
+ 225 MHz
+ 210 MHz
+
+
+ 405 MHz
+ 645 MHz
+ 630 MHz
+ 615 MHz
+ 600 MHz
+ 585 MHz
+ 570 MHz
+ 555 MHz
+ 540 MHz
+ 525 MHz
+ 510 MHz
+ 495 MHz
+ 480 MHz
+ 465 MHz
+ 450 MHz
+ 435 MHz
+ 420 MHz
+ 405 MHz
+ 390 MHz
+ 375 MHz
+ 360 MHz
+ 345 MHz
+ 330 MHz
+ 315 MHz
+ 300 MHz
+ 285 MHz
+ 270 MHz
+ 255 MHz
+ 240 MHz
+ 225 MHz
+ 210 MHz
+
+
+
+
+ 10131
+ C
+ ffmpeg
+ 389 MiB
+
+
+ 13597
+ C
+ ffmpeg
+ 1054 MiB
+
+
+
+
+
+ disabled
+
+
+
+
+ NVIDIA L4
+ NVIDIA
+ Ada Lovelace
+ Enabled
+ Disabled
+ Disabled
+ None
+
+ N/A
+ N/A
+
+
+ None
+
+ Disabled
+ 4000
+
+ N/A
+ N/A
+
+ 1654523001128
+ GPU-128ab6fb-6ec9-fd74-b479-4a5fd14f55bd
+ 0
+ 95.04.29.00.06
+ No
+ 0xc100
+ 900-2G193-0000-001
+ 27B8-895-A1
+ N/A
+ 1
+
+ G193.0200.00.01
+ 2.1
+ 6.16
+ N/A
+
+
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+ N/A
+
+ None
+ N/A
+ N/A
+
+
+ No
+ N/A
+
+ 555.42.06
+
+ N/A
+
+
+ C1
+ 00
+ 0000
+ 3
+ 2
+ 27B810DE
+ 00000000:C1:00.0
+ 16CA10DE
+
+
+ 4
+ 4
+ 4
+ 4
+ 5
+
+
+ 16x
+ 1x
+
+
+
+ N/A
+ N/A
+
+ 0
+ 0
+ 0 KB/s
+ 0 KB/s
+ N/A
+ N/A
+
+ N/A
+ P0
+
+ Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+
+ N/A
+
+ 23034 MiB
+ 434 MiB
+ 1 MiB
+ 22601 MiB
+
+
+ 32768 MiB
+ 1 MiB
+ 32767 MiB
+
+
+ 0 MiB
+ 0 MiB
+ 0 MiB
+
+ Default
+
+ 3 %
+ 0 %
+ 0 %
+ 0 %
+ 0 %
+ 0 %
+
+
+ 0
+ 0
+ 0
+
+
+ 0
+ 0
+ 0
+
+
+ Enabled
+ Enabled
+
+
+
+ 0
+ 0
+ 0
+ 0
+ 0
+
+
+ 0
+ 0
+ 0
+ 0
+ 0
+ No
+
+
+ 0
+ 0
+ 0
+ 0
+ 0
+
+
+
+
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+ N/A
+ N/A
+
+
+ 0
+ 0
+ No
+ No
+
+ 96 bank(s)
+ 0 bank(s)
+ 0 bank(s)
+ 0 bank(s)
+ 0 bank(s)
+
+
+
+ 40 C
+ 43 C
+ -5 C
+ -2 C
+ 0 C
+ N/A
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+
+ P0
+ 29.54 W
+ 72.00 W
+ 72.00 W
+ 72.00 W
+ 40.00 W
+ 72.00 W
+
+
+ N/A
+
+
+ P0
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+ 2040 MHz
+ 2040 MHz
+ 6250 MHz
+ 1770 MHz
+
+
+ 2040 MHz
+ 6251 MHz
+
+
+ 2040 MHz
+ 6251 MHz
+
+
+ N/A
+
+
+ 2040 MHz
+ 2040 MHz
+ 6251 MHz
+ 1770 MHz
+
+
+ 2040 MHz
+
+
+ N/A
+ N/A
+
+
+ 910.000 mV
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+
+ N/A
+
+
+
+
+ 6251 MHz
+ 2040 MHz
+ 2025 MHz
+ 2010 MHz
+ 1995 MHz
+ 1980 MHz
+ 1965 MHz
+ 1950 MHz
+ 1935 MHz
+ 1920 MHz
+ 1905 MHz
+ 1890 MHz
+ 1875 MHz
+ 1860 MHz
+ 1845 MHz
+ 1830 MHz
+ 1815 MHz
+ 1800 MHz
+ 1785 MHz
+ 1770 MHz
+ 1755 MHz
+ 1740 MHz
+ 1725 MHz
+ 1710 MHz
+ 1695 MHz
+ 1680 MHz
+ 1665 MHz
+ 1650 MHz
+ 1635 MHz
+ 1620 MHz
+ 1605 MHz
+ 1590 MHz
+ 1575 MHz
+ 1560 MHz
+ 1545 MHz
+ 1530 MHz
+ 1515 MHz
+ 1500 MHz
+ 1485 MHz
+ 1470 MHz
+ 1455 MHz
+ 1440 MHz
+ 1425 MHz
+ 1410 MHz
+ 1395 MHz
+ 1380 MHz
+ 1365 MHz
+ 1350 MHz
+ 1335 MHz
+ 1320 MHz
+ 1305 MHz
+ 1290 MHz
+ 1275 MHz
+ 1260 MHz
+ 1245 MHz
+ 1230 MHz
+ 1215 MHz
+ 1200 MHz
+ 1185 MHz
+ 1170 MHz
+ 1155 MHz
+ 1140 MHz
+ 1125 MHz
+ 1110 MHz
+ 1095 MHz
+ 1080 MHz
+ 1065 MHz
+ 1050 MHz
+ 1035 MHz
+ 1020 MHz
+ 1005 MHz
+ 990 MHz
+ 975 MHz
+ 960 MHz
+ 945 MHz
+ 930 MHz
+ 915 MHz
+ 900 MHz
+ 885 MHz
+ 870 MHz
+ 855 MHz
+ 840 MHz
+ 825 MHz
+ 810 MHz
+ 795 MHz
+ 780 MHz
+ 765 MHz
+ 750 MHz
+ 735 MHz
+ 720 MHz
+ 705 MHz
+ 690 MHz
+ 675 MHz
+ 660 MHz
+ 645 MHz
+ 630 MHz
+ 615 MHz
+ 600 MHz
+ 585 MHz
+ 570 MHz
+ 555 MHz
+ 540 MHz
+ 525 MHz
+ 510 MHz
+ 495 MHz
+ 480 MHz
+ 465 MHz
+ 450 MHz
+ 435 MHz
+ 420 MHz
+ 405 MHz
+ 390 MHz
+ 375 MHz
+ 360 MHz
+ 345 MHz
+ 330 MHz
+ 315 MHz
+ 300 MHz
+ 285 MHz
+ 270 MHz
+ 255 MHz
+ 240 MHz
+ 225 MHz
+ 210 MHz
+
+
+ 405 MHz
+ 645 MHz
+ 630 MHz
+ 615 MHz
+ 600 MHz
+ 585 MHz
+ 570 MHz
+ 555 MHz
+ 540 MHz
+ 525 MHz
+ 510 MHz
+ 495 MHz
+ 480 MHz
+ 465 MHz
+ 450 MHz
+ 435 MHz
+ 420 MHz
+ 405 MHz
+ 390 MHz
+ 375 MHz
+ 360 MHz
+ 345 MHz
+ 330 MHz
+ 315 MHz
+ 300 MHz
+ 285 MHz
+ 270 MHz
+ 255 MHz
+ 240 MHz
+ 225 MHz
+ 210 MHz
+
+
+
+
+ 16870
+ C
+ ffmpeg
+ 549 MiB
+
+
+
+
+
+ disabled
+
+
+
+`
+
+func main() {
+ if len(os.Args) == 1 {
+ os.Exit(1)
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ wait := false
+
+ if os.Args[1] == "pmon" {
+ if slices.Contains(os.Args[1:], "-c") {
+ fmt.Fprintf(os.Stdout, "%s\n", pmondata)
+ } else {
+ go func(ctx context.Context) {
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ fmt.Fprintf(os.Stdout, "%s\n", pmondata)
+ }
+ }
+ }(ctx)
+ }
+ } else {
+ if !slices.Contains(os.Args[1:], "-l") {
+ fmt.Fprintf(os.Stdout, "%s\n", querydata)
+ } else {
+ wait = true
+ go func(ctx context.Context) {
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ fmt.Fprintf(os.Stdout, "%s\n", querydata)
+ }
+ }
+ }(ctx)
+ }
+ }
+
+ if wait {
+	// Wait for an interrupt signal to gracefully shut down the app
+ quit := make(chan os.Signal, 1)
+ signal.Notify(quit, os.Interrupt)
+ <-quit
+ }
+
+ cancel()
+
+ os.Exit(0)
+}
diff --git a/internal/testhelper/testhelper.go b/internal/testhelper/testhelper.go
index 0a6ac32d..70004ab8 100644
--- a/internal/testhelper/testhelper.go
+++ b/internal/testhelper/testhelper.go
@@ -4,10 +4,12 @@ import (
"fmt"
"os/exec"
"path/filepath"
+ "runtime"
)
-func BuildBinary(name, pathprefix string) (string, error) {
- dir := filepath.Join(pathprefix, name)
+func BuildBinary(name string) (string, error) {
+ _, filename, _, _ := runtime.Caller(0)
+ dir := filepath.Join(filepath.Dir(filename), name)
aout := filepath.Join(dir, name)
err := exec.Command("go", "build", "-o", aout, dir).Run()
diff --git a/io/fs/disk.go b/io/fs/disk.go
index 6cac9d40..b9f76b77 100644
--- a/io/fs/disk.go
+++ b/io/fs/disk.go
@@ -403,6 +403,31 @@ func (fs *diskFilesystem) WriteFileSafe(path string, data []byte) (int64, bool,
return int64(size), !replace, nil
}
+func (fs *diskFilesystem) AppendFileReader(path string, r io.Reader, sizeHint int) (int64, error) {
+ path = fs.cleanPath(path)
+
+ dir := filepath.Dir(path)
+ if err := os.MkdirAll(dir, 0755); err != nil {
+ return -1, fmt.Errorf("creating file failed: %w", err)
+ }
+
+ f, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)
+ if err != nil {
+ return -1, err
+ }
+
+ defer f.Close()
+
+ size, err := f.ReadFrom(r)
+ if err != nil {
+ return -1, fmt.Errorf("reading data failed: %w", err)
+ }
+
+ fs.lastSizeCheck = time.Time{}
+
+ return size, nil
+}
+
func (fs *diskFilesystem) Rename(src, dst string) error {
src = fs.cleanPath(src)
dst = fs.cleanPath(dst)
diff --git a/io/fs/fs.go b/io/fs/fs.go
index 4cc8e201..973be194 100644
--- a/io/fs/fs.go
+++ b/io/fs/fs.go
@@ -108,6 +108,10 @@ type WriteFilesystem interface {
// an error adding the file and error is not nil.
WriteFileSafe(path string, data []byte) (int64, bool, error)
+	// AppendFileReader appends the contents from reader to the file at path. If the file doesn't
+	// exist, it will be created. It returns the number of written bytes, or -1 and an error.
+ AppendFileReader(path string, r io.Reader, size int) (int64, error)
+
// MkdirAll creates a directory named path, along with any necessary parents, and returns nil,
// or else returns an error. The permission bits perm (before umask) are used for all directories
// that MkdirAll creates. If path is already a directory, MkdirAll does nothing and returns nil.
diff --git a/io/fs/fs_test.go b/io/fs/fs_test.go
index 9d52dddb..2855e20f 100644
--- a/io/fs/fs_test.go
+++ b/io/fs/fs_test.go
@@ -59,8 +59,14 @@ func TestFilesystem(t *testing.T) {
os.RemoveAll("./testing/")
filesystems := map[string]func(string) (Filesystem, error){
- "memfs": func(name string) (Filesystem, error) {
- return NewMemFilesystem(MemConfig{})
+ "memfs-map": func(name string) (Filesystem, error) {
+ return NewMemFilesystem(MemConfig{Storage: "map"})
+ },
+ "memfs-xsync": func(name string) (Filesystem, error) {
+ return NewMemFilesystem(MemConfig{Storage: "xsync"})
+ },
+ "memfs-swiss": func(name string) (Filesystem, error) {
+ return NewMemFilesystem(MemConfig{Storage: "swiss"})
},
"diskfs": func(name string) (Filesystem, error) {
return NewRootedDiskFilesystem(RootedDiskConfig{
@@ -109,6 +115,8 @@ func TestFilesystem(t *testing.T) {
"symlinkErrors": testSymlinkErrors,
"symlinkOpenStat": testSymlinkOpenStat,
"open": testOpen,
+ "append": testAppend,
+ "appendCreate": testAppendCreate,
}
for fsname, fs := range filesystems {
@@ -119,6 +127,11 @@ func TestFilesystem(t *testing.T) {
}
filesystem, err := fs(name)
require.NoError(t, err)
+
+ if fsname == "s3fs" {
+ filesystem.RemoveList("/", ListOptions{Pattern: "/**"})
+ }
+
test(t, filesystem)
})
}
@@ -853,3 +866,28 @@ func testSymlinkErrors(t *testing.T, fs Filesystem) {
err = fs.Symlink("/bazfoo", "/barfoo")
require.Error(t, err)
}
+
+func testAppend(t *testing.T, fs Filesystem) {
+ _, _, err := fs.WriteFileReader("/foobar", strings.NewReader("part1"), -1)
+ require.NoError(t, err)
+
+ _, err = fs.AppendFileReader("/foobar", strings.NewReader("part2"), -1)
+ require.NoError(t, err)
+
+ file := fs.Open("/foobar")
+ require.NotNil(t, file)
+
+ data, _ := io.ReadAll(file)
+ require.Equal(t, []byte("part1part2"), data)
+}
+
+func testAppendCreate(t *testing.T, fs Filesystem) {
+ _, err := fs.AppendFileReader("/foobar", strings.NewReader("part1"), -1)
+ require.NoError(t, err)
+
+ file := fs.Open("/foobar")
+ require.NotNil(t, file)
+
+ data, _ := io.ReadAll(file)
+ require.Equal(t, []byte("part1"), data)
+}
diff --git a/io/fs/mem.go b/io/fs/mem.go
index 2033d349..e1d33aae 100644
--- a/io/fs/mem.go
+++ b/io/fs/mem.go
@@ -2,7 +2,6 @@ package fs
import (
"bytes"
- "errors"
"fmt"
"io"
"io/fs"
@@ -15,6 +14,7 @@ import (
"github.com/datarhei/core/v16/glob"
"github.com/datarhei/core/v16/log"
+ "github.com/datarhei/core/v16/mem"
)
// MemConfig is the config that is required for creating
@@ -68,8 +68,8 @@ func (f *memFileInfo) IsDir() bool {
type memFile struct {
memFileInfo
- data *bytes.Buffer // Contents of the file
- r *bytes.Reader
+ data *mem.Buffer // Contents of the file
+ r io.ReadSeeker
}
func (f *memFile) Name() string {
@@ -105,14 +105,20 @@ func (f *memFile) Seek(offset int64, whence int) (int64, error) {
}
func (f *memFile) Close() error {
- if f.data == nil {
- return io.EOF
+ var err error = nil
+
+ if f.r == nil {
+ err = io.EOF
}
f.r = nil
- f.data = nil
- return nil
+ if f.data != nil {
+ mem.Put(f.data)
+ f.data = nil
+ }
+
+ return err
}
type memFilesystem struct {
@@ -208,6 +214,8 @@ func NewMemFilesystem(config MemConfig) (Filesystem, error) {
if config.Storage == "map" {
fs.storage = newMapStorage()
+ } else if config.Storage == "swiss" {
+ fs.storage = newSwissMapStorage()
} else {
fs.storage = newMapOfStorage()
}
@@ -312,32 +320,23 @@ func (fs *memFilesystem) Files() int64 {
func (fs *memFilesystem) Open(path string) File {
path = fs.cleanPath(path)
- file, ok := fs.storage.LoadAndCopy(path)
+ newFile, ok := fs.storage.LoadAndCopy(path)
if !ok {
return nil
}
- newFile := &memFile{
- memFileInfo: memFileInfo{
- name: file.name,
- lastMod: file.lastMod,
- linkTo: file.linkTo,
- },
- }
-
- if len(file.linkTo) != 0 {
- file.Close()
-
- file, ok = fs.storage.LoadAndCopy(file.linkTo)
+ if len(newFile.linkTo) != 0 {
+ file, ok := fs.storage.LoadAndCopy(newFile.linkTo)
if !ok {
return nil
}
+
+ newFile.lastMod = file.lastMod
+ newFile.data = file.data
+ newFile.size = file.size
}
- newFile.lastMod = file.lastMod
- newFile.data = file.data
- newFile.size = file.size
- newFile.r = bytes.NewReader(file.data.Bytes())
+ newFile.r = newFile.data.Reader()
return newFile
}
@@ -351,15 +350,12 @@ func (fs *memFilesystem) ReadFile(path string) ([]byte, error) {
}
if len(file.linkTo) != 0 {
- file.Close()
-
file, ok = fs.storage.LoadAndCopy(file.linkTo)
if !ok {
return nil, ErrNotExist
}
}
- defer file.Close()
return file.data.Bytes(), nil
}
@@ -409,43 +405,6 @@ func (fs *memFilesystem) Symlink(oldname, newname string) error {
return nil
}
-var chunkPool = sync.Pool{
- New: func() interface{} {
- chunk := make([]byte, 128*1024)
- return &chunk
- },
-}
-
-func copyToBufferFromReader(buf *bytes.Buffer, r io.Reader, _ int) (int64, error) {
- chunkPtr := chunkPool.Get().(*[]byte)
- chunk := *chunkPtr
- defer chunkPool.Put(chunkPtr)
-
- size := int64(0)
-
- for {
- n, err := r.Read(chunk)
- if n != 0 {
- buf.Write(chunk[:n])
- size += int64(n)
- }
-
- if err != nil {
- if errors.Is(err, io.EOF) {
- return size, nil
- }
-
- return size, err
- }
-
- if n == 0 {
- break
- }
- }
-
- return size, nil
-}
-
func (fs *memFilesystem) WriteFileReader(path string, r io.Reader, sizeHint int) (int64, bool, error) {
path = fs.cleanPath(path)
@@ -461,14 +420,10 @@ func (fs *memFilesystem) WriteFileReader(path string, r io.Reader, sizeHint int)
size: 0,
lastMod: time.Now(),
},
- data: &bytes.Buffer{},
+ data: mem.Get(),
}
- if sizeHint > 0 {
- newFile.data.Grow(sizeHint)
- }
-
- size, err := copyToBufferFromReader(newFile.data, r, 8*1024)
+ size, err := newFile.data.ReadFrom(r)
if err != nil {
fs.logger.WithFields(log.Fields{
"path": path,
@@ -523,6 +478,51 @@ func (fs *memFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, e
return fs.WriteFileReader(path, bytes.NewReader(data), len(data))
}
+func (fs *memFilesystem) AppendFileReader(path string, r io.Reader, sizeHint int) (int64, error) {
+ path = fs.cleanPath(path)
+
+ file, hasFile := fs.storage.LoadAndCopy(path)
+ if !hasFile {
+ size, _, err := fs.WriteFileReader(path, r, sizeHint)
+ return size, err
+ }
+
+ size, err := file.data.ReadFrom(r)
+ if err != nil {
+ fs.logger.WithFields(log.Fields{
+ "path": path,
+ "filesize_bytes": size,
+ "error": err,
+ }).Warn().Log("Incomplete file")
+
+ file.Close()
+
+ return -1, fmt.Errorf("incomplete file")
+ }
+
+ file.size += size
+ file.lastMod = time.Now()
+
+ oldFile, replace := fs.storage.Store(path, file)
+
+ fs.sizeLock.Lock()
+ defer fs.sizeLock.Unlock()
+
+ if replace {
+ oldFile.Close()
+ }
+
+ fs.currentSize += size
+
+ fs.logger.Debug().WithFields(log.Fields{
+ "path": file.name,
+ "filesize_bytes": file.size,
+ "size_bytes": fs.currentSize,
+ }).Log("Appended to file")
+
+ return size, nil
+}
+
func (fs *memFilesystem) Purge(size int64) int64 {
files := []*memFile{}
@@ -628,27 +628,18 @@ func (fs *memFilesystem) Copy(src, dst string) error {
return os.ErrInvalid
}
- srcFile, ok := fs.storage.LoadAndCopy(src)
+ file, ok := fs.storage.LoadAndCopy(src)
if !ok {
return ErrNotExist
}
- if srcFile.dir {
- srcFile.Close()
+ if file.dir {
return ErrNotExist
}
- dstFile := &memFile{
- memFileInfo: memFileInfo{
- name: dst,
- dir: false,
- size: srcFile.size,
- lastMod: time.Now(),
- },
- data: srcFile.data,
- }
+ file.lastMod = time.Now()
- f, replace := fs.storage.Store(dst, dstFile)
+ replacedFile, replace := fs.storage.Store(dst, file)
if !replace {
fs.dirs.Add(dst)
@@ -658,11 +649,11 @@ func (fs *memFilesystem) Copy(src, dst string) error {
defer fs.sizeLock.Unlock()
if replace {
- f.Close()
- fs.currentSize -= f.size
+ replacedFile.Close()
+ fs.currentSize -= replacedFile.size
}
- fs.currentSize += dstFile.size
+ fs.currentSize += file.size
return nil
}
diff --git a/io/fs/mem_storage.go b/io/fs/mem_storage.go
index e6bc2362..18d8f156 100644
--- a/io/fs/mem_storage.go
+++ b/io/fs/mem_storage.go
@@ -1,19 +1,36 @@
package fs
import (
- "bytes"
"sync"
+ "github.com/datarhei/core/v16/mem"
+ "github.com/dolthub/swiss"
"github.com/puzpuzpuz/xsync/v3"
)
type memStorage interface {
- Delete(key string) (*memFile, bool)
- Store(key string, value *memFile) (*memFile, bool)
- Load(key string) (value *memFile, ok bool)
- LoadAndCopy(key string) (value *memFile, ok bool)
+ // Delete deletes a file from the storage.
+ Delete(key string) (file *memFile, ok bool)
+
+ // Store stores a file to the storage. If there's already a file with
+	// the same key, that file will be returned and replaced with the
+ // new file.
+ Store(key string, file *memFile) (oldfile *memFile, ok bool)
+
+	// Load loads a file from the storage. This returns a reference to the file,
+ // i.e. all changes to the file will be reflected on the storage.
+ Load(key string) (file *memFile, ok bool)
+
+ // LoadAndCopy loads a file from the storage. This is a copy of file
+ // metadata and content.
+ LoadAndCopy(key string) (file *memFile, ok bool)
+
+ // Has checks whether a file exists at path.
Has(key string) bool
- Range(f func(key string, value *memFile) bool)
+
+ // Range ranges over all files on the storage. The callback needs to return
+ // false in order to stop the iteration.
+ Range(f func(key string, file *memFile) bool)
}
type mapOfStorage struct {
@@ -55,27 +72,29 @@ func (m *mapOfStorage) LoadAndCopy(key string) (*memFile, bool) {
token := m.lock.RLock()
defer m.lock.RUnlock(token)
- v, ok := m.files.Load(key)
+ file, ok := m.files.Load(key)
if !ok {
return nil, false
}
- f := &memFile{
+ newFile := &memFile{
memFileInfo: memFileInfo{
- name: v.name,
- size: v.size,
- dir: v.dir,
- lastMod: v.lastMod,
- linkTo: v.linkTo,
+ name: file.name,
+ size: file.size,
+ dir: file.dir,
+ lastMod: file.lastMod,
+ linkTo: file.linkTo,
},
- r: nil,
+ data: nil,
+ r: nil,
}
- if v.data != nil {
- f.data = bytes.NewBuffer(v.data.Bytes())
+ if file.data != nil {
+ newFile.data = mem.Get()
+ file.data.WriteTo(newFile.data)
}
- return f, true
+ return newFile, true
}
func (m *mapOfStorage) Has(key string) bool {
@@ -145,7 +164,7 @@ func (m *mapStorage) LoadAndCopy(key string) (*memFile, bool) {
return nil, false
}
- f := &memFile{
+ newFile := &memFile{
memFileInfo: memFileInfo{
name: v.name,
size: v.size,
@@ -153,14 +172,16 @@ func (m *mapStorage) LoadAndCopy(key string) (*memFile, bool) {
lastMod: v.lastMod,
linkTo: v.linkTo,
},
- r: nil,
+ data: nil,
+ r: nil,
}
if v.data != nil {
- f.data = bytes.NewBuffer(v.data.Bytes())
+ newFile.data = mem.Get()
+ v.data.WriteTo(newFile.data)
}
- return f, true
+ return newFile, true
}
func (m *mapStorage) Has(key string) bool {
@@ -182,3 +203,93 @@ func (m *mapStorage) Range(f func(key string, value *memFile) bool) {
}
}
}
+
+type swissMapStorage struct {
+ lock *xsync.RBMutex
+ files *swiss.Map[string, *memFile]
+}
+
+func newSwissMapStorage() memStorage {
+ m := &swissMapStorage{
+ lock: xsync.NewRBMutex(),
+ files: swiss.NewMap[string, *memFile](128),
+ }
+
+ return m
+}
+
+func (m *swissMapStorage) Delete(key string) (*memFile, bool) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+
+ file, hasFile := m.files.Get(key)
+ if !hasFile {
+ return nil, false
+ }
+
+ m.files.Delete(key)
+
+ return file, true
+}
+
+func (m *swissMapStorage) Store(key string, value *memFile) (*memFile, bool) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+
+ file, hasFile := m.files.Get(key)
+ m.files.Put(key, value)
+
+ return file, hasFile
+}
+
+func (m *swissMapStorage) Load(key string) (*memFile, bool) {
+ token := m.lock.RLock()
+ defer m.lock.RUnlock(token)
+
+ return m.files.Get(key)
+}
+
+func (m *swissMapStorage) LoadAndCopy(key string) (*memFile, bool) {
+ token := m.lock.RLock()
+ defer m.lock.RUnlock(token)
+
+ file, ok := m.files.Get(key)
+ if !ok {
+ return nil, false
+ }
+
+ newFile := &memFile{
+ memFileInfo: memFileInfo{
+ name: file.name,
+ size: file.size,
+ dir: file.dir,
+ lastMod: file.lastMod,
+ linkTo: file.linkTo,
+ },
+ data: nil,
+ r: nil,
+ }
+
+ if file.data != nil {
+ newFile.data = mem.Get()
+ file.data.WriteTo(newFile.data)
+ }
+
+ return newFile, true
+}
+
+func (m *swissMapStorage) Has(key string) bool {
+ token := m.lock.RLock()
+ defer m.lock.RUnlock(token)
+
+ return m.files.Has(key)
+}
+
+func (m *swissMapStorage) Range(f func(key string, value *memFile) bool) {
+ token := m.lock.RLock()
+ defer m.lock.RUnlock(token)
+
+ m.files.Iter(func(key string, value *memFile) bool {
+ return !f(key, value)
+ })
+}
diff --git a/io/fs/mem_test.go b/io/fs/mem_test.go
index 0f8c1758..be179d5c 100644
--- a/io/fs/mem_test.go
+++ b/io/fs/mem_test.go
@@ -1,7 +1,6 @@
package fs
import (
- "bytes"
"context"
"fmt"
"io"
@@ -30,53 +29,117 @@ func TestMemFromDir(t *testing.T) {
}, names)
}
-func BenchmarkMemList(b *testing.B) {
- mem, err := NewMemFilesystem(MemConfig{})
- require.NoError(b, err)
+func TestWriteWhileRead(t *testing.T) {
+ fs, err := NewMemFilesystem(MemConfig{})
+ require.NoError(t, err)
+
+ _, _, err = fs.WriteFile("/foobar", []byte("xxxxx"))
+ require.NoError(t, err)
+
+ file := fs.Open("/foobar")
+ require.NotNil(t, file)
+
+ _, _, err = fs.WriteFile("/foobar", []byte("yyyyy"))
+ require.NoError(t, err)
+
+ data, err := io.ReadAll(file)
+ require.NoError(t, err)
+ require.Equal(t, []byte("xxxxx"), data)
+}
+
+func TestCopy(t *testing.T) {
+ fs, err := NewMemFilesystem(MemConfig{})
+ require.NoError(t, err)
+
+ _, _, err = fs.WriteFile("/foobar", []byte("xxxxx"))
+ require.NoError(t, err)
+
+ data, err := fs.ReadFile("/foobar")
+ require.NoError(t, err)
+
+ require.Equal(t, []byte("xxxxx"), data)
+
+ err = fs.Copy("/foobar", "/barfoo")
+ require.NoError(t, err)
+
+ data, err = fs.ReadFile("/barfoo")
+ require.NoError(t, err)
+
+ require.Equal(t, []byte("xxxxx"), data)
+
+ fs.Remove("/foobar")
+
+ data, err = fs.ReadFile("/barfoo")
+ require.NoError(t, err)
+
+ require.Equal(t, []byte("xxxxx"), data)
+}
+
+func BenchmarkMemStorages(b *testing.B) {
+ storages := []string{
+ "map",
+ "xsync",
+ "swiss",
+ }
+
+ benchmarks := map[string]func(*testing.B, Filesystem){
+ "list": benchmarkMemList,
+ "removeList": benchmarkMemRemoveList,
+ "readFile": benchmarkMemReadFile,
+ "writeFile": benchmarkMemWriteFile,
+ "readWhileWrite": benchmarkMemReadFileWhileWriting,
+ }
+ for name, fn := range benchmarks {
+ for _, storage := range storages {
+ mem, err := NewMemFilesystem(MemConfig{Storage: storage})
+ require.NoError(b, err)
+
+ b.Run(name+"-"+storage, func(b *testing.B) {
+ fn(b, mem)
+ })
+ }
+ }
+}
+
+func benchmarkMemList(b *testing.B, fs Filesystem) {
for i := 0; i < 1000; i++ {
id := rand.StringAlphanumeric(8)
path := fmt.Sprintf("/%d/%s.dat", i, id)
- mem.WriteFile(path, []byte("foobar"))
+ fs.WriteFile(path, []byte("foobar"))
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
- mem.List("/", ListOptions{
+ fs.List("/", ListOptions{
Pattern: "/5/**",
})
}
}
-func BenchmarkMemRemoveList(b *testing.B) {
- mem, err := NewMemFilesystem(MemConfig{})
- require.NoError(b, err)
-
+func benchmarkMemRemoveList(b *testing.B, fs Filesystem) {
for i := 0; i < 1000; i++ {
id := rand.StringAlphanumeric(8)
path := fmt.Sprintf("/%d/%s.dat", i, id)
- mem.WriteFile(path, []byte("foobar"))
+ fs.WriteFile(path, []byte("foobar"))
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
- mem.RemoveList("/", ListOptions{
+ fs.RemoveList("/", ListOptions{
Pattern: "/5/**",
})
}
}
-func BenchmarkMemReadFile(b *testing.B) {
- mem, err := NewMemFilesystem(MemConfig{})
- require.NoError(b, err)
-
+func benchmarkMemReadFile(b *testing.B, fs Filesystem) {
nFiles := 1000
for i := 0; i < nFiles; i++ {
path := fmt.Sprintf("/%d.dat", i)
- mem.WriteFile(path, []byte(rand.StringAlphanumeric(2*1024)))
+ fs.WriteFile(path, []byte(rand.StringAlphanumeric(2*1024)))
}
r := gorand.New(gorand.NewSource(42))
@@ -85,52 +148,28 @@ func BenchmarkMemReadFile(b *testing.B) {
for i := 0; i < b.N; i++ {
num := r.Intn(nFiles)
- f := mem.Open("/" + strconv.Itoa(num) + ".dat")
+ f := fs.Open("/" + strconv.Itoa(num) + ".dat")
f.Close()
}
}
-func TestWriteWhileRead(t *testing.T) {
- fs, err := NewMemFilesystem(MemConfig{})
- require.NoError(t, err)
-
- _, _, err = fs.WriteFile("/foobar", []byte("xxxxx"))
- require.NoError(t, err)
-
- file := fs.Open("/foobar")
- require.NotNil(t, file)
-
- _, _, err = fs.WriteFile("/foobar", []byte("yyyyy"))
- require.NoError(t, err)
-
- data, err := io.ReadAll(file)
- require.NoError(t, err)
- require.Equal(t, []byte("xxxxx"), data)
-}
-
-func BenchmarkMemWriteFile(b *testing.B) {
- mem, err := NewMemFilesystem(MemConfig{})
- require.NoError(b, err)
-
+func benchmarkMemWriteFile(b *testing.B, fs Filesystem) {
nFiles := 50000
for i := 0; i < nFiles; i++ {
path := fmt.Sprintf("/%d.dat", i)
- mem.WriteFile(path, []byte(rand.StringAlphanumeric(1)))
+ fs.WriteFile(path, []byte(rand.StringAlphanumeric(1)))
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
path := fmt.Sprintf("/%d.dat", i%nFiles)
- mem.WriteFile(path, []byte(rand.StringAlphanumeric(1)))
+ fs.WriteFile(path, []byte(rand.StringAlphanumeric(1)))
}
}
-func BenchmarkMemReadFileWhileWriting(b *testing.B) {
- mem, err := NewMemFilesystem(MemConfig{})
- require.NoError(b, err)
-
+func benchmarkMemReadFileWhileWriting(b *testing.B, fs Filesystem) {
nReaders := 500
nWriters := 1000
nFiles := 30
@@ -148,7 +187,7 @@ func BenchmarkMemReadFileWhileWriting(b *testing.B) {
go func(ctx context.Context, from int) {
for i := 0; i < nFiles; i++ {
path := fmt.Sprintf("/%d.dat", from+i)
- mem.WriteFile(path, data)
+ fs.WriteFile(path, data)
}
ticker := time.NewTicker(40 * time.Millisecond)
@@ -163,7 +202,7 @@ func BenchmarkMemReadFileWhileWriting(b *testing.B) {
case <-ticker.C:
num := gorand.Intn(nFiles) + from
path := fmt.Sprintf("/%d.dat", num)
- mem.WriteFile(path, data)
+ fs.WriteFile(path, data)
}
}
}(ctx, i*nFiles)
@@ -183,7 +222,7 @@ func BenchmarkMemReadFileWhileWriting(b *testing.B) {
for i := 0; i < b.N; i++ {
num := gorand.Intn(nWriters * nFiles)
- f := mem.Open("/" + strconv.Itoa(num) + ".dat")
+ f := fs.Open("/" + strconv.Itoa(num) + ".dat")
f.Close()
}
}()
@@ -191,36 +230,3 @@ func BenchmarkMemReadFileWhileWriting(b *testing.B) {
readerWg.Wait()
}
-
-func BenchmarkBufferReadFrom(b *testing.B) {
- data := []byte(rand.StringAlphanumeric(1024 * 1024))
-
- for i := 0; i < b.N; i++ {
- r := bytes.NewReader(data)
- buf := &bytes.Buffer{}
- buf.ReadFrom(r)
- }
-}
-
-func TestBufferReadChunks(t *testing.T) {
- data := []byte(rand.StringAlphanumeric(1024 * 1024))
-
- r := bytes.NewReader(data)
- buf := &bytes.Buffer{}
-
- copyToBufferFromReader(buf, r, 32*1024)
-
- res := bytes.Compare(data, buf.Bytes())
- require.Equal(t, 0, res)
-}
-
-func BenchmarkBufferReadChunks(b *testing.B) {
- data := []byte(rand.StringAlphanumeric(1024 * 1024))
-
- for i := 0; i < b.N; i++ {
- r := bytes.NewReader(data)
- buf := &bytes.Buffer{}
-
- copyToBufferFromReader(buf, r, 32*1024)
- }
-}
diff --git a/io/fs/memtest/.gitignore b/io/fs/memtest/.gitignore
new file mode 100644
index 00000000..f43fea82
--- /dev/null
+++ b/io/fs/memtest/.gitignore
@@ -0,0 +1 @@
+memtest
diff --git a/io/fs/memtest/memtest.go b/io/fs/memtest/memtest.go
new file mode 100644
index 00000000..73828e6e
--- /dev/null
+++ b/io/fs/memtest/memtest.go
@@ -0,0 +1,247 @@
+package main
+
+import (
+ "bytes"
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ gorand "math/rand/v2"
+ "os"
+ "os/signal"
+ "runtime"
+ "runtime/debug"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/datarhei/core/v16/io/fs"
+ "github.com/datarhei/core/v16/math/rand"
+
+ "github.com/google/gops/agent"
+)
+
+func main() {
+ oStorage := "mapof"
+ oWriters := 500
+ oReaders := 1000
+ oFiles := 15
+ oInterval := 1 // seconds
+ oSize := 2048 // kilobytes
+ oLimit := -1
+ oGC := 100
+ oFree := 0
+
+ flag.StringVar(&oStorage, "storage", "mapof", "type of mem storage implementation (mapof, map, swiss)")
+ flag.IntVar(&oWriters, "writers", 500, "number of concurrent writers")
+ flag.IntVar(&oReaders, "readers", 1000, "number of concurrent readers")
+ flag.IntVar(&oFiles, "files", 15, "number of files to keep per writer")
+ flag.IntVar(&oInterval, "interval", 1, "interval for writing files in seconds")
+ flag.IntVar(&oSize, "size", 2048, "size of files to write in kilobytes")
+ flag.IntVar(&oLimit, "limit", -1, "set memory limit, 0 for automatic, otherwise memory in MB")
+ flag.IntVar(&oGC, "gc", 100, "GC percentage")
+ flag.IntVar(&oFree, "free", 0, "force freeing memory")
+
+ flag.Parse()
+
+ estimatedSize := float64(oWriters*oFiles*oSize) / 1024 / 1024
+
+ fmt.Printf("Expecting effective memory consumption of %.1fGB\n", estimatedSize)
+
+ if oLimit >= 0 {
+ limitSize := estimatedSize * 1.5 * 1024 * 1024 * 1024
+ if oLimit > 0 {
+ limitSize = float64(oLimit) * 1024 * 1024
+ }
+
+ fmt.Printf("Setting memory limit to %.1fGB\n", limitSize/1024/1024/1024)
+ debug.SetMemoryLimit(int64(limitSize))
+ }
+
+ memfs, err := fs.NewMemFilesystem(fs.MemConfig{
+ Storage: oStorage,
+ })
+
+ if err != nil {
+ log.Fatalf("acquiring new memfs: %s", err.Error())
+ }
+
+ err = agent.Listen(agent.Options{
+ Addr: ":9000",
+ ReuseSocketAddrAndPort: true,
+ })
+
+ if err != nil {
+ log.Fatalf("starting agent: %s", err.Error())
+ }
+
+ fmt.Printf("Started agent on :9000\n")
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ wgWriter := sync.WaitGroup{}
+
+ for i := 0; i < oWriters; i++ {
+ fmt.Printf("%4d / %4d writer started\r", i+1, oWriters)
+
+ wgWriter.Add(1)
+
+ go func(ctx context.Context, memfs fs.Filesystem, index int, nfiles int64, interval time.Duration) {
+ defer wgWriter.Done()
+
+ jitter := gorand.IntN(200)
+ interval += time.Duration(jitter) * time.Millisecond
+
+ sequence := int64(0)
+
+ buf := bytes.NewBufferString(rand.StringAlphanumeric(oSize * (1024 + jitter - 100)))
+ r := bytes.NewReader(buf.Bytes())
+
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ path := fmt.Sprintf("/foobar/test_%d_%06d.dat", index, sequence)
+
+ // Write file to memfs
+ r.Seek(0, io.SeekStart)
+ memfs.WriteFileReader(path, r, -1)
+
+ // Delete file from memfs
+ if sequence-nfiles >= 0 {
+ path = fmt.Sprintf("/foobar/test_%d_%06d.dat", index, sequence-nfiles)
+ memfs.Remove(path)
+ }
+
+ path = fmt.Sprintf("/foobar/test_%d.last", index)
+ memfs.WriteFile(path, []byte(strconv.FormatInt(sequence, 10)))
+
+ sequence++
+ }
+ }
+ }(ctx, memfs, i, int64(oFiles), time.Duration(oInterval)*time.Second)
+ }
+
+ fmt.Printf("\n")
+
+ wgReader := sync.WaitGroup{}
+
+ if oReaders > 0 {
+ for i := 0; i < oReaders; i++ {
+ fmt.Printf("%4d / %4d reader started\r", i+1, oReaders)
+
+ wgReader.Add(1)
+
+ go func(ctx context.Context, memfs fs.Filesystem, interval time.Duration) {
+ defer wgReader.Done()
+
+ buf := bytes.Buffer{}
+
+ jitter := gorand.IntN(200)
+ interval += time.Duration(jitter) * time.Millisecond
+
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ index := gorand.IntN(oWriters)
+
+ path := fmt.Sprintf("/foobar/test_%d.last", index)
+ data, err := memfs.ReadFile(path)
+ if err != nil {
+ continue
+ }
+
+ sequence, err := strconv.ParseUint(string(data), 10, 64)
+ if err != nil {
+ continue
+ }
+
+ path = fmt.Sprintf("/foobar/test_%d_%06d.dat", index, sequence)
+ file := memfs.Open(path)
+
+ buf.ReadFrom(file)
+ buf.Reset()
+ }
+ }
+ }(ctx, memfs, time.Duration(oInterval)*time.Second)
+ }
+
+ fmt.Printf("\n")
+ }
+
+ go func(ctx context.Context, memfs fs.Filesystem) {
+ ticker := time.NewTicker(2 * time.Second)
+ defer ticker.Stop()
+
+ nMallocs := uint64(0)
+ m := runtime.MemStats{}
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ runtime.ReadMemStats(&m)
+
+ size, _ := memfs.Size()
+ fmt.Printf("%5.1fGB ", float64(size)/1024/1024/1024)
+
+ listfiles := 0
+ listsize := int64(0)
+ files := memfs.List("/", fs.ListOptions{})
+ for _, f := range files {
+ listsize += f.Size()
+ }
+ listfiles = len(files)
+
+ fmt.Printf("(%7d files with %5.1fGB) ", listfiles, float64(listsize)/1024/1024/1024)
+
+ fmt.Printf("alloc=%5.1fGB (%8.1fGB) sys=%5.1fGB idle=%5.1fGB inuse=%5.1fGB mallocs=%d objects=%d\n", float64(m.HeapAlloc)/1024/1024/1024, float64(m.TotalAlloc)/1024/1024/1024, float64(m.HeapSys)/1024/1024/1024, float64(m.HeapIdle)/1024/1024/1024, float64(m.HeapInuse)/1024/1024/1024, m.Mallocs-nMallocs, m.Mallocs-m.Frees)
+
+ nMallocs = m.Mallocs
+ }
+ }
+ }(ctx, memfs)
+
+ debug.SetGCPercent(oGC)
+
+ if oFree > 0 {
+ go func(ctx context.Context) {
+ ticker := time.NewTicker(time.Duration(oFree) * time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ debug.FreeOSMemory()
+ }
+ }
+ }(ctx)
+ }
+
+ quit := make(chan os.Signal, 1)
+ signal.Notify(quit, os.Interrupt)
+ <-quit
+
+ cancel()
+
+ fmt.Printf("Waiting for readers to stop ...\n")
+ wgReader.Wait()
+
+ fmt.Printf("Waiting for writers to stop ...\n")
+ wgWriter.Wait()
+
+ fmt.Printf("Done\n")
+}
diff --git a/io/fs/s3.go b/io/fs/s3.go
index c75162ed..316fd7ca 100644
--- a/io/fs/s3.go
+++ b/io/fs/s3.go
@@ -14,6 +14,7 @@ import (
"github.com/datarhei/core/v16/glob"
"github.com/datarhei/core/v16/log"
+ "github.com/datarhei/core/v16/mem"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
@@ -275,7 +276,7 @@ func (fs *s3Filesystem) ReadFile(path string) ([]byte, error) {
defer file.Close()
- buf := &bytes.Buffer{}
+ buf := mem.Get() // here we take out a buffer for good
_, err := buf.ReadFrom(file)
if err != nil {
@@ -360,6 +361,27 @@ func (fs *s3Filesystem) WriteFileSafe(path string, data []byte) (int64, bool, er
return fs.WriteFileReader(path, bytes.NewReader(data), len(data))
}
+func (fs *s3Filesystem) AppendFileReader(path string, r io.Reader, sizeHint int) (int64, error) {
+ path = fs.cleanPath(path)
+
+ ctx := context.Background()
+
+ object, err := fs.client.GetObject(ctx, fs.bucket, path, minio.GetObjectOptions{})
+ if err != nil {
+ size, _, err := fs.write(path, r)
+ return size, err
+ }
+
+ buffer := mem.Get()
+ defer mem.Put(buffer)
+
+ buffer.ReadFrom(object)
+ buffer.ReadFrom(r)
+
+ size, _, err := fs.write(path, buffer.Reader())
+ return size, err
+}
+
func (fs *s3Filesystem) Rename(src, dst string) error {
src = fs.cleanPath(src)
dst = fs.cleanPath(dst)
diff --git a/io/fs/sized.go b/io/fs/sized.go
index aa6b552b..9e64729f 100644
--- a/io/fs/sized.go
+++ b/io/fs/sized.go
@@ -4,6 +4,8 @@ import (
"bytes"
"fmt"
"io"
+
+ "github.com/datarhei/core/v16/mem"
)
type SizedFilesystem interface {
@@ -71,8 +73,10 @@ func (r *sizedFilesystem) WriteFileReader(path string, rd io.Reader, sizeHint in
return r.Filesystem.WriteFileReader(path, rd, sizeHint)
}
- data := bytes.Buffer{}
- size, err := copyToBufferFromReader(&data, rd, 8*1024)
+ data := mem.Get()
+ defer mem.Put(data)
+
+ size, err := data.ReadFrom(rd)
if err != nil {
return -1, false, err
}
@@ -97,7 +101,7 @@ func (r *sizedFilesystem) WriteFileReader(path string, rd io.Reader, sizeHint in
}
}
- return r.Filesystem.WriteFileReader(path, &data, int(size))
+ return r.Filesystem.WriteFileReader(path, data.Reader(), int(size))
}
func (r *sizedFilesystem) WriteFile(path string, data []byte) (int64, bool, error) {
@@ -135,34 +139,42 @@ func (r *sizedFilesystem) WriteFileSafe(path string, data []byte) (int64, bool,
return r.Filesystem.WriteFileSafe(path, data)
}
-func (r *sizedFilesystem) Purge(size int64) int64 {
- if purger, ok := r.Filesystem.(PurgeFilesystem); ok {
- return purger.Purge(size)
+func (r *sizedFilesystem) AppendFileReader(path string, rd io.Reader, sizeHint int) (int64, error) {
+ currentSize, maxSize := r.Size()
+ if maxSize <= 0 {
+ return r.Filesystem.AppendFileReader(path, rd, sizeHint)
}
- return 0
- /*
- files := r.Filesystem.List("/", "")
+ data := mem.Get()
+ defer mem.Put(data)
- sort.Slice(files, func(i, j int) bool {
- return files[i].ModTime().Before(files[j].ModTime())
- })
+ size, err := data.ReadFrom(rd)
+ if err != nil {
+ return -1, err
+ }
- var freed int64 = 0
+ // Calculate the new size of the filesystem
+ newSize := currentSize + size
- for _, f := range files {
- r.Filesystem.Remove(f.Name())
- size -= f.Size()
- freed += f.Size()
- r.currentSize -= f.Size()
+ // If the the new size is larger than the allowed size, we have to free
+ // some space.
+ if newSize > maxSize {
+ if !r.purge {
+ return -1, fmt.Errorf("not enough space on device")
+ }
- if size <= 0 {
- break
- }
- }
+ if r.Purge(size) < size {
+ return -1, fmt.Errorf("not enough space on device")
+ }
+ }
- files = nil
+ return r.Filesystem.AppendFileReader(path, data.Reader(), int(size))
+}
- return freed
- */
+func (r *sizedFilesystem) Purge(size int64) int64 {
+ if purger, ok := r.Filesystem.(PurgeFilesystem); ok {
+ return purger.Purge(size)
+ }
+
+ return 0
}
diff --git a/math/average/sma.go b/math/average/sma.go
new file mode 100644
index 00000000..d31e0cd1
--- /dev/null
+++ b/math/average/sma.go
@@ -0,0 +1,118 @@
+package average
+
+import (
+ "container/ring"
+ "errors"
+ gotime "time"
+
+ "github.com/datarhei/core/v16/time"
+)
+
+type SMA struct {
+ ts time.Source
+ window int64
+ granularity int64
+ size int
+ last int64
+ samples *ring.Ring
+}
+
+var ErrWindow = errors.New("window size must be positive")
+var ErrGranularity = errors.New("granularity must be positive")
+var ErrMultiplier = errors.New("window size has to be a multiplier of the granularity size")
+
+func NewSMA(window, granularity gotime.Duration) (*SMA, error) {
+ if window <= 0 {
+ return nil, ErrWindow
+ }
+
+ if granularity <= 0 {
+ return nil, ErrGranularity
+ }
+
+ if window <= granularity || window%granularity != 0 {
+ return nil, ErrMultiplier
+ }
+
+ s := &SMA{
+ ts: &time.StdSource{},
+ window: window.Nanoseconds(),
+ granularity: granularity.Nanoseconds(),
+ }
+
+ s.init()
+
+ return s, nil
+}
+
+func (s *SMA) init() {
+ s.size = int(s.window / s.granularity)
+ s.samples = ring.New(s.size)
+
+ s.Reset()
+
+ now := s.ts.Now().UnixNano()
+ s.last = now - now%s.granularity
+}
+
+func (s *SMA) Add(v float64) {
+ now := s.ts.Now().UnixNano()
+ now -= now % s.granularity
+
+ n := (now - s.last) / s.granularity
+
+ if n >= int64(s.samples.Len()) {
+ // zero everything
+ s.Reset()
+ } else {
+ for i := n; i > 0; i-- {
+ s.samples = s.samples.Next()
+ s.samples.Value = float64(0)
+ }
+ }
+
+ s.samples.Value = s.samples.Value.(float64) + v
+
+ s.last = now
+}
+
+func (s *SMA) AddAndAverage(v float64) float64 {
+ s.Add(v)
+
+ total := float64(0)
+
+ s.samples.Do(func(v any) {
+ total += v.(float64)
+ })
+
+ return total / float64(s.samples.Len())
+}
+
+func (s *SMA) Average() float64 {
+ total, samplecount := s.Total()
+
+ return total / float64(samplecount)
+}
+
+func (s *SMA) Reset() {
+ n := s.samples.Len()
+
+ // Initialize the ring buffer with 0 values.
+ for i := 0; i < n; i++ {
+ s.samples.Value = float64(0)
+ s.samples = s.samples.Next()
+ }
+}
+
+func (s *SMA) Total() (float64, int) {
+ // Propagate the ringbuffer
+ s.Add(0)
+
+ total := float64(0)
+
+ s.samples.Do(func(v any) {
+ total += v.(float64)
+ })
+
+ return total, s.samples.Len()
+}
diff --git a/math/average/sma_test.go b/math/average/sma_test.go
new file mode 100644
index 00000000..4e650221
--- /dev/null
+++ b/math/average/sma_test.go
@@ -0,0 +1,301 @@
+package average
+
+import (
+ "testing"
+ "time"
+
+ timesrc "github.com/datarhei/core/v16/time"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewSMA(t *testing.T) {
+ _, err := NewSMA(time.Second, time.Second)
+ require.Error(t, err)
+ require.ErrorIs(t, err, ErrMultiplier)
+
+ _, err = NewSMA(time.Second, 2*time.Second)
+ require.Error(t, err)
+ require.ErrorIs(t, err, ErrMultiplier)
+
+ _, err = NewSMA(3*time.Second, 2*time.Second)
+ require.Error(t, err)
+ require.ErrorIs(t, err, ErrMultiplier)
+
+ _, err = NewSMA(0, time.Second)
+ require.Error(t, err)
+ require.ErrorIs(t, err, ErrWindow)
+
+ _, err = NewSMA(time.Second, 0)
+ require.Error(t, err)
+ require.ErrorIs(t, err, ErrGranularity)
+
+ sme, err := NewSMA(10*time.Second, time.Second)
+ require.NoError(t, err)
+ require.NotNil(t, sme)
+}
+
+func TestAddSMA(t *testing.T) {
+ ts := &timesrc.TestSource{
+ N: time.Unix(0, 0),
+ }
+
+ sme := &SMA{
+ ts: ts,
+ window: time.Second.Nanoseconds(),
+ granularity: time.Millisecond.Nanoseconds(),
+ }
+ sme.init()
+
+ sme.Add(42)
+
+ total, samplecount := sme.Total()
+ require.Equal(t, float64(42), total)
+ require.Equal(t, int(time.Second/time.Millisecond), samplecount)
+
+ sme.Add(5)
+
+ total, samplecount = sme.Total()
+ require.Equal(t, float64(47), total)
+ require.Equal(t, int(time.Second/time.Millisecond), samplecount)
+
+ ts.Set(5, 0)
+
+ total, samplecount = sme.Total()
+ require.Equal(t, float64(0), total)
+ require.Equal(t, int(time.Second/time.Millisecond), samplecount)
+}
+
+func TestAverageSMA(t *testing.T) {
+ ts := &timesrc.TestSource{
+ N: time.Unix(0, 0),
+ }
+
+ sme := &SMA{
+ ts: ts,
+ window: time.Second.Nanoseconds(),
+ granularity: time.Millisecond.Nanoseconds(),
+ }
+ sme.init()
+
+ sme.Add(42)
+
+ avg := sme.Average()
+ require.Equal(t, 42.0/1000, avg)
+
+ sme.Add(5)
+
+ avg = sme.Average()
+ require.Equal(t, 47.0/1000, avg)
+
+ ts.Set(5, 0)
+
+ avg = sme.Average()
+ require.Equal(t, .0/1000, avg)
+}
+
+func TestAddAndAverageSMA(t *testing.T) {
+ ts := &timesrc.TestSource{
+ N: time.Unix(0, 0),
+ }
+
+ sme := &SMA{
+ ts: ts,
+ window: time.Second.Nanoseconds(),
+ granularity: time.Millisecond.Nanoseconds(),
+ }
+ sme.init()
+
+ avg := sme.AddAndAverage(42)
+ require.Equal(t, 42.0/1000, avg)
+
+ avg = sme.AddAndAverage(5)
+ require.Equal(t, 47.0/1000, avg)
+
+ ts.Set(5, 0)
+
+ avg = sme.Average()
+ require.Equal(t, .0/1000, avg)
+}
+
+func TestAverageSeriesSMA(t *testing.T) {
+ ts := &timesrc.TestSource{
+ N: time.Unix(0, 0),
+ }
+
+ sme := &SMA{
+ ts: ts,
+ window: 10 * time.Second.Nanoseconds(),
+ granularity: time.Second.Nanoseconds(),
+ }
+ sme.init()
+
+ sme.Add(42) // [42, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+
+ ts.Set(1, 0)
+
+ sme.Add(5) // [5, 42, 0, 0, 0, 0, 0, 0, 0, 0]
+
+ ts.Set(2, 0)
+
+ sme.Add(18) // [18, 5, 42, 0, 0, 0, 0, 0, 0, 0]
+
+ ts.Set(3, 0)
+
+ sme.Add(47) // [47, 18, 5, 42, 0, 0, 0, 0, 0, 0]
+
+ ts.Set(4, 0)
+
+ sme.Add(92) // [92, 47, 18, 5, 42, 0, 0, 0, 0, 0]
+
+ ts.Set(5, 0)
+
+ sme.Add(2) // [2, 92, 47, 18, 5, 42, 0, 0, 0, 0]
+
+ ts.Set(6, 0)
+
+ sme.Add(75) // [75, 2, 92, 47, 18, 5, 42, 0, 0, 0]
+
+ ts.Set(7, 0)
+
+ sme.Add(33) // [33, 75, 2, 92, 47, 18, 5, 42, 0, 0]
+
+ ts.Set(8, 0)
+
+ sme.Add(89) // [89, 33, 75, 2, 92, 47, 18, 5, 42, 0]
+
+ ts.Set(9, 0)
+
+ sme.Add(12) // [12, 89, 33, 75, 2, 92, 47, 18, 5, 42]
+
+ avg := sme.Average()
+ require.Equal(t, (12+89+33+75+2+92+47+18+5+42)/10., avg)
+
+ ts.Set(10, 0)
+
+ avg = sme.Average()
+ require.Equal(t, (12+89+33+75+2+92+47+18+5)/10., avg)
+
+ ts.Set(15, 0)
+
+ avg = sme.Average()
+ require.Equal(t, (12+89+33+75)/10., avg)
+
+ ts.Set(19, 0)
+
+ avg = sme.Average()
+ require.Equal(t, (0)/10., avg)
+}
+
+func TestResetSMA(t *testing.T) {
+ ts := &timesrc.TestSource{
+ N: time.Unix(0, 0),
+ }
+
+ sme := &SMA{
+ ts: ts,
+ window: 10 * time.Second.Nanoseconds(),
+ granularity: time.Second.Nanoseconds(),
+ }
+ sme.init()
+
+ sme.Add(42) // [42, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+
+ ts.Set(1, 0)
+
+ sme.Add(5) // [5, 42, 0, 0, 0, 0, 0, 0, 0, 0]
+
+ ts.Set(2, 0)
+
+ sme.Add(18) // [18, 5, 42, 0, 0, 0, 0, 0, 0, 0]
+
+ ts.Set(3, 0)
+
+ sme.Add(47) // [47, 18, 5, 42, 0, 0, 0, 0, 0, 0]
+
+ ts.Set(4, 0)
+
+ sme.Add(92) // [92, 47, 18, 5, 42, 0, 0, 0, 0, 0]
+
+ ts.Set(5, 0)
+
+ sme.Add(2) // [2, 92, 47, 18, 5, 42, 0, 0, 0, 0]
+
+ ts.Set(6, 0)
+
+ sme.Add(75) // [75, 2, 92, 47, 18, 5, 42, 0, 0, 0]
+
+ ts.Set(7, 0)
+
+ sme.Add(33) // [33, 75, 2, 92, 47, 18, 5, 42, 0, 0]
+
+ ts.Set(8, 0)
+
+ sme.Add(89) // [89, 33, 75, 2, 92, 47, 18, 5, 42, 0]
+
+ ts.Set(9, 0)
+
+ sme.Add(12) // [12, 89, 33, 75, 2, 92, 47, 18, 5, 42]
+
+ avg := sme.Average()
+ require.Equal(t, (12+89+33+75+2+92+47+18+5+42)/10., avg)
+
+ sme.Reset()
+
+ avg = sme.Average()
+ require.Equal(t, 0/10., avg)
+}
+
+func TestTotalSMA(t *testing.T) {
+ ts := &timesrc.TestSource{
+ N: time.Unix(0, 0),
+ }
+
+ sme := &SMA{
+ ts: ts,
+ window: 10 * time.Second.Nanoseconds(),
+ granularity: time.Second.Nanoseconds(),
+ }
+ sme.init()
+
+ sme.Add(42) // [42, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+
+ ts.Set(1, 0)
+
+ sme.Add(5) // [5, 42, 0, 0, 0, 0, 0, 0, 0, 0]
+
+ ts.Set(2, 0)
+
+ sme.Add(18) // [18, 5, 42, 0, 0, 0, 0, 0, 0, 0]
+
+ ts.Set(3, 0)
+
+ sme.Add(47) // [47, 18, 5, 42, 0, 0, 0, 0, 0, 0]
+
+ ts.Set(4, 0)
+
+ sme.Add(92) // [92, 47, 18, 5, 42, 0, 0, 0, 0, 0]
+
+ ts.Set(5, 0)
+
+ sme.Add(2) // [2, 92, 47, 18, 5, 42, 0, 0, 0, 0]
+
+ ts.Set(6, 0)
+
+ sme.Add(75) // [75, 2, 92, 47, 18, 5, 42, 0, 0, 0]
+
+ ts.Set(7, 0)
+
+ sme.Add(33) // [33, 75, 2, 92, 47, 18, 5, 42, 0, 0]
+
+ ts.Set(8, 0)
+
+ sme.Add(89) // [89, 33, 75, 2, 92, 47, 18, 5, 42, 0]
+
+ ts.Set(9, 0)
+
+ sme.Add(12) // [12, 89, 33, 75, 2, 92, 47, 18, 5, 42]
+
+ total, nsamples := sme.Total()
+ require.Equal(t, float64(12+89+33+75+2+92+47+18+5+42), total)
+ require.Equal(t, 10, nsamples)
+}
diff --git a/math/rand/rand.go b/math/rand/rand.go
index 5f81ad90..fead355c 100644
--- a/math/rand/rand.go
+++ b/math/rand/rand.go
@@ -21,6 +21,12 @@ var seededRand *rand.Rand = rand.New(rand.NewSource(time.Now().UnixNano()))
var lock sync.Mutex
func StringWithCharset(length int, charset string) string {
+ b := BytesWithCharset(length, charset)
+
+ return string(b)
+}
+
+func BytesWithCharset(length int, charset string) []byte {
lock.Lock()
defer lock.Unlock()
@@ -29,7 +35,7 @@ func StringWithCharset(length int, charset string) string {
b[i] = charset[seededRand.Intn(len(charset))]
}
- return string(b)
+ return b
}
func StringLetters(length int) string {
@@ -47,3 +53,7 @@ func StringAlphanumeric(length int) string {
func String(length int) string {
return StringWithCharset(length, CharsetAll)
}
+
+func Bytes(length int) []byte {
+ return BytesWithCharset(length, CharsetAll)
+}
diff --git a/mem/buffer.go b/mem/buffer.go
new file mode 100644
index 00000000..808676fa
--- /dev/null
+++ b/mem/buffer.go
@@ -0,0 +1,163 @@
+package mem
+
+// Based on github.com/valyala/bytebufferpool
+
+import (
+ "bytes"
+ "errors"
+ "io"
+)
+
+type Buffer struct {
+ data []byte
+}
+
+// Len returns the length of the buffer.
+func (b *Buffer) Len() int {
+ return len(b.data)
+}
+
+// Bytes returns the buffer, but keeps ownership.
+func (b *Buffer) Bytes() []byte {
+ return b.data
+}
+
+// WriteTo writes the bytes to the writer.
+func (b *Buffer) WriteTo(w io.Writer) (int64, error) {
+ n, err := w.Write(b.data)
+ return int64(n), err
+}
+
+// Reset empties the buffer and keeps it's capacity.
+func (b *Buffer) Reset() {
+ b.data = b.data[:0]
+}
+
+// Write appends to the buffer.
+func (b *Buffer) Write(p []byte) (int, error) {
+ b.data = append(b.data, p...)
+ return len(p), nil
+}
+
+// ReadFrom reads from the reader and appends to the buffer.
+func (b *Buffer) ReadFrom(r io.Reader) (int64, error) {
+ /*
+ chunkData := [128 * 1024]byte{}
+ chunk := chunkData[0:]
+
+ size := int64(0)
+
+ for {
+ n, err := r.Read(chunk)
+ if n != 0 {
+ b.data = append(b.data, chunk[:n]...)
+ size += int64(n)
+ }
+
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ return size, nil
+ }
+
+ return size, err
+ }
+
+ if n == 0 {
+ break
+ }
+ }
+
+ return size, nil
+ */
+ p := b.data
+ nStart := int64(len(p))
+ nMax := int64(cap(p))
+ n := nStart
+ if nMax == 0 {
+ nMax = 64
+ p = make([]byte, nMax)
+ } else {
+ p = p[:nMax]
+ }
+ for {
+ if n == nMax {
+ nMax *= 2
+ bNew := make([]byte, nMax)
+ copy(bNew, p)
+ p = bNew
+ }
+ nn, err := r.Read(p[n:])
+ n += int64(nn)
+ if err != nil {
+ b.data = p[:n]
+ n -= nStart
+ if errors.Is(err, io.EOF) {
+ return n, nil
+ }
+ return n, err
+ }
+ }
+ /*
+ if br, ok := r.(*bytes.Reader); ok {
+ if cap(b.data) < br.Len() {
+ data := make([]byte, br.Len())
+ copy(data, b.data)
+ b.data = data
+ }
+ }
+
+ chunkData := [128 * 1024]byte{}
+ chunk := chunkData[0:]
+
+ size := int64(0)
+
+ for {
+ n, err := r.Read(chunk)
+ if n != 0 {
+ if cap(b.data) < len(b.data)+n {
+ data := make([]byte, cap(b.data)+1024*1024)
+ copy(data, b.data)
+ b.data = data
+ }
+ b.data = append(b.data, chunk[:n]...)
+ size += int64(n)
+ }
+
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ return size, nil
+ }
+
+ return size, err
+ }
+
+ if n == 0 {
+ break
+ }
+ }
+
+ return size, nil
+ */
+}
+
+// WriteByte appends a byte to the buffer.
+func (b *Buffer) WriteByte(c byte) error {
+ b.data = append(b.data, c)
+ return nil
+}
+
+// WriteString appends a string to the buffer.
+func (b *Buffer) WriteString(s string) (n int, err error) {
+ b.data = append(b.data, s...)
+ return len(s), nil
+}
+
+// Reader returns a bytes.Reader based on the data in the buffer.
+func (b *Buffer) Reader() *bytes.Reader {
+ return bytes.NewReader(b.data)
+}
+
+// String returns the data in the buffer a string.
+func (b *Buffer) String() string {
+ return string(b.data)
+}
diff --git a/mem/buffer_test.go b/mem/buffer_test.go
new file mode 100644
index 00000000..a19c1a72
--- /dev/null
+++ b/mem/buffer_test.go
@@ -0,0 +1,47 @@
+package mem
+
+import (
+ "bytes"
+ "io"
+ "testing"
+
+ "github.com/datarhei/core/v16/math/rand"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestBufferReadChunks(t *testing.T) {
+ data := []byte(rand.StringAlphanumeric(1024 * 1024))
+
+ r := bytes.NewReader(data)
+ buf := &Buffer{}
+
+ buf.ReadFrom(r)
+
+ res := bytes.Compare(data, buf.Bytes())
+ require.Equal(t, 0, res)
+}
+
+func BenchmarkBufferReadFrom(b *testing.B) {
+ data := []byte(rand.StringAlphanumeric(1024 * 1024))
+
+ r := bytes.NewReader(data)
+
+ for i := 0; i < b.N; i++ {
+ r.Seek(0, io.SeekStart)
+ buf := &Buffer{}
+ buf.ReadFrom(r)
+ }
+}
+
+func BenchmarkBytesBufferReadFrom(b *testing.B) {
+ data := []byte(rand.StringAlphanumeric(1024 * 1024))
+
+ r := bytes.NewReader(data)
+
+ for i := 0; i < b.N; i++ {
+ r.Seek(0, io.SeekStart)
+ buf := &bytes.Buffer{}
+ buf.ReadFrom(r)
+ }
+}
diff --git a/mem/pool.go b/mem/pool.go
new file mode 100644
index 00000000..23a7c1d6
--- /dev/null
+++ b/mem/pool.go
@@ -0,0 +1,188 @@
+package mem
+
+// Based on github.com/valyala/bytebufferpool
+
+import (
+ "sort"
+ "sync"
+ "sync/atomic"
+)
+
+const (
+	minBitSize = 6 // 2**6=64 is a CPU cache line size
+	steps      = 20 // number of size classes tracked for calibration
+
+	minSize = 1 << minBitSize                // smallest size class (64 bytes)
+	maxSize = 1 << (minBitSize + steps - 1) // largest size class (32 MiB)
+
+	calibrateCallsThreshold = 42000 // Put() calls in one size class before recalibrating
+	maxPercentile           = 0.95  // fraction of calls whose sizes maxSize must cover after calibration
+)
+
+// BufferPool is a self-calibrating pool of Buffers. It records the size
+// distribution of returned buffers and periodically derives the default
+// allocation size and the largest capacity worth recycling.
+// Based on github.com/valyala/bytebufferpool.
+type BufferPool struct {
+	calls       [steps]uint64 // per-size-class counters of Put calls
+	calibrating uint64        // non-zero while calibrate() runs (CAS guard)
+
+	defaultSize uint64 // capacity used when Get allocates a fresh Buffer
+	maxSize     uint64 // buffers with larger capacity are not recycled
+
+	alloc   uint64 // fresh allocations served by Get
+	reuse   uint64 // pooled buffers served by Get
+	recycle uint64 // buffers accepted back by Put
+	dump    uint64 // buffers rejected by Put (capacity above maxSize)
+
+	pool sync.Pool
+}
+
+// PoolStats is a snapshot of a BufferPool's internal counters.
+type PoolStats struct {
+	Alloc   uint64 // fresh allocations served by Get
+	Reuse   uint64 // pooled buffers served by Get
+	Recycle uint64 // buffers accepted back by Put
+	Dump    uint64 // buffers discarded by Put because they exceeded MaxSize
+
+	DefaultSize uint64 // current calibrated default buffer capacity
+	MaxSize     uint64 // current calibrated recycling size limit
+}
+
+// NewBufferPool returns an empty, uncalibrated buffer pool. The zero
+// value of sync.Pool is ready to use, so no further setup is needed.
+func NewBufferPool() *BufferPool {
+	return &BufferPool{}
+}
+
+// Stats returns an atomic snapshot of the pool's counters and the
+// current calibration results.
+func (p *BufferPool) Stats() PoolStats {
+	return PoolStats{
+		Alloc:       atomic.LoadUint64(&p.alloc),
+		Reuse:       atomic.LoadUint64(&p.reuse),
+		Recycle:     atomic.LoadUint64(&p.recycle),
+		Dump:        atomic.LoadUint64(&p.dump),
+		DefaultSize: atomic.LoadUint64(&p.defaultSize),
+		MaxSize:     atomic.LoadUint64(&p.maxSize),
+	}
+}
+
+// Get returns a Buffer from the pool. If the pool is empty, a fresh
+// Buffer is allocated with the calibrated default capacity.
+func (p *BufferPool) Get() *Buffer {
+	if v := p.pool.Get(); v != nil {
+		atomic.AddUint64(&p.reuse, 1)
+		return v.(*Buffer)
+	}
+
+	atomic.AddUint64(&p.alloc, 1)
+
+	size := atomic.LoadUint64(&p.defaultSize)
+
+	return &Buffer{
+		data: make([]byte, 0, size),
+	}
+}
+
+// Put returns a buffer to the pool. The buffer's length is recorded for
+// calibration; buffers whose capacity exceeds the calibrated maximum are
+// dropped so a few huge buffers cannot pin memory inside the pool.
+func (p *BufferPool) Put(buf *Buffer) {
+	idx := index(len(buf.data))
+
+	// Count the Put per size class; recalibrate once a class gets hot.
+	if atomic.AddUint64(&p.calls[idx], 1) > calibrateCallsThreshold {
+		p.calibrate()
+	}
+
+	// maxSize == 0 means no calibration has happened yet: accept everything.
+	maxSize := int(atomic.LoadUint64(&p.maxSize))
+	if maxSize == 0 || cap(buf.data) <= maxSize {
+		buf.Reset()
+		p.pool.Put(buf)
+
+		atomic.AddUint64(&p.recycle, 1)
+	} else {
+		atomic.AddUint64(&p.dump, 1)
+	}
+}
+
+// calibrate recomputes defaultSize and maxSize from the recorded size
+// classes: defaultSize becomes the most frequently used class, and
+// maxSize grows until maxPercentile of all observed calls are covered.
+// The calibrating CAS ensures only one goroutine calibrates at a time.
+func (p *BufferPool) calibrate() {
+	if !atomic.CompareAndSwapUint64(&p.calibrating, 0, 1) {
+		return
+	}
+
+	// Snapshot and reset the per-class call counters.
+	a := make(callSizes, 0, steps)
+	var callsSum uint64
+	for i := uint64(0); i < steps; i++ {
+		calls := atomic.SwapUint64(&p.calls[i], 0)
+		callsSum += calls
+		a = append(a, callSize{
+			calls: calls,
+			size:  minSize << i,
+		})
+	}
+	// Order the size classes by popularity, most used first.
+	sort.Sort(a)
+
+	defaultSize := a[0].size
+	maxSize := defaultSize
+
+	// Extend maxSize until the accepted classes account for maxPercentile
+	// of all recorded calls.
+	maxSum := uint64(float64(callsSum) * maxPercentile)
+	callsSum = 0
+	for i := 0; i < steps; i++ {
+		if callsSum > maxSum {
+			break
+		}
+		callsSum += a[i].calls
+		size := a[i].size
+		if size > maxSize {
+			maxSize = size
+		}
+	}
+
+	atomic.StoreUint64(&p.defaultSize, defaultSize)
+	atomic.StoreUint64(&p.maxSize, maxSize)
+
+	atomic.StoreUint64(&p.calibrating, 0)
+}
+
+// callSize pairs a buffer size class with the number of Put calls seen.
+type callSize struct {
+	calls uint64
+	size  uint64
+}
+
+// callSizes implements sort.Interface over callSize entries.
+type callSizes []callSize
+
+func (ci callSizes) Len() int {
+	return len(ci)
+}
+
+// Less orders by call count descending, so the most used class is first.
+func (ci callSizes) Less(i, j int) bool {
+	return ci[i].calls > ci[j].calls
+}
+
+func (ci callSizes) Swap(i, j int) {
+	ci[i], ci[j] = ci[j], ci[i]
+}
+
+// index maps a buffer length n to its size class: class 0 covers lengths
+// up to minSize, every further class doubles the range, and anything
+// beyond maxSize is clamped into the last class.
+func index(n int) int {
+	idx := 0
+	for v := (n - 1) >> minBitSize; v > 0; v >>= 1 {
+		idx++
+	}
+	if idx >= steps {
+		return steps - 1
+	}
+	return idx
+}
+
+var DefaultBufferPool *BufferPool
+
+func init() {
+ DefaultBufferPool = NewBufferPool()
+}
+
+// Stats returns a snapshot of DefaultBufferPool's counters.
+func Stats() PoolStats {
+	return DefaultBufferPool.Stats()
+}
+
+// Get fetches a Buffer from DefaultBufferPool.
+func Get() *Buffer {
+	return DefaultBufferPool.Get()
+}
+
+// Put returns a Buffer to DefaultBufferPool.
+func Put(buf *Buffer) {
+	DefaultBufferPool.Put(buf)
+}
diff --git a/mem/pool_test.go b/mem/pool_test.go
new file mode 100644
index 00000000..c194536c
--- /dev/null
+++ b/mem/pool_test.go
@@ -0,0 +1,94 @@
+package mem
+
+import (
+ "math/rand"
+ "testing"
+ "time"
+)
+
+// TestIndex checks the size-class mapping at the class boundaries: an
+// exact power-of-two length stays in the lower class, one byte more moves
+// to the next class, and lengths beyond maxSize clamp to the last class.
+func TestIndex(t *testing.T) {
+	testIndex(t, 0, 0)
+	testIndex(t, 1, 0)
+
+	testIndex(t, minSize-1, 0)
+	testIndex(t, minSize, 0)
+	testIndex(t, minSize+1, 1)
+
+	testIndex(t, 2*minSize-1, 1)
+	testIndex(t, 2*minSize, 1)
+	testIndex(t, 2*minSize+1, 2)
+
+	testIndex(t, maxSize-1, steps-1)
+	testIndex(t, maxSize, steps-1)
+	testIndex(t, maxSize+1, steps-1)
+}
+
+// testIndex asserts that index(n) yields expectedIdx.
+func testIndex(t *testing.T, n, expectedIdx int) {
+	// t.Helper makes failures point at the calling line, not this helper.
+	t.Helper()
+
+	idx := index(n)
+	if idx != expectedIdx {
+		t.Fatalf("unexpected idx for n=%d: %d. Expecting %d", n, idx, expectedIdx)
+	}
+}
+
+// TestPoolCalibrate issues enough Put calls (steps*calibrateCallsThreshold)
+// against the default pool to guarantee that calibrate() runs. Sizes are
+// mostly constant with occasional random outliers, producing a skewed
+// size-class distribution.
+func TestPoolCalibrate(t *testing.T) {
+	for i := 0; i < steps*calibrateCallsThreshold; i++ {
+		n := 1004
+		if i%15 == 0 {
+			n = rand.Intn(15234)
+		}
+		testGetPut(t, n)
+	}
+}
+
+// TestPoolVariousSizesSerial runs the size sweep on a single goroutine.
+func TestPoolVariousSizesSerial(t *testing.T) {
+	testPoolVariousSizes(t)
+}
+
+// TestPoolVariousSizesConcurrent runs the size sweep from several
+// goroutines at once to exercise the pool under concurrent access.
+//
+// NOTE(review): the spawned goroutines end up calling t.Fatalf (via
+// testGetPut), but FailNow/Fatalf must be called from the goroutine
+// running the test; from any other goroutine it only exits that
+// goroutine. Consider t.Error plus an error channel — confirm intent.
+func TestPoolVariousSizesConcurrent(t *testing.T) {
+	concurrency := 5
+	ch := make(chan struct{})
+	for i := 0; i < concurrency; i++ {
+		go func() {
+			testPoolVariousSizes(t)
+			ch <- struct{}{}
+		}()
+	}
+	for i := 0; i < concurrency; i++ {
+		select {
+		case <-ch:
+		case <-time.After(3 * time.Second):
+			t.Fatalf("timeout")
+		}
+	}
+}
+
+// testPoolVariousSizes cycles buffers with sizes around every power of
+// two up to maxSize through the default pool.
+func testPoolVariousSizes(t *testing.T) {
+	// Mark as a helper so failures are attributed to the caller.
+	t.Helper()
+
+	for i := 0; i < steps+1; i++ {
+		n := (1 << uint32(i))
+
+		testGetPut(t, n)
+		testGetPut(t, n+1)
+		testGetPut(t, n-1)
+
+		for j := 0; j < 10; j++ {
+			testGetPut(t, j+n)
+		}
+	}
+}
+
+// testGetPut acquires a buffer from the default pool, verifies it is
+// empty, grows it to n bytes, and returns it to the pool.
+func testGetPut(t *testing.T, n int) {
+	// Mark as a helper so failures are attributed to the caller.
+	t.Helper()
+
+	bb := Get()
+	if len(bb.data) > 0 {
+		t.Fatalf("non-empty byte buffer returned from acquire")
+	}
+	bb.data = allocNBytes(bb.data, n)
+	Put(bb)
+}
+
+// allocNBytes returns a slice of length exactly n, reusing dst's backing
+// array when its capacity suffices.
+func allocNBytes(dst []byte, n int) []byte {
+	diff := n - cap(dst)
+	if diff <= 0 {
+		return dst[:n]
+	}
+	// Extend from the full capacity so the result has length n even when
+	// dst arrives with len(dst) < cap(dst) (e.g. a reset pooled buffer);
+	// appending to dst directly would yield len(dst)+diff instead of n.
+	return append(dst[:cap(dst)], make([]byte, diff)...)
+}
diff --git a/monitor/cpu.go b/monitor/cpu.go
index 83869653..c2b9ca86 100644
--- a/monitor/cpu.go
+++ b/monitor/cpu.go
@@ -2,7 +2,6 @@ package monitor
import (
"github.com/datarhei/core/v16/monitor/metric"
- "github.com/datarhei/core/v16/psutil"
"github.com/datarhei/core/v16/resources"
)
@@ -15,13 +14,11 @@ type cpuCollector struct {
limitDescr *metric.Description
throttleDescr *metric.Description
- ncpu float64
resources resources.Resources
}
func NewCPUCollector(rsc resources.Resources) metric.Collector {
c := &cpuCollector{
- ncpu: 1,
resources: rsc,
}
@@ -33,10 +30,6 @@ func NewCPUCollector(rsc resources.Resources) metric.Collector {
c.limitDescr = metric.NewDesc("cpu_limit", "Percentage of CPU to be consumed", nil)
c.throttleDescr = metric.NewDesc("cpu_throttling", "Whether the CPU is currently throttled", nil)
- if ncpu, err := psutil.CPUCounts(true); err == nil {
- c.ncpu = ncpu
- }
-
return c
}
@@ -61,29 +54,23 @@ func (c *cpuCollector) Describe() []*metric.Description {
func (c *cpuCollector) Collect() metric.Metrics {
metrics := metric.NewMetrics()
- metrics.Add(metric.NewValue(c.ncpuDescr, c.ncpu))
+ rinfo := c.resources.Info()
- limit, _ := c.resources.Limits()
+ metrics.Add(metric.NewValue(c.ncpuDescr, rinfo.CPU.NCPU))
- metrics.Add(metric.NewValue(c.limitDescr, limit))
+ metrics.Add(metric.NewValue(c.limitDescr, rinfo.CPU.Limit))
- cpu, _ := c.resources.ShouldLimit()
throttling := .0
- if cpu {
+ if rinfo.CPU.Throttling {
throttling = 1
}
metrics.Add(metric.NewValue(c.throttleDescr, throttling))
- stat, err := psutil.CPUPercent()
- if err != nil {
- return metrics
- }
-
- metrics.Add(metric.NewValue(c.systemDescr, stat.System))
- metrics.Add(metric.NewValue(c.userDescr, stat.User))
- metrics.Add(metric.NewValue(c.idleDescr, stat.Idle))
- metrics.Add(metric.NewValue(c.otherDescr, stat.Other))
+ metrics.Add(metric.NewValue(c.systemDescr, rinfo.CPU.System))
+ metrics.Add(metric.NewValue(c.userDescr, rinfo.CPU.User))
+ metrics.Add(metric.NewValue(c.idleDescr, rinfo.CPU.Idle))
+ metrics.Add(metric.NewValue(c.otherDescr, rinfo.CPU.Other))
return metrics
}
diff --git a/monitor/disk.go b/monitor/disk.go
index 7e1ba86d..562d5539 100644
--- a/monitor/disk.go
+++ b/monitor/disk.go
@@ -2,19 +2,21 @@ package monitor
import (
"github.com/datarhei/core/v16/monitor/metric"
- "github.com/datarhei/core/v16/psutil"
+ "github.com/datarhei/core/v16/resources"
)
type diskCollector struct {
- path string
+ path string
+ resources resources.Resources
totalDescr *metric.Description
usageDescr *metric.Description
}
-func NewDiskCollector(path string) metric.Collector {
+func NewDiskCollector(path string, rsc resources.Resources) metric.Collector {
c := &diskCollector{
- path: path,
+ path: path,
+ resources: rsc,
}
c.totalDescr = metric.NewDesc("disk_total", "Total size of the disk in bytes", []string{"path"})
@@ -37,7 +39,7 @@ func (c *diskCollector) Describe() []*metric.Description {
func (c *diskCollector) Collect() metric.Metrics {
metrics := metric.NewMetrics()
- stat, err := psutil.DiskUsage(c.path)
+ stat, err := c.resources.Disk(c.path)
if err != nil {
return metrics
}
diff --git a/monitor/gpu.go b/monitor/gpu.go
new file mode 100644
index 00000000..c0fcb9f9
--- /dev/null
+++ b/monitor/gpu.go
@@ -0,0 +1,79 @@
+package monitor
+
+import (
+ "fmt"
+
+ "github.com/datarhei/core/v16/monitor/metric"
+ "github.com/datarhei/core/v16/resources"
+)
+
+// gpuCollector exposes GPU usage, encoder/decoder load, and memory
+// metrics as reported by the resource manager.
+type gpuCollector struct {
+	ngpuDescr        *metric.Description
+	usageDescr       *metric.Description
+	encoderDescr     *metric.Description
+	decoderDescr     *metric.Description
+	memoryTotalDescr *metric.Description
+	memoryFreeDescr  *metric.Description
+	memoryLimitDescr *metric.Description
+	limitDescr       *metric.Description
+
+	resources resources.Resources
+}
+
+// NewGPUCollector returns a collector that reads GPU metrics from the
+// given resource manager. Per-GPU metrics carry an "index" label.
+func NewGPUCollector(rsc resources.Resources) metric.Collector {
+	c := &gpuCollector{
+		resources: rsc,
+	}
+
+	// Fixed the stray trailing space in the gpu_usage description.
+	c.ngpuDescr = metric.NewDesc("gpu_ngpu", "Number of GPUs in the system", nil)
+	c.usageDescr = metric.NewDesc("gpu_usage", "Percentage of GPU used", []string{"index"})
+	c.encoderDescr = metric.NewDesc("gpu_encoder", "Percentage of GPU encoder used", []string{"index"})
+	c.decoderDescr = metric.NewDesc("gpu_decoder", "Percentage of GPU decoder used", []string{"index"})
+	c.memoryTotalDescr = metric.NewDesc("gpu_mem_total", "GPU memory total in bytes", []string{"index"})
+	c.memoryFreeDescr = metric.NewDesc("gpu_mem_free", "GPU memory available in bytes", []string{"index"})
+	c.memoryLimitDescr = metric.NewDesc("gpu_mem_limit", "GPU memory limit in bytes", []string{"index"})
+	c.limitDescr = metric.NewDesc("gpu_limit", "Percentage of GPU to be consumed", []string{"index"})
+
+	return c
+}
+
+// Stop is a no-op; the collector holds no background resources.
+func (c *gpuCollector) Stop() {}
+
+// Prefix returns the metric name prefix of this collector.
+func (c *gpuCollector) Prefix() string {
+	// All metrics of this collector are named "gpu_*"; returning "cpu"
+	// here was a copy-paste remnant from the CPU collector.
+	return "gpu"
+}
+
+// Describe lists all metric descriptions this collector can emit.
+func (c *gpuCollector) Describe() []*metric.Description {
+	return []*metric.Description{
+		c.ngpuDescr,
+		c.usageDescr,
+		c.encoderDescr,
+		c.decoderDescr,
+		c.memoryTotalDescr,
+		c.memoryFreeDescr,
+		c.memoryLimitDescr,
+		c.limitDescr,
+	}
+}
+
+// Collect reads the current GPU state from the resource manager and
+// emits one set of metrics per GPU, labelled by its index.
+func (c *gpuCollector) Collect() metric.Metrics {
+	metrics := metric.NewMetrics()
+
+	info := c.resources.Info()
+
+	metrics.Add(metric.NewValue(c.ngpuDescr, info.GPU.NGPU))
+
+	for n, g := range info.GPU.GPU {
+		label := fmt.Sprintf("%d", n)
+
+		metrics.Add(metric.NewValue(c.usageDescr, g.Usage, label))
+		metrics.Add(metric.NewValue(c.encoderDescr, g.Encoder, label))
+		metrics.Add(metric.NewValue(c.decoderDescr, g.Decoder, label))
+		metrics.Add(metric.NewValue(c.limitDescr, g.UsageLimit, label))
+
+		metrics.Add(metric.NewValue(c.memoryTotalDescr, float64(g.MemoryTotal), label))
+		metrics.Add(metric.NewValue(c.memoryFreeDescr, float64(g.MemoryAvailable), label))
+		metrics.Add(metric.NewValue(c.memoryLimitDescr, float64(g.MemoryLimit), label))
+	}
+
+	return metrics
+}
diff --git a/monitor/http.go b/monitor/http.go
index cd3d82b6..2330d4d1 100644
--- a/monitor/http.go
+++ b/monitor/http.go
@@ -1,7 +1,7 @@
package monitor
import (
- "strconv"
+ "strings"
"github.com/datarhei/core/v16/http/server"
"github.com/datarhei/core/v16/monitor/metric"
@@ -19,7 +19,7 @@ func NewHTTPCollector(name string, handler server.Server) metric.Collector {
name: name,
}
- c.statusDescr = metric.NewDesc("http_status", "Total return status", []string{"name", "code"})
+ c.statusDescr = metric.NewDesc("http_status", "Total return status count", []string{"name", "code", "method", "path"})
return c
}
@@ -39,8 +39,9 @@ func (c *httpCollector) Collect() metric.Metrics {
metrics := metric.NewMetrics()
- for code, count := range status {
- metrics.Add(metric.NewValue(c.statusDescr, float64(count), c.name, strconv.Itoa(code)))
+ for key, count := range status {
+ vals := strings.SplitN(key, ":", 3)
+ metrics.Add(metric.NewValue(c.statusDescr, float64(count), c.name, vals[0], vals[1], vals[2]))
}
return metrics
diff --git a/monitor/mem.go b/monitor/mem.go
index 10a66f7f..4f6b97d8 100644
--- a/monitor/mem.go
+++ b/monitor/mem.go
@@ -2,7 +2,6 @@ package monitor
import (
"github.com/datarhei/core/v16/monitor/metric"
- "github.com/datarhei/core/v16/psutil"
"github.com/datarhei/core/v16/resources"
)
@@ -44,25 +43,19 @@ func (c *memCollector) Describe() []*metric.Description {
func (c *memCollector) Collect() metric.Metrics {
metrics := metric.NewMetrics()
- _, limit := c.resources.Limits()
+ rinfo := c.resources.Info()
- metrics.Add(metric.NewValue(c.limitDescr, float64(limit)))
+ metrics.Add(metric.NewValue(c.limitDescr, float64(rinfo.Mem.Limit)))
- _, memory := c.resources.ShouldLimit()
throttling := .0
- if memory {
+ if rinfo.Mem.Throttling {
throttling = 1
}
metrics.Add(metric.NewValue(c.throttleDescr, throttling))
- stat, err := psutil.VirtualMemory()
- if err != nil {
- return metrics
- }
-
- metrics.Add(metric.NewValue(c.totalDescr, float64(stat.Total)))
- metrics.Add(metric.NewValue(c.freeDescr, float64(stat.Available)))
+ metrics.Add(metric.NewValue(c.totalDescr, float64(rinfo.Mem.Total)))
+ metrics.Add(metric.NewValue(c.freeDescr, float64(rinfo.Mem.Available)))
return metrics
}
diff --git a/monitor/net.go b/monitor/net.go
index 87b2b8a3..f961aa1e 100644
--- a/monitor/net.go
+++ b/monitor/net.go
@@ -2,16 +2,20 @@ package monitor
import (
"github.com/datarhei/core/v16/monitor/metric"
- "github.com/datarhei/core/v16/psutil"
+ "github.com/datarhei/core/v16/resources"
)
type netCollector struct {
rxDescr *metric.Description
txDescr *metric.Description
+
+ resources resources.Resources
}
-func NewNetCollector() metric.Collector {
- c := &netCollector{}
+func NewNetCollector(rsc resources.Resources) metric.Collector {
+ c := &netCollector{
+ resources: rsc,
+ }
c.rxDescr = metric.NewDesc("net_rx", "Number of received bytes", []string{"interface"})
c.txDescr = metric.NewDesc("net_tx", "Number of transmitted bytes", []string{"interface"})
@@ -33,7 +37,7 @@ func (c *netCollector) Describe() []*metric.Description {
func (c *netCollector) Collect() metric.Metrics {
metrics := metric.NewMetrics()
- devs, err := psutil.NetIOCounters(true)
+ devs, err := c.resources.Network()
if err != nil {
return metrics
}
diff --git a/monitor/self.go b/monitor/self.go
new file mode 100644
index 00000000..1964340b
--- /dev/null
+++ b/monitor/self.go
@@ -0,0 +1,100 @@
+package monitor
+
+import (
+ "runtime"
+
+ "github.com/datarhei/core/v16/mem"
+ "github.com/datarhei/core/v16/monitor/metric"
+)
+
+// selfCollector reports metrics about the core process itself: buffer
+// pool statistics from the mem package and Go runtime heap statistics.
+type selfCollector struct {
+	bufferAllocDescr       *metric.Description
+	bufferReuseDescr       *metric.Description
+	bufferRecycleDescr     *metric.Description
+	bufferDumpDescr        *metric.Description
+	bufferDefaultSizeDescr *metric.Description
+	bufferMaxSizeDescr     *metric.Description
+
+	heapAllocDescr   *metric.Description
+	totalAllocDescr  *metric.Description
+	heapSysDescr     *metric.Description
+	heapIdleDescr    *metric.Description
+	heapInuseDescr   *metric.Description
+	heapObjectsDescr *metric.Description
+	mallocsDescr     *metric.Description
+	freesDescr       *metric.Description
+}
+
+// NewSelfCollector returns a collector for process-internal metrics
+// (buffer pool counters and Go runtime memory statistics).
+func NewSelfCollector() metric.Collector {
+	c := &selfCollector{}
+
+	c.bufferAllocDescr = metric.NewDesc("self_bufferpool_alloc", "Number of buffer allocations", nil)
+	c.bufferReuseDescr = metric.NewDesc("self_bufferpool_reuse", "Number of buffer reuses", nil)
+	c.bufferRecycleDescr = metric.NewDesc("self_bufferpool_recycle", "Number of buffer recycles", nil)
+	c.bufferDumpDescr = metric.NewDesc("self_bufferpool_dump", "Number of buffer dumps", nil)
+	c.bufferDefaultSizeDescr = metric.NewDesc("self_bufferpool_default_size", "Default buffer size", nil)
+	c.bufferMaxSizeDescr = metric.NewDesc("self_bufferpool_max_size", "Max. buffer size for recycling", nil)
+
+	c.heapAllocDescr = metric.NewDesc("self_mem_heap_alloc_bytes", "Number of bytes allocated on the heap", nil)
+	c.totalAllocDescr = metric.NewDesc("self_mem_total_alloc_bytes", "Cumulative count of bytes allocated since start", nil)
+	c.heapSysDescr = metric.NewDesc("self_mem_heap_sys_bytes", "Number of bytes obtained from OS", nil)
+	c.heapIdleDescr = metric.NewDesc("self_mem_heap_idle_bytes", "Number of unused bytes", nil)
+	c.heapInuseDescr = metric.NewDesc("self_mem_heap_inuse_bytes", "Number of used bytes", nil)
+	c.heapObjectsDescr = metric.NewDesc("self_mem_heap_objects", "Number of objects in heap", nil)
+	c.mallocsDescr = metric.NewDesc("self_mem_mallocs", "Cumulative count of heap objects allocated", nil)
+	c.freesDescr = metric.NewDesc("self_mem_frees", "Cumulative count of heap objects freed", nil)
+
+	return c
+}
+
+// Stop is a no-op; the collector holds no background resources.
+func (c *selfCollector) Stop() {}
+
+// Prefix returns the metric name prefix of this collector.
+//
+// NOTE(review): this returns "bufferpool" while every metric this
+// collector describes is named "self_*" — confirm whether it should
+// return "self" instead (compare gpuCollector/cpuCollector).
+func (c *selfCollector) Prefix() string {
+	return "bufferpool"
+}
+
+// Describe lists all metric descriptions this collector can emit.
+func (c *selfCollector) Describe() []*metric.Description {
+	return []*metric.Description{
+		c.bufferAllocDescr,
+		c.bufferReuseDescr,
+		c.bufferRecycleDescr,
+		c.bufferDumpDescr,
+		c.bufferDefaultSizeDescr,
+		c.bufferMaxSizeDescr,
+		c.heapAllocDescr,
+		c.totalAllocDescr,
+		c.heapSysDescr,
+		c.heapIdleDescr,
+		c.heapInuseDescr,
+		c.heapObjectsDescr,
+		c.mallocsDescr,
+		c.freesDescr,
+	}
+}
+
+func (c *selfCollector) Collect() metric.Metrics {
+ bufferstats := mem.Stats()
+
+ metrics := metric.NewMetrics()
+
+ metrics.Add(metric.NewValue(c.bufferAllocDescr, float64(bufferstats.Alloc)))
+ metrics.Add(metric.NewValue(c.bufferReuseDescr, float64(bufferstats.Reuse)))
+ metrics.Add(metric.NewValue(c.bufferRecycleDescr, float64(bufferstats.Recycle)))
+ metrics.Add(metric.NewValue(c.bufferDumpDescr, float64(bufferstats.Dump)))
+ metrics.Add(metric.NewValue(c.bufferDefaultSizeDescr, float64(bufferstats.DefaultSize)))
+ metrics.Add(metric.NewValue(c.bufferMaxSizeDescr, float64(bufferstats.MaxSize)))
+
+ memstats := runtime.MemStats{}
+ runtime.ReadMemStats(&memstats)
+
+ metrics.Add(metric.NewValue(c.heapAllocDescr, float64(memstats.HeapAlloc)))
+ metrics.Add(metric.NewValue(c.totalAllocDescr, float64(memstats.TotalAlloc)))
+ metrics.Add(metric.NewValue(c.heapSysDescr, float64(memstats.HeapSys)))
+ metrics.Add(metric.NewValue(c.heapIdleDescr, float64(memstats.HeapIdle)))
+ metrics.Add(metric.NewValue(c.heapInuseDescr, float64(memstats.HeapInuse)))
+ metrics.Add(metric.NewValue(c.heapObjectsDescr, float64(memstats.HeapObjects)))
+ metrics.Add(metric.NewValue(c.mallocsDescr, float64(memstats.Mallocs)))
+ metrics.Add(metric.NewValue(c.freesDescr, float64(memstats.Frees)))
+
+ return metrics
+}
diff --git a/process/limiter.go b/process/limiter.go
index 79f791e3..dfe90d60 100644
--- a/process/limiter.go
+++ b/process/limiter.go
@@ -7,7 +7,7 @@ import (
"time"
"github.com/datarhei/core/v16/log"
- "github.com/datarhei/core/v16/psutil"
+ "github.com/datarhei/core/v16/resources"
)
type Usage struct {
@@ -25,9 +25,36 @@ type Usage struct {
Max uint64 // bytes
Limit uint64 // bytes
}
+ GPU struct {
+ Index int // number of the GPU
+ Memory struct {
+ Current uint64 // bytes
+ Average float64 // bytes
+ Max uint64 // bytes
+ Limit uint64 // bytes
+ }
+ Usage struct {
+ Current float64 // percent 0-100
+ Average float64 // percent 0-100
+ Max float64 // percent 0-100
+ Limit float64 // percent 0-100
+ }
+ Encoder struct {
+ Current float64 // percent 0-100
+ Average float64 // percent 0-100
+ Max float64 // percent 0-100
+ Limit float64 // percent 0-100
+ }
+ Decoder struct {
+ Current float64 // percent 0-100
+ Average float64 // percent 0-100
+ Max float64 // percent 0-100
+ Limit float64 // percent 0-100
+ }
+ }
}
-type LimitFunc func(cpu float64, memory uint64)
+type LimitFunc func(cpu float64, memory uint64, gpuusage, gpuencoder, gpudecoder float64, gpumemory uint64)
type LimitMode int
@@ -44,53 +71,164 @@ func (m LimitMode) String() string {
}
const (
- LimitModeHard LimitMode = 0 // Killing the process if either CPU or memory is above the limit for a certain time
- LimitModeSoft LimitMode = 1 // Throttling the CPU if activated, killing the process if memory is above the limit for a certain time
+ LimitModeHard LimitMode = 0 // Killing the process if either resource is above the limit for a certain time.
+ LimitModeSoft LimitMode = 1 // If activated, will throttle the CPU, otherwise killing the process if resources are above the limit.
)
type LimiterConfig struct {
- CPU float64 // Max. CPU usage in percent 0-100 in hard mode, 0-100*ncpu in softmode
- Memory uint64 // Max. memory usage in bytes
- WaitFor time.Duration // Duration for one of the limits has to be above the limit until OnLimit gets triggered
- OnLimit LimitFunc // Function to be triggered if limits are exceeded
- Mode LimitMode // How to limit CPU usage
- PSUtil psutil.Util
- Logger log.Logger
+ CPU float64 // Max. CPU usage in percent 0-100 in hard mode, 0-100*ncpu in soft mode.
+ Memory uint64 // Max. memory usage in bytes.
+ GPUUsage float64 // Max. GPU general usage in percent 0-100.
+ GPUEncoder float64 // Max. GPU encoder usage in percent 0-100.
+ GPUDecoder float64 // Max. GPU decoder usage in percent 0-100.
+ GPUMemory uint64 // Max. GPU memory usage in bytes.
+ WaitFor time.Duration // Duration for one of the limits has to be above the limit until OnLimit gets triggered.
+ OnLimit LimitFunc // Function to be triggered if limits are exceeded.
+ Mode LimitMode // How to limit CPU usage.
+ NCPU float64 // Number of available CPU
+ Logger log.Logger
}
type Limiter interface {
// Start starts the limiter with a psutil.Process.
- Start(process psutil.Process) error
+ Start(process resources.Process) error
// Stop stops the limiter. The limiter can be reused by calling Start() again
Stop()
- // Current returns the current CPU and memory values
- // Deprecated: use Usage()
- Current() (cpu float64, memory uint64)
-
- // Limits returns the defined CPU and memory limits. Values <= 0 means no limit
- // Deprecated: use Usage()
- Limits() (cpu float64, memory uint64)
-
// Usage returns the current state of the limiter, such as current, average, max, and
// limit values for CPU and memory.
Usage() Usage
// Limit enables or disables the throttling of the CPU or killing because of to much
- // memory consumption.
- Limit(cpu, memory bool) error
+ // memory or GPU consumption.
+ Limit(cpu, memory, gpu bool) error
// Mode returns in which mode the limiter is running in.
Mode() LimitMode
}
-type limiter struct {
- psutil psutil.Util
+// numbers constrains the metric value types: byte counts (uint64) and
+// percentages/ratios (float64).
+type numbers interface {
+	~uint64 | ~float64
+}
+
+// metric tracks one resource value over time: the configured limit and
+// the current, last, max, decaying-top, and running-average observations.
+type metric[T numbers] struct {
+	limit       T         // Limit
+	current     T         // Current load value
+	last        T         // Last load value
+	max         T         // Max. load value
+	top         T         // Decaying max. load value
+	avg         float64   // Average load value
+	avgCounter  uint64    // Counter for average calculation
+	limitSince  time.Time // Time when the limit has been reached (hard limiter mode)
+	limitEnable bool      // Whether limiting is active (soft limiter mode)
+}
+
+// Reset clears all observed values while keeping the configured limit.
+func (x *metric[T]) Reset() {
+	var zero T
+
+	x.current, x.last = zero, zero
+	x.max, x.top = zero, zero
+	x.avg, x.avgCounter = 0, 0
+	x.limitEnable = false
+}
+
+// Current returns the most recent observation.
+func (x *metric[T]) Current() T {
+	return x.current
+}
+
+// Top returns the decaying maximum: it tracks peaks and shrinks by 5%
+// per update while below the peak (see Update).
+func (x *metric[T]) Top() T {
+	return x.top
+}
+
+// Max returns the all-time maximum observation.
+func (x *metric[T]) Max() T {
+	return x.max
+}
+
+// Avg returns the running average over all observations.
+func (x *metric[T]) Avg() float64 {
+	return x.avg
+}
+
+// SetLimit sets the limit; values <= 0 disable limit checks (see IsExceeded).
+func (x *metric[T]) SetLimit(limit T) {
+	x.limit = limit
+}
+
+// Limit returns the configured limit.
+func (x *metric[T]) Limit() T {
+	return x.limit
+}
+
+// DoLimit sets whether limiting is active (soft mode) and reports the
+// resulting state and whether it changed.
+func (x *metric[T]) DoLimit(limit bool) (enabled, changed bool) {
+	changed = x.limitEnable != limit
+	x.limitEnable = limit
+
+	return x.limitEnable, changed
+}
+
+// IsLimitEnabled reports whether limiting is currently active.
+func (x *metric[T]) IsLimitEnabled() bool {
+	return x.limitEnable
+}
+// Update records a new observation: shifts current into last, updates
+// the all-time max, applies a 5% decay to the rolling top when below the
+// peak, and folds the value into the running average.
+func (x *metric[T]) Update(value T) {
+	x.last, x.current = x.current, value
+
+	if x.current > x.max {
+		x.max = x.current
+	}
+
+	if x.current > x.top {
+		x.top = x.current
+	} else {
+		x.top = T(float64(x.top) * 0.95)
+	}
+
+	x.avgCounter++
+
+	// Incremental mean: avg_n = (avg_{n-1}*(n-1) + value) / n.
+	x.avg = ((x.avg * float64(x.avgCounter-1)) + float64(x.current)) / float64(x.avgCounter)
+}
+
+// IsExceeded reports whether the limit is exceeded. A limit <= 0 never
+// triggers. In soft mode the limit only counts once limiting has been
+// enabled via DoLimit and the current value is above it. In hard mode
+// the value must stay above the limit continuously for at least waitFor.
+func (x *metric[T]) IsExceeded(waitFor time.Duration, mode LimitMode) bool {
+	if x.limit <= 0 {
+		return false
+	}
+
+	if mode == LimitModeSoft {
+		// Check if we actually should limit.
+		if !x.limitEnable {
+			return false
+		}
+
+		// If we are currently above the limit, the limit is exceeded.
+		if x.current > x.limit {
+			return true
+		}
+	} else {
+		if x.current > x.limit {
+			// Current value is higher than the limit.
+			if x.last <= x.limit {
+				// If the previous value is below the limit, then we reached the limit as of now.
+				x.limitSince = time.Now()
+			}
+
+			// Trigger only after the value has been above the limit for waitFor.
+			if time.Since(x.limitSince) >= waitFor {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+type limiter struct {
ncpu float64
ncpuFactor float64
- proc psutil.Process
+ proc resources.Process
lock sync.RWMutex
cancel context.CancelFunc
onLimit LimitFunc
@@ -98,109 +236,96 @@ type limiter struct {
lastUsage Usage
lastUsageLock sync.RWMutex
- cpu float64 // CPU limit
- cpuCurrent float64 // Current CPU load of this process
- cpuLast float64 // Last CPU load of this process
- cpuMax float64 // Max. CPU load of this process
- cpuTop float64 // Decaying max. CPU load of this process
- cpuAvg float64 // Average CPU load of this process
- cpuAvgCounter uint64 // Counter for average calculation
- cpuLimitSince time.Time // Time when the CPU limit has been reached (hard limiter mode)
- cpuLimitEnable bool // Whether CPU throttling is enabled (soft limiter mode)
- cpuThrottling bool // Whether CPU throttling is currently active (soft limiter mode)
-
- memory uint64 // Memory limit (bytes)
- memoryCurrent uint64 // Current memory usage
- memoryLast uint64 // Last memory usage
- memoryMax uint64 // Max. memory usage
- memoryTop uint64 // Decaying max. memory usage
- memoryAvg float64 // Average memory usage
- memoryAvgCounter uint64 // Counter for average memory calculation
- memoryLimitSince time.Time // Time when the memory limit has been reached (hard limiter mode)
- memoryLimitEnable bool // Whether memory limiting is enabled (soft limiter mode)
+ cpu metric[float64] // CPU limit
+ cpuThrottling bool // Whether CPU throttling is currently active (soft limiter mode)
+
+ memory metric[uint64] // Memory limit (bytes)
+
+ gpu struct {
+ memory metric[uint64] // GPU memory limit (0-100 percent)
+ usage metric[float64] // GPU load limit (0-100 percent)
+ encoder metric[float64] // GPU encoder limit (0-100 percent)
+ decoder metric[float64] // GPU decoder limit (0-100 percent)
+ }
waitFor time.Duration
mode LimitMode
- cancelLimit context.CancelFunc
-
logger log.Logger
}
// NewLimiter returns a new Limiter
-func NewLimiter(config LimiterConfig) Limiter {
+func NewLimiter(config LimiterConfig) (Limiter, error) {
l := &limiter{
- cpu: config.CPU,
- memory: config.Memory,
+ ncpu: config.NCPU,
waitFor: config.WaitFor,
onLimit: config.OnLimit,
mode: config.Mode,
- psutil: config.PSUtil,
logger: config.Logger,
}
- if l.logger == nil {
- l.logger = log.New("")
+ if l.ncpu <= 0 {
+ l.ncpu = 1
}
- if l.psutil == nil {
- l.psutil = psutil.DefaultUtil
- }
+ l.cpu.SetLimit(config.CPU / 100)
+ l.memory.SetLimit(config.Memory)
+ l.gpu.memory.SetLimit(config.GPUMemory)
+ l.gpu.usage.SetLimit(config.GPUUsage / 100)
+ l.gpu.encoder.SetLimit(config.GPUEncoder / 100)
+ l.gpu.decoder.SetLimit(config.GPUDecoder / 100)
- if ncpu, err := l.psutil.CPUCounts(true); err != nil {
- l.ncpu = 1
- } else {
- l.ncpu = ncpu
+ if l.logger == nil {
+ l.logger = log.New("")
}
l.lastUsage.CPU.NCPU = l.ncpu
- l.lastUsage.CPU.Limit = l.cpu * l.ncpu
- l.lastUsage.Memory.Limit = l.memory
+ l.lastUsage.CPU.Limit = l.cpu.Limit() * 100 * l.ncpu
+ l.lastUsage.Memory.Limit = l.memory.Limit()
+ l.lastUsage.GPU.Memory.Limit = l.gpu.memory.Limit()
+ l.lastUsage.GPU.Usage.Limit = l.gpu.usage.Limit() * 100
+ l.lastUsage.GPU.Encoder.Limit = l.gpu.encoder.Limit() * 100
+ l.lastUsage.GPU.Decoder.Limit = l.gpu.decoder.Limit() * 100
l.ncpuFactor = 1
mode := "hard"
if l.mode == LimitModeSoft {
mode = "soft"
- l.cpu /= l.ncpu
+ l.cpu.SetLimit(l.cpu.Limit() / l.ncpu)
l.ncpuFactor = l.ncpu
}
- l.cpu /= 100
-
if l.onLimit == nil {
- l.onLimit = func(float64, uint64) {}
+ l.onLimit = func(float64, uint64, float64, float64, float64, uint64) {}
}
l.logger = l.logger.WithFields(log.Fields{
- "cpu": l.cpu * l.ncpuFactor,
- "memory": l.memory,
- "mode": mode,
+ "cpu": l.cpu.Limit() * l.ncpuFactor,
+ "memory": l.memory.Limit(),
+ "gpumemory": l.gpu.memory.Limit(),
+ "gpuusage": l.gpu.usage.Limit(),
+ "gpuencoder": l.gpu.encoder.Limit(),
+ "gpudecoder": l.gpu.decoder.Limit(),
+ "mode": mode,
})
- return l
+ return l, nil
}
func (l *limiter) reset() {
- l.cpuCurrent = 0
- l.cpuLast = 0
- l.cpuAvg = 0
- l.cpuAvgCounter = 0
- l.cpuMax = 0
- l.cpuTop = 0
- l.cpuLimitEnable = false
+ l.cpu.Reset()
l.cpuThrottling = false
- l.memoryCurrent = 0
- l.memoryLast = 0
- l.memoryAvg = 0
- l.memoryAvgCounter = 0
- l.memoryMax = 0
- l.memoryTop = 0
- l.memoryLimitEnable = false
+ l.memory.Reset()
+
+ l.gpu.memory.Reset()
+ l.gpu.usage.Reset()
+ l.gpu.encoder.Reset()
+ l.gpu.decoder.Reset()
}
-func (l *limiter) Start(process psutil.Process) error {
+func (l *limiter) Start(process resources.Process) error {
l.lock.Lock()
defer l.lock.Unlock()
@@ -218,10 +343,7 @@ func (l *limiter) Start(process psutil.Process) error {
go l.ticker(ctx, time.Second)
if l.mode == LimitModeSoft {
- ctx, cancel = context.WithCancel(context.Background())
- l.cancelLimit = cancel
-
- go l.limitCPU(ctx, l.cpu, time.Second)
+ go l.limitCPU(ctx, l.cpu.Limit(), time.Second)
}
return nil
@@ -237,12 +359,7 @@ func (l *limiter) Stop() {
l.cancel()
- if l.cancelLimit != nil {
- l.cancelLimit()
- l.cancelLimit = nil
- }
-
- l.proc.Stop()
+ l.proc.Cancel()
l.proc = nil
l.reset()
@@ -256,13 +373,13 @@ func (l *limiter) ticker(ctx context.Context, interval time.Duration) {
select {
case <-ctx.Done():
return
- case t := <-ticker.C:
- l.collect(t)
+ case <-ticker.C:
+ l.collect()
}
}
}
-func (l *limiter) collect(_ time.Time) {
+func (l *limiter) collect() {
l.lock.Lock()
proc := l.proc
l.lock.Unlock()
@@ -271,118 +388,104 @@ func (l *limiter) collect(_ time.Time) {
return
}
- mstat, merr := proc.VirtualMemory()
- cpustat, cerr := proc.CPUPercent()
-
- l.lock.Lock()
+ pinfo, err := proc.Info()
- if merr == nil {
- l.memoryLast, l.memoryCurrent = l.memoryCurrent, mstat
+ //mstat, merr := proc.Memory()
+ //cpustat, cerr := proc.CPU()
+ //gstat, gerr := proc.GPU()
+ gindex := -1
- if l.memoryCurrent > l.memoryMax {
- l.memoryMax = l.memoryCurrent
- }
-
- if l.memoryCurrent > l.memoryTop {
- l.memoryTop = l.memoryCurrent
- } else {
- l.memoryTop = uint64(float64(l.memoryTop) * 0.95)
- }
-
- l.memoryAvgCounter++
+ l.lock.Lock()
+ defer l.lock.Unlock()
- l.memoryAvg = ((l.memoryAvg * float64(l.memoryAvgCounter-1)) + float64(l.memoryCurrent)) / float64(l.memoryAvgCounter)
+ if err == nil {
+ l.memory.Update(pinfo.Memory)
+ l.cpu.Update((pinfo.CPU.System + pinfo.CPU.User + pinfo.CPU.Other) / 100)
+ l.gpu.memory.Update(pinfo.GPU.MemoryUsed)
+ l.gpu.usage.Update(pinfo.GPU.Usage / 100)
+ l.gpu.encoder.Update(pinfo.GPU.Encoder / 100)
+ l.gpu.decoder.Update(pinfo.GPU.Decoder / 100)
+ gindex = pinfo.GPU.Index
}
- if cerr == nil {
- l.cpuLast, l.cpuCurrent = l.cpuCurrent, (cpustat.System+cpustat.User+cpustat.Other)/100
-
- if l.cpuCurrent > l.cpuMax {
- l.cpuMax = l.cpuCurrent
- }
+ isLimitExceeded := false
- if l.cpuCurrent > l.cpuTop {
- l.cpuTop = l.cpuCurrent
- } else {
- l.cpuTop = l.cpuTop * 0.95
+ if l.mode == LimitModeHard {
+ if l.cpu.IsExceeded(l.waitFor, l.mode) {
+ l.logger.Warn().Log("CPU limit exceeded")
+ isLimitExceeded = true
}
-
- l.cpuAvgCounter++
-
- l.cpuAvg = ((l.cpuAvg * float64(l.cpuAvgCounter-1)) + l.cpuCurrent) / float64(l.cpuAvgCounter)
}
- isLimitExceeded := false
+ if l.memory.IsExceeded(l.waitFor, l.mode) {
+ l.logger.Warn().Log("Memory limit exceeded")
+ isLimitExceeded = true
+ }
- if l.mode == LimitModeHard {
- if l.cpu > 0 {
- if l.cpuCurrent > l.cpu {
- // Current value is higher than the limit
- if l.cpuLast <= l.cpu {
- // If the previous value is below the limit, then we reached the
- // limit as of now
- l.cpuLimitSince = time.Now()
- }
+ if l.gpu.memory.IsExceeded(l.waitFor, l.mode) {
+ l.logger.Warn().Log("GPU memory limit exceeded")
+ isLimitExceeded = true
+ }
- if time.Since(l.cpuLimitSince) >= l.waitFor {
- l.logger.Warn().Log("CPU limit exceeded")
- isLimitExceeded = true
- }
- }
- }
+ if l.gpu.usage.IsExceeded(l.waitFor, l.mode) {
+ l.logger.Warn().Log("GPU usage limit exceeded")
+ isLimitExceeded = true
+ }
- if l.memory > 0 {
- if l.memoryCurrent > l.memory {
- // Current value is higher than the limit
- if l.memoryLast <= l.memory {
- // If the previous value is below the limit, then we reached the
- // limit as of now
- l.memoryLimitSince = time.Now()
- }
+ if l.gpu.encoder.IsExceeded(l.waitFor, l.mode) {
+ l.logger.Warn().Log("GPU encoder limit exceeded")
+ isLimitExceeded = true
+ }
- if time.Since(l.memoryLimitSince) >= l.waitFor {
- l.logger.Warn().Log("Memory limit exceeded")
- isLimitExceeded = true
- }
- }
- }
- } else {
- if l.memory > 0 && l.memoryLimitEnable {
- if l.memoryCurrent > l.memory {
- // Current value is higher than the limit
- l.logger.Warn().Log("Memory limit exceeded")
- isLimitExceeded = true
- }
- }
+ if l.gpu.decoder.IsExceeded(l.waitFor, l.mode) {
+ l.logger.Warn().Log("GPU decoder limit exceeded")
+ isLimitExceeded = true
}
l.logger.Debug().WithFields(log.Fields{
- "cur_cpu": l.cpuCurrent * l.ncpuFactor,
- "top_cpu": l.cpuTop * l.ncpuFactor,
- "cur_mem": l.memoryCurrent,
- "top_mem": l.memoryTop,
- "exceeded": isLimitExceeded,
+ "cur_cpu": l.cpu.Current() * l.ncpuFactor,
+ "top_cpu": l.cpu.Top() * l.ncpuFactor,
+ "cur_mem": l.memory.Current(),
+ "top_mem": l.memory.Top(),
+ "cur_gpu_mem": l.gpu.memory.Current(),
+ "top_gpu_mem": l.gpu.memory.Top(),
+ "exceeded": isLimitExceeded,
}).Log("Observation")
if isLimitExceeded {
- go l.onLimit(l.cpuCurrent*l.ncpuFactor*100, l.memoryCurrent)
+ go l.onLimit(l.cpu.Current()*l.ncpuFactor*100, l.memory.Current(), l.gpu.usage.Current(), l.gpu.encoder.Current(), l.gpu.decoder.Current(), l.gpu.memory.Current())
}
l.lastUsageLock.Lock()
- l.lastUsage.CPU.Current = l.cpuCurrent * l.ncpu * 100
- l.lastUsage.CPU.Average = l.cpuAvg * l.ncpu * 100
- l.lastUsage.CPU.Max = l.cpuMax * l.ncpu * 100
+ l.lastUsage.CPU.Current = l.cpu.Current() * l.ncpu * 100
+ l.lastUsage.CPU.Average = l.cpu.Avg() * l.ncpu * 100
+ l.lastUsage.CPU.Max = l.cpu.Max() * l.ncpu * 100
l.lastUsage.CPU.IsThrottling = l.cpuThrottling
- l.lastUsage.Memory.Current = l.memoryCurrent
- l.lastUsage.Memory.Average = l.memoryAvg
- l.lastUsage.Memory.Max = l.memoryMax
- l.lastUsageLock.Unlock()
+ l.lastUsage.Memory.Current = l.memory.Current()
+ l.lastUsage.Memory.Average = l.memory.Avg()
+ l.lastUsage.Memory.Max = l.memory.Max()
- l.lock.Unlock()
+ l.lastUsage.GPU.Index = gindex
+ l.lastUsage.GPU.Memory.Current = l.gpu.memory.Current()
+ l.lastUsage.GPU.Memory.Average = l.gpu.memory.Avg()
+ l.lastUsage.GPU.Memory.Max = l.gpu.memory.Max()
+
+ l.lastUsage.GPU.Usage.Current = l.gpu.usage.Current() * 100
+ l.lastUsage.GPU.Usage.Average = l.gpu.usage.Avg() * 100
+ l.lastUsage.GPU.Usage.Max = l.gpu.usage.Max() * 100
+
+ l.lastUsage.GPU.Encoder.Current = l.gpu.encoder.Current() * 100
+ l.lastUsage.GPU.Encoder.Average = l.gpu.encoder.Avg() * 100
+ l.lastUsage.GPU.Encoder.Max = l.gpu.encoder.Max() * 100
+
+ l.lastUsage.GPU.Decoder.Current = l.gpu.decoder.Current() * 100
+ l.lastUsage.GPU.Decoder.Average = l.gpu.decoder.Avg() * 100
+ l.lastUsage.GPU.Decoder.Max = l.gpu.decoder.Max() * 100
+ l.lastUsageLock.Unlock()
}
-func (l *limiter) Limit(cpu, memory bool) error {
+func (l *limiter) Limit(cpu, memory, gpu bool) error {
l.lock.Lock()
defer l.lock.Unlock()
@@ -390,41 +493,38 @@ func (l *limiter) Limit(cpu, memory bool) error {
return nil
}
- if memory {
- if !l.memoryLimitEnable {
- l.memoryLimitEnable = true
-
- l.logger.Debug().Log("Memory limiter enabled")
- }
- } else {
- if l.memoryLimitEnable {
- l.memoryLimitEnable = false
-
- l.logger.Debug().Log("Memory limiter disabled")
- }
+ enabled, changed := l.cpu.DoLimit(cpu)
+ if enabled && changed {
+ l.logger.Debug().Log("CPU limiter enabled")
+ } else if !enabled && changed {
+ l.logger.Debug().Log("CPU limiter disabled")
}
- if cpu {
- if !l.cpuLimitEnable {
- l.cpuLimitEnable = true
-
- l.logger.Debug().Log("CPU limiter enabled")
- }
- } else {
- if l.cpuLimitEnable {
- l.cpuLimitEnable = false
-
- l.logger.Debug().Log("CPU limiter disabled")
- }
+ enabled, changed = l.memory.DoLimit(memory)
+ if enabled && changed {
+ l.logger.Debug().Log("Memory limiter enabled")
+ } else if !enabled && changed {
+ l.logger.Debug().Log("Memory limiter disabled")
+ }
+ enabled, changed = l.gpu.memory.DoLimit(gpu)
+ if enabled && changed {
+ l.logger.Debug().Log("GPU limiter enabled")
+ } else if !enabled && changed {
+ l.logger.Debug().Log("GPU limiter disabled")
}
+ l.gpu.usage.DoLimit(gpu)
+ l.gpu.encoder.DoLimit(gpu)
+ l.gpu.decoder.DoLimit(gpu)
+
return nil
}
// limitCPU will limit the CPU usage of this process. The limit is the max. CPU usage
// normed to 0-1. The interval defines how long a time slot is that will be splitted
// into sleeping and working.
+// Inspired by https://github.com/opsengine/cpulimit
func (l *limiter) limitCPU(ctx context.Context, limit float64, interval time.Duration) {
defer func() {
l.lock.Lock()
@@ -452,7 +552,7 @@ func (l *limiter) limitCPU(ctx context.Context, limit float64, interval time.Dur
l.lock.Lock()
- if !l.cpuLimitEnable {
+ if !l.cpu.IsLimitEnabled() {
if factorTopLimit > 0 {
factorTopLimit -= 10
} else {
@@ -468,7 +568,7 @@ func (l *limiter) limitCPU(ctx context.Context, limit float64, interval time.Dur
}
} else {
factorTopLimit = 100
- topLimit = l.cpuTop - limit
+ topLimit = l.cpu.Top() - limit
l.cpuThrottling = true
}
@@ -481,7 +581,7 @@ func (l *limiter) limitCPU(ctx context.Context, limit float64, interval time.Dur
lim += (100 - factorTopLimit) / 100 * topLimit
}
- pcpu := l.cpuCurrent
+ pcpu := l.cpu.Current()
l.lock.Unlock()
@@ -525,16 +625,6 @@ func (l *limiter) limitCPU(ctx context.Context, limit float64, interval time.Dur
}
}
-func (l *limiter) Current() (cpu float64, memory uint64) {
- l.lastUsageLock.RLock()
- defer l.lastUsageLock.RUnlock()
-
- cpu = l.lastUsage.CPU.Current / l.ncpu
- memory = l.lastUsage.Memory.Current
-
- return
-}
-
func (l *limiter) Usage() Usage {
l.lastUsageLock.RLock()
defer l.lastUsageLock.RUnlock()
@@ -542,10 +632,6 @@ func (l *limiter) Usage() Usage {
return l.lastUsage
}
-func (l *limiter) Limits() (cpu float64, memory uint64) {
- return l.cpu * 100, l.memory
-}
-
func (l *limiter) Mode() LimitMode {
return l.mode
}
diff --git a/process/limiter_test.go b/process/limiter_test.go
index c9e31127..bcb11045 100644
--- a/process/limiter_test.go
+++ b/process/limiter_test.go
@@ -5,29 +5,37 @@ import (
"testing"
"time"
- "github.com/datarhei/core/v16/psutil"
+ "github.com/datarhei/core/v16/resources"
- "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
-type psproc struct{}
-
-func (p *psproc) CPUPercent() (*psutil.CPUInfoStat, error) {
- return &psutil.CPUInfoStat{
- System: 50,
- User: 0,
- Idle: 0,
- Other: 0,
- }, nil
-}
-
-func (p *psproc) VirtualMemory() (uint64, error) {
- return 197, nil
+type proc struct{}
+
+func (p *proc) Info() (resources.ProcessInfo, error) {
+ info := resources.ProcessInfo{
+ CPU: resources.ProcessInfoCPU{
+ System: 50,
+ User: 0,
+ Idle: 0,
+ Other: 0,
+ },
+ Memory: 197,
+ GPU: resources.ProcessInfoGPU{
+ Index: 0,
+ MemoryUsed: 91,
+ Usage: 3,
+ Encoder: 9,
+ Decoder: 5,
+ },
+ }
+
+ return info, nil
}
-func (p *psproc) Stop() {}
-func (p *psproc) Suspend() error { return nil }
-func (p *psproc) Resume() error { return nil }
+func (p *proc) Cancel() {}
+func (p *proc) Suspend() error { return nil }
+func (p *proc) Resume() error { return nil }
func TestCPULimit(t *testing.T) {
lock := sync.Mutex{}
@@ -40,14 +48,14 @@ func TestCPULimit(t *testing.T) {
wg := sync.WaitGroup{}
wg.Add(1)
- l := NewLimiter(LimiterConfig{
+ l, _ := NewLimiter(LimiterConfig{
CPU: 42,
- OnLimit: func(float64, uint64) {
+ OnLimit: func(float64, uint64, float64, float64, float64, uint64) {
wg.Done()
},
})
- l.Start(&psproc{})
+ l.Start(&proc{})
defer l.Stop()
wg.Wait()
@@ -57,7 +65,7 @@ func TestCPULimit(t *testing.T) {
lock.Unlock()
}()
- assert.Eventually(t, func() bool {
+ require.Eventually(t, func() bool {
lock.Lock()
defer lock.Unlock()
@@ -76,15 +84,15 @@ func TestCPULimitWaitFor(t *testing.T) {
wg := sync.WaitGroup{}
wg.Add(1)
- l := NewLimiter(LimiterConfig{
+ l, _ := NewLimiter(LimiterConfig{
CPU: 42,
WaitFor: 3 * time.Second,
- OnLimit: func(float64, uint64) {
+ OnLimit: func(float64, uint64, float64, float64, float64, uint64) {
wg.Done()
},
})
- l.Start(&psproc{})
+ l.Start(&proc{})
defer l.Stop()
wg.Wait()
@@ -94,7 +102,7 @@ func TestCPULimitWaitFor(t *testing.T) {
lock.Unlock()
}()
- assert.Eventually(t, func() bool {
+ require.Eventually(t, func() bool {
lock.Lock()
defer lock.Unlock()
@@ -113,14 +121,14 @@ func TestMemoryLimit(t *testing.T) {
wg := sync.WaitGroup{}
wg.Add(1)
- l := NewLimiter(LimiterConfig{
+ l, _ := NewLimiter(LimiterConfig{
Memory: 42,
- OnLimit: func(float64, uint64) {
+ OnLimit: func(float64, uint64, float64, float64, float64, uint64) {
wg.Done()
},
})
- l.Start(&psproc{})
+ l.Start(&proc{})
defer l.Stop()
wg.Wait()
@@ -130,7 +138,7 @@ func TestMemoryLimit(t *testing.T) {
lock.Unlock()
}()
- assert.Eventually(t, func() bool {
+ require.Eventually(t, func() bool {
lock.Lock()
defer lock.Unlock()
@@ -149,15 +157,88 @@ func TestMemoryLimitWaitFor(t *testing.T) {
wg := sync.WaitGroup{}
wg.Add(1)
- l := NewLimiter(LimiterConfig{
+ l, _ := NewLimiter(LimiterConfig{
Memory: 42,
WaitFor: 3 * time.Second,
- OnLimit: func(float64, uint64) {
+ OnLimit: func(float64, uint64, float64, float64, float64, uint64) {
+ wg.Done()
+ },
+ })
+
+ l.Start(&proc{})
+ defer l.Stop()
+
+ wg.Wait()
+
+ lock.Lock()
+ done = true
+ lock.Unlock()
+ }()
+
+ require.Eventually(t, func() bool {
+ lock.Lock()
+ defer lock.Unlock()
+
+ return done
+ }, 10*time.Second, 1*time.Second)
+}
+
+func TestGPUMemoryLimit(t *testing.T) {
+ lock := sync.Mutex{}
+
+ lock.Lock()
+ done := false
+ lock.Unlock()
+
+ go func() {
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+
+ l, _ := NewLimiter(LimiterConfig{
+ GPUMemory: 42,
+ OnLimit: func(float64, uint64, float64, float64, float64, uint64) {
+ wg.Done()
+ },
+ })
+
+ l.Start(&proc{})
+ defer l.Stop()
+
+ wg.Wait()
+
+ lock.Lock()
+ done = true
+ lock.Unlock()
+ }()
+
+ require.Eventually(t, func() bool {
+ lock.Lock()
+ defer lock.Unlock()
+
+ return done
+ }, 2*time.Second, 100*time.Millisecond)
+}
+
+func TestGPUMemoryLimitWaitFor(t *testing.T) {
+ lock := sync.Mutex{}
+
+ lock.Lock()
+ done := false
+ lock.Unlock()
+
+ go func() {
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+
+ l, _ := NewLimiter(LimiterConfig{
+ GPUMemory: 42,
+ WaitFor: 3 * time.Second,
+ OnLimit: func(float64, uint64, float64, float64, float64, uint64) {
wg.Done()
},
})
- l.Start(&psproc{})
+ l.Start(&proc{})
defer l.Stop()
wg.Wait()
@@ -167,7 +248,7 @@ func TestMemoryLimitWaitFor(t *testing.T) {
lock.Unlock()
}()
- assert.Eventually(t, func() bool {
+ require.Eventually(t, func() bool {
lock.Lock()
defer lock.Unlock()
@@ -186,18 +267,57 @@ func TestMemoryLimitSoftMode(t *testing.T) {
wg := sync.WaitGroup{}
wg.Add(1)
- l := NewLimiter(LimiterConfig{
+ l, _ := NewLimiter(LimiterConfig{
Memory: 42,
Mode: LimitModeSoft,
- OnLimit: func(float64, uint64) {
+ OnLimit: func(float64, uint64, float64, float64, float64, uint64) {
+ wg.Done()
+ },
+ })
+
+ l.Start(&proc{})
+ defer l.Stop()
+
+ l.Limit(false, true, false)
+
+ wg.Wait()
+
+ lock.Lock()
+ done = true
+ lock.Unlock()
+ }()
+
+ require.Eventually(t, func() bool {
+ lock.Lock()
+ defer lock.Unlock()
+
+ return done
+ }, 2*time.Second, 100*time.Millisecond)
+}
+
+func TestGPUMemoryLimitSoftMode(t *testing.T) {
+ lock := sync.Mutex{}
+
+ lock.Lock()
+ done := false
+ lock.Unlock()
+
+ go func() {
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+
+ l, _ := NewLimiter(LimiterConfig{
+ GPUMemory: 42,
+ Mode: LimitModeSoft,
+ OnLimit: func(float64, uint64, float64, float64, float64, uint64) {
wg.Done()
},
})
- l.Start(&psproc{})
+ l.Start(&proc{})
defer l.Stop()
- l.Limit(false, true)
+ l.Limit(false, false, true)
wg.Wait()
@@ -206,7 +326,7 @@ func TestMemoryLimitSoftMode(t *testing.T) {
lock.Unlock()
}()
- assert.Eventually(t, func() bool {
+ require.Eventually(t, func() bool {
lock.Lock()
defer lock.Unlock()
diff --git a/process/process.go b/process/process.go
index c6bc01c8..1f757d74 100644
--- a/process/process.go
+++ b/process/process.go
@@ -18,7 +18,7 @@ import (
"unicode/utf8"
"github.com/datarhei/core/v16/log"
- "github.com/datarhei/core/v16/psutil"
+ "github.com/datarhei/core/v16/resources"
)
// Process represents a process and ways to control it
@@ -43,32 +43,36 @@ type Process interface {
// running or not.
IsRunning() bool
- // Limit enabled or disables CPU and memory limiting. CPU will be throttled
+ // Limit enables or disables CPU and memory limiting. CPU will be throttled
// into the configured limit. If memory consumption is above the configured
// limit, the process will be killed.
- Limit(cpu, memory bool) error
+ Limit(cpu, memory, gpu bool) error
}
// Config is the configuration of a process
type Config struct {
- Binary string // Path to the ffmpeg binary.
- Args []string // List of arguments for the binary.
- Reconnect bool // Whether to restart the process if it exited.
- ReconnectDelay time.Duration // Duration to wait before restarting the process.
- StaleTimeout time.Duration // Kill the process after this duration if it doesn't produce any output.
- Timeout time.Duration // Kill the process after this duration.
- LimitCPU float64 // Kill the process if the CPU usage in percent is above this value.
- LimitMemory uint64 // Kill the process if the memory consumption in bytes is above this value.
- LimitDuration time.Duration // Kill the process if the limits are exceeded for this duration.
- LimitMode LimitMode // Select limiting mode
- Scheduler Scheduler // A scheduler.
- Parser Parser // A parser for the output of the process.
- OnArgs func(args []string) []string // A callback which is called right before the process will start with the command args.
- OnBeforeStart func() error // A callback which is called before the process will be started. If error is non-nil, the start will be refused.
- OnStart func() // A callback which is called after the process started.
- OnExit func(state string) // A callback which is called after the process exited with the exit state.
- OnStateChange func(from, to string) // A callback which is called after a state changed.
- Logger log.Logger
+ Binary string // Path to the ffmpeg binary.
+ Args []string // List of arguments for the binary.
+ Reconnect bool // Whether to restart the process if it exited.
+ ReconnectDelay time.Duration // Duration to wait before restarting the process.
+ StaleTimeout time.Duration // Kill the process after this duration if it doesn't produce any output.
+ Timeout time.Duration // Kill the process after this duration.
+ LimitCPU float64 // Kill the process if the CPU usage in percent is above this value, in percent 0-100 in hard mode, 0-100*ncpu in soft mode.
+ LimitMemory uint64 // Kill the process if the memory consumption in bytes is above this value.
+ LimitGPUUsage float64 // Kill the process if the GPU usage in percent is above this value, in percent 0-100.
+ LimitGPUEncoder float64 // Kill the process if the GPU encoder usage in percent is above this value, in percent 0-100.
+ LimitGPUDecoder float64 // Kill the process if the GPU decoder usage in percent is above this value, in percent 0-100.
+ LimitGPUMemory uint64 // Kill the process if the GPU memory consumption in bytes is above this value.
+ LimitDuration time.Duration // Kill the process if the limits are exceeded for this duration.
+ LimitMode LimitMode // Select limiting mode
+ Scheduler Scheduler // A scheduler.
+ Parser Parser // A parser for the output of the process.
+ OnBeforeStart func(args []string) ([]string, error) // A callback which is called before the process will be started. The string slice is the arguments of the command line. If error is non-nil, the start will be refused.
+ OnStart func() // A callback which is called after the process started.
+ OnExit func(state string) // A callback which is called after the process exited with the exit state.
+ OnStateChange func(from, to string) // A callback which is called after a state changed.
+ Resources resources.Resources
+ Logger log.Logger
}
// Status represents the current status of a process
@@ -81,20 +85,47 @@ type Status struct {
Time time.Time // Time is the time of the last change of the state
CommandArgs []string // Currently running command arguments
LimitMode string // The limiting mode
- CPU struct {
- NCPU float64 // Number of logical CPUs
- Current float64 // Currently consumed CPU in percent
- Average float64 // Average consumed CPU in percent
- Max float64 // Max. consumed CPU in percent
- Limit float64 // Usage limit in percent
- IsThrottling bool // Whether the CPU is currently limited
- } // Used CPU in percent
- Memory struct {
- Current uint64 // Currently consumed memory in bytes
- Average float64 // Average consumed memory in bytes
- Max uint64 // Max. consumed memory in bytes
- Limit uint64 // Usage limit in bytes
- } // Used memory in bytes
+ CPU StatusCPU // CPU consumption in percent
+ Memory StatusMemory // Memory consumption in bytes
+ GPU StatusGPU // GPU consumption
+}
+
+type StatusCPU struct {
+ NCPU float64 // Number of logical CPUs
+ Current float64 // Currently consumed CPU in percent
+ Average float64 // Average consumed CPU in percent
+ Max float64 // Max. consumed CPU in percent
+ Limit float64 // Usage limit in percent
+ IsThrottling bool // Whether the CPU is currently limited
+}
+
+type StatusMemory struct {
+ Current uint64 // Currently consumed memory in bytes
+ Average uint64 // Average consumed memory in bytes
+ Max uint64 // Max. consumed memory in bytes
+ Limit uint64 // Usage limit in bytes
+}
+
+type StatusGPUMemory struct {
+ Current uint64 // Currently consumed memory in bytes
+ Average uint64 // Average consumed memory in bytes
+ Max uint64 // Max. consumed memory in bytes
+ Limit uint64 // Usage limit in bytes
+}
+
+type StatusGPUUsage struct {
+ Current float64 // Currently consumed GPU usage in percent
+ Average float64 // Average consumed GPU usage in percent
+ Max float64 // Max. consumed GPU usage in percent
+ Limit float64 // Usage limit in percent
+}
+
+type StatusGPU struct {
+ Index int
+ Memory StatusGPUMemory // GPU memory consumption
+ Usage StatusGPUUsage // GPU usage in percent
+ Encoder StatusGPUUsage // GPU encoder usage in percent
+ Decoder StatusGPUUsage // GPU decoder usage in percent
}
// States
@@ -167,12 +198,13 @@ type States struct {
// Process represents a ffmpeg process
type process struct {
- binary string
- args []string
- cmd *exec.Cmd
- pid int32
- stdout io.ReadCloser
- state struct {
+ binary string
+ args []string
+ cmdArgs []string
+ cmd *exec.Cmd
+ pid int32
+ stdout io.ReadCloser
+ state struct {
state stateType
time time.Time
states States
@@ -206,8 +238,7 @@ type process struct {
logger log.Logger
debuglogger log.Logger
callbacks struct {
- onArgs func(args []string) []string
- onBeforeStart func() error
+ onBeforeStart func(args []string) ([]string, error)
onStart func()
onExit func(state string)
onStateChange func(from, to string)
@@ -215,6 +246,7 @@ type process struct {
}
limits Limiter
scheduler Scheduler
+ resources resources.Resources
}
var _ Process = &process{}
@@ -228,10 +260,17 @@ func New(config Config) (Process, error) {
parser: config.Parser,
logger: config.Logger,
scheduler: config.Scheduler,
+ resources: config.Resources,
+ }
+
+ if p.resources == nil {
+ return nil, fmt.Errorf("resources are required")
}
p.args = make([]string, len(config.Args))
copy(p.args, config.Args)
+ p.cmdArgs = make([]string, len(config.Args))
+ copy(p.cmdArgs, config.Args)
// This is a loose check on purpose. If the e.g. the binary
// doesn't exist or it is not executable, it will be
@@ -263,30 +302,44 @@ func New(config Config) (Process, error) {
p.stale.last = time.Now()
p.stale.timeout = config.StaleTimeout
- p.callbacks.onArgs = config.OnArgs
p.callbacks.onBeforeStart = config.OnBeforeStart
p.callbacks.onStart = config.OnStart
p.callbacks.onExit = config.OnExit
p.callbacks.onStateChange = config.OnStateChange
- p.limits = NewLimiter(LimiterConfig{
- CPU: config.LimitCPU,
- Memory: config.LimitMemory,
- WaitFor: config.LimitDuration,
- Mode: config.LimitMode,
- Logger: p.logger.WithComponent("ProcessLimiter"),
- OnLimit: func(cpu float64, memory uint64) {
+ ncpu := p.resources.Info().CPU.NCPU
+
+ limits, err := NewLimiter(LimiterConfig{
+ CPU: config.LimitCPU,
+ NCPU: ncpu,
+ Memory: config.LimitMemory,
+ GPUUsage: config.LimitGPUUsage,
+ GPUEncoder: config.LimitGPUEncoder,
+ GPUDecoder: config.LimitGPUDecoder,
+ GPUMemory: config.LimitGPUMemory,
+ WaitFor: config.LimitDuration,
+ Mode: config.LimitMode,
+ Logger: p.logger.WithComponent("ProcessLimiter"),
+ OnLimit: func(cpu float64, memory uint64, gpuusage, gpuencoder, gpudecoder float64, gpumemory uint64) {
if !p.isRunning() {
return
}
p.logger.WithFields(log.Fields{
- "cpu": cpu,
- "memory": memory,
+ "cpu": cpu,
+ "memory": memory,
+ "gpuusage": gpuusage,
+ "gpuencoder": gpuencoder,
+ "gpudecoder": gpudecoder,
+ "gpumemmory": gpumemory,
}).Warn().Log("Killed because limits are exceeded")
- p.Kill(false, fmt.Sprintf("Killed because limits are exceeded (mode: %s, tolerance: %s): %.2f (%.2f) CPU, %d (%d) bytes memory", config.LimitMode.String(), config.LimitDuration.String(), cpu, config.LimitCPU, memory, config.LimitMemory))
+ p.Kill(false, fmt.Sprintf("Killed because limits are exceeded (mode: %s, tolerance: %s): %.2f (%.2f) CPU, %d (%d) bytes memory, %.2f/%.2f/%.2f (%.2f) GPU usage, %d (%d) bytes GPU memory", config.LimitMode.String(), config.LimitDuration.String(), cpu, config.LimitCPU, memory, config.LimitMemory, gpuusage, gpuencoder, gpudecoder, config.LimitGPUUsage, gpumemory, config.LimitGPUMemory))
},
})
+ if err != nil {
+ return nil, fmt.Errorf("failed to initialize limiter: %w", err)
+ }
+ p.limits = limits
p.logger.Info().Log("Created")
p.debuglogger.Debug().Log("Created")
@@ -467,12 +520,51 @@ func (p *process) Status() Status {
Duration: time.Since(stateTime),
Time: stateTime,
LimitMode: p.limits.Mode().String(),
- CPU: usage.CPU,
- Memory: usage.Memory,
+ CPU: StatusCPU{
+ NCPU: usage.CPU.NCPU,
+ Current: usage.CPU.Current,
+ Average: usage.CPU.Average,
+ Max: usage.CPU.Max,
+ Limit: usage.CPU.Limit,
+ IsThrottling: usage.CPU.IsThrottling,
+ },
+ Memory: StatusMemory{
+ Current: usage.Memory.Current,
+ Average: uint64(usage.Memory.Average),
+ Max: usage.Memory.Max,
+ Limit: usage.Memory.Limit,
+ },
+ GPU: StatusGPU{
+ Index: usage.GPU.Index,
+ Memory: StatusGPUMemory{
+ Current: usage.GPU.Memory.Current,
+ Average: uint64(usage.GPU.Memory.Average),
+ Max: usage.GPU.Memory.Max,
+ Limit: usage.GPU.Memory.Limit,
+ },
+ Usage: StatusGPUUsage{
+ Current: usage.GPU.Usage.Current,
+ Average: usage.GPU.Usage.Average,
+ Max: usage.GPU.Usage.Max,
+ Limit: usage.GPU.Usage.Limit,
+ },
+ Encoder: StatusGPUUsage{
+ Current: usage.GPU.Encoder.Current,
+ Average: usage.GPU.Encoder.Average,
+ Max: usage.GPU.Encoder.Max,
+ Limit: usage.GPU.Encoder.Limit,
+ },
+ Decoder: StatusGPUUsage{
+ Current: usage.GPU.Decoder.Current,
+ Average: usage.GPU.Decoder.Average,
+ Max: usage.GPU.Decoder.Max,
+ Limit: usage.GPU.Decoder.Limit,
+ },
+ },
}
- s.CommandArgs = make([]string, len(p.args))
- copy(s.CommandArgs, p.args)
+ s.CommandArgs = make([]string, len(p.cmdArgs))
+ copy(s.CommandArgs, p.cmdArgs)
if order == "start" && !state.IsRunning() {
p.reconn.lock.Lock()
@@ -488,7 +580,7 @@ func (p *process) IsRunning() bool {
return p.isRunning()
}
-func (p *process) Limit(cpu, memory bool) error {
+func (p *process) Limit(cpu, memory, gpu bool) error {
if !p.isRunning() {
return nil
}
@@ -498,11 +590,12 @@ func (p *process) Limit(cpu, memory bool) error {
}
p.logger.Warn().WithFields(log.Fields{
- "limit_cpu": cpu,
- "limit_memory": memory,
+ "limit_cpu": cpu,
+ "limit_memory": memory,
+ "limit_gpumemory": gpu,
}).Log("Limiter triggered")
- return p.limits.Limit(cpu, memory)
+ return p.limits.Limit(cpu, memory, gpu)
}
// Start will start the process and sets the order to "start". If the
@@ -559,11 +652,23 @@ func (p *process) start() error {
args := p.args
- if p.callbacks.onArgs != nil {
+ if p.callbacks.onBeforeStart != nil {
args = make([]string, len(p.args))
copy(args, p.args)
- args = p.callbacks.onArgs(args)
+ args, err = p.callbacks.onBeforeStart(args)
+ if err != nil {
+ p.setState(stateFailed)
+
+ p.parser.Parse([]byte(err.Error()))
+ p.logger.WithError(err).Error().Log("Starting failed")
+
+ p.reconnect(p.delay(stateFailed))
+
+ return err
+ }
+
+ p.cmdArgs = args
}
p.cmd = exec.Command(p.binary, args...)
@@ -582,19 +687,6 @@ func (p *process) start() error {
return err
}
- if p.callbacks.onBeforeStart != nil {
- if err := p.callbacks.onBeforeStart(); err != nil {
- p.setState(stateFailed)
-
- p.parser.Parse([]byte(err.Error()))
- p.logger.WithError(err).Error().Log("Starting failed")
-
- p.reconnect(p.delay(stateFailed))
-
- return err
- }
- }
-
if err := p.cmd.Start(); err != nil {
p.setState(stateFailed)
@@ -630,7 +722,7 @@ func (p *process) start() error {
p.pid = int32(p.cmd.Process.Pid)
- if proc, err := psutil.NewProcess(p.pid, false); err == nil {
+ if proc, err := p.resources.Process(p.pid); err == nil {
p.limits.Start(proc)
}
diff --git a/process/process_test.go b/process/process_test.go
index 11c669b9..7ce6542d 100644
--- a/process/process_test.go
+++ b/process/process_test.go
@@ -10,9 +10,20 @@ import (
"github.com/datarhei/core/v16/internal/testhelper"
"github.com/datarhei/core/v16/math/rand"
+ "github.com/datarhei/core/v16/resources"
+ "github.com/datarhei/core/v16/resources/psutil"
"github.com/stretchr/testify/require"
)
+func newResources() resources.Resources {
+ util, _ := psutil.New("", nil)
+ res, _ := resources.New(resources.Config{
+ PSUtil: util,
+ })
+
+ return res
+}
+
func TestProcess(t *testing.T) {
p, _ := New(Config{
Binary: "sleep",
@@ -21,6 +32,7 @@ func TestProcess(t *testing.T) {
},
Reconnect: false,
StaleTimeout: 0,
+ Resources: newResources(),
})
require.Equal(t, "finished", p.Status().State)
@@ -59,6 +71,7 @@ func TestReconnectProcess(t *testing.T) {
OnExit: func(string) {
wg.Done()
},
+ Resources: newResources(),
})
p.Start()
@@ -104,6 +117,7 @@ func TestStaleProcess(t *testing.T) {
},
Reconnect: false,
StaleTimeout: 2 * time.Second,
+ Resources: newResources(),
})
p.Start()
@@ -126,6 +140,7 @@ func TestStaleReconnectProcess(t *testing.T) {
Reconnect: true,
ReconnectDelay: 2 * time.Second,
StaleTimeout: 3 * time.Second,
+ Resources: newResources(),
})
p.Start()
@@ -156,6 +171,7 @@ func TestNonExistingProcess(t *testing.T) {
Reconnect: false,
ReconnectDelay: 5 * time.Second,
StaleTimeout: 0,
+ Resources: newResources(),
})
p.Start()
@@ -180,6 +196,7 @@ func TestNonExistingReconnectProcess(t *testing.T) {
Reconnect: true,
ReconnectDelay: 2 * time.Second,
StaleTimeout: 0,
+ Resources: newResources(),
})
p.Start()
@@ -203,6 +220,7 @@ func TestProcessFailed(t *testing.T) {
},
Reconnect: false,
StaleTimeout: 0,
+ Resources: newResources(),
})
p.Start()
@@ -217,7 +235,7 @@ func TestProcessFailed(t *testing.T) {
}
func TestFFmpegWaitStop(t *testing.T) {
- binary, err := testhelper.BuildBinary("sigintwait", "../internal/testhelper")
+ binary, err := testhelper.BuildBinary("sigintwait")
require.NoError(t, err, "Failed to build helper program")
p, _ := New(Config{
@@ -228,6 +246,7 @@ func TestFFmpegWaitStop(t *testing.T) {
OnExit: func(state string) {
time.Sleep(3 * time.Second)
},
+ Resources: newResources(),
})
err = p.Start()
@@ -247,7 +266,7 @@ func TestFFmpegWaitStop(t *testing.T) {
}
func TestFFmpegKill(t *testing.T) {
- binary, err := testhelper.BuildBinary("sigint", "../internal/testhelper")
+ binary, err := testhelper.BuildBinary("sigint")
require.NoError(t, err, "Failed to build helper program")
p, _ := New(Config{
@@ -255,6 +274,7 @@ func TestFFmpegKill(t *testing.T) {
Args: []string{},
Reconnect: false,
StaleTimeout: 0,
+ Resources: newResources(),
})
err = p.Start()
@@ -272,7 +292,7 @@ func TestFFmpegKill(t *testing.T) {
}
func TestProcessForceKill(t *testing.T) {
- binary, err := testhelper.BuildBinary("ignoresigint", "../internal/testhelper")
+ binary, err := testhelper.BuildBinary("ignoresigint")
require.NoError(t, err, "Failed to build helper program")
p, _ := New(Config{
@@ -280,6 +300,7 @@ func TestProcessForceKill(t *testing.T) {
Args: []string{},
Reconnect: false,
StaleTimeout: 0,
+ Resources: newResources(),
})
err = p.Start()
@@ -305,13 +326,14 @@ func TestProcessForceKill(t *testing.T) {
}
func TestProcessDuration(t *testing.T) {
- binary, err := testhelper.BuildBinary("sigint", "../internal/testhelper")
+ binary, err := testhelper.BuildBinary("sigint")
require.NoError(t, err, "Failed to build helper program")
p, err := New(Config{
- Binary: binary,
- Args: []string{},
- Timeout: 3 * time.Second,
+ Binary: binary,
+ Args: []string{},
+ Timeout: 3 * time.Second,
+ Resources: newResources(),
})
require.NoError(t, err)
@@ -358,6 +380,7 @@ func TestProcessSchedulePointInTime(t *testing.T) {
},
Reconnect: false,
Scheduler: s,
+ Resources: newResources(),
})
status := p.Status()
@@ -399,6 +422,7 @@ func TestProcessSchedulePointInTimeGone(t *testing.T) {
},
Reconnect: false,
Scheduler: s,
+ Resources: newResources(),
})
status := p.Status()
@@ -424,6 +448,7 @@ func TestProcessScheduleCron(t *testing.T) {
},
Reconnect: false,
Scheduler: s,
+ Resources: newResources(),
})
status := p.Status()
@@ -454,6 +479,7 @@ func TestProcessDelayNoScheduler(t *testing.T) {
Binary: "sleep",
Reconnect: false,
ReconnectDelay: 5 * time.Second,
+ Resources: newResources(),
})
px := p.(*process)
@@ -470,6 +496,7 @@ func TestProcessDelayNoScheduler(t *testing.T) {
Binary: "sleep",
Reconnect: true,
ReconnectDelay: 5 * time.Second,
+ Resources: newResources(),
})
px = p.(*process)
@@ -493,6 +520,7 @@ func TestProcessDelaySchedulerNoReconnect(t *testing.T) {
Reconnect: false,
ReconnectDelay: 1 * time.Second,
Scheduler: s,
+ Resources: newResources(),
})
px := p.(*process)
@@ -514,6 +542,7 @@ func TestProcessDelaySchedulerNoReconnect(t *testing.T) {
Reconnect: false,
ReconnectDelay: 1 * time.Second,
Scheduler: s,
+ Resources: newResources(),
})
px = p.(*process)
@@ -537,6 +566,7 @@ func TestProcessDelaySchedulerReconnect(t *testing.T) {
Reconnect: true,
ReconnectDelay: 1 * time.Second,
Scheduler: s,
+ Resources: newResources(),
})
px := p.(*process)
@@ -558,6 +588,7 @@ func TestProcessDelaySchedulerReconnect(t *testing.T) {
Reconnect: true,
ReconnectDelay: 1 * time.Second,
Scheduler: s,
+ Resources: newResources(),
})
px = p.(*process)
@@ -579,6 +610,7 @@ func TestProcessDelaySchedulerReconnect(t *testing.T) {
Reconnect: true,
ReconnectDelay: 10 * time.Second,
Scheduler: s,
+ Resources: newResources(),
})
px = p.(*process)
@@ -606,21 +638,15 @@ func TestProcessCallbacks(t *testing.T) {
"2",
},
Reconnect: false,
- OnArgs: func(a []string) []string {
- lock.Lock()
- defer lock.Unlock()
-
- args = make([]string, len(a))
- copy(args, a)
- return a
- },
- OnBeforeStart: func() error {
+ OnBeforeStart: func(a []string) ([]string, error) {
lock.Lock()
defer lock.Unlock()
onBeforeStart = true
- return nil
+ args = make([]string, len(a))
+ copy(args, a)
+ return a, nil
},
OnStart: func() {
lock.Lock()
@@ -642,6 +668,7 @@ func TestProcessCallbacks(t *testing.T) {
onState = append(onState, from+"/"+to)
},
+ Resources: newResources(),
})
require.NoError(t, err)
@@ -681,9 +708,10 @@ func TestProcessCallbacksOnBeforeStart(t *testing.T) {
Parser: parser,
Reconnect: true,
ReconnectDelay: 10 * time.Second,
- OnBeforeStart: func() error {
- return fmt.Errorf("no, not now")
+ OnBeforeStart: func(a []string) ([]string, error) {
+ return a, fmt.Errorf("no, not now")
},
+ Resources: newResources(),
})
require.NoError(t, err)
diff --git a/psutil/fixtures/cgroup-limited/cpu/cpu.cfs_period_us b/resources/psutil/fixtures/cgroup-limited/cpu/cpu.cfs_period_us
similarity index 100%
rename from psutil/fixtures/cgroup-limited/cpu/cpu.cfs_period_us
rename to resources/psutil/fixtures/cgroup-limited/cpu/cpu.cfs_period_us
diff --git a/psutil/fixtures/cgroup-limited/cpu/cpu.cfs_quota_us b/resources/psutil/fixtures/cgroup-limited/cpu/cpu.cfs_quota_us
similarity index 100%
rename from psutil/fixtures/cgroup-limited/cpu/cpu.cfs_quota_us
rename to resources/psutil/fixtures/cgroup-limited/cpu/cpu.cfs_quota_us
diff --git a/psutil/fixtures/cgroup-limited/cpuacct/cpuacct.usage b/resources/psutil/fixtures/cgroup-limited/cpuacct/cpuacct.usage
similarity index 100%
rename from psutil/fixtures/cgroup-limited/cpuacct/cpuacct.usage
rename to resources/psutil/fixtures/cgroup-limited/cpuacct/cpuacct.usage
diff --git a/psutil/fixtures/cgroup-limited/memory/memory.limit_in_bytes b/resources/psutil/fixtures/cgroup-limited/memory/memory.limit_in_bytes
similarity index 100%
rename from psutil/fixtures/cgroup-limited/memory/memory.limit_in_bytes
rename to resources/psutil/fixtures/cgroup-limited/memory/memory.limit_in_bytes
diff --git a/psutil/fixtures/cgroup-limited/memory/memory.usage_in_bytes b/resources/psutil/fixtures/cgroup-limited/memory/memory.usage_in_bytes
similarity index 100%
rename from psutil/fixtures/cgroup-limited/memory/memory.usage_in_bytes
rename to resources/psutil/fixtures/cgroup-limited/memory/memory.usage_in_bytes
diff --git a/psutil/fixtures/cgroup/cpu/cpu.cfs_period_us b/resources/psutil/fixtures/cgroup/cpu/cpu.cfs_period_us
similarity index 100%
rename from psutil/fixtures/cgroup/cpu/cpu.cfs_period_us
rename to resources/psutil/fixtures/cgroup/cpu/cpu.cfs_period_us
diff --git a/psutil/fixtures/cgroup/cpu/cpu.cfs_quota_us b/resources/psutil/fixtures/cgroup/cpu/cpu.cfs_quota_us
similarity index 100%
rename from psutil/fixtures/cgroup/cpu/cpu.cfs_quota_us
rename to resources/psutil/fixtures/cgroup/cpu/cpu.cfs_quota_us
diff --git a/psutil/fixtures/cgroup/cpuacct/cpuacct.usage b/resources/psutil/fixtures/cgroup/cpuacct/cpuacct.usage
similarity index 100%
rename from psutil/fixtures/cgroup/cpuacct/cpuacct.usage
rename to resources/psutil/fixtures/cgroup/cpuacct/cpuacct.usage
diff --git a/psutil/fixtures/cgroup/memory/memory.limit_in_bytes b/resources/psutil/fixtures/cgroup/memory/memory.limit_in_bytes
similarity index 100%
rename from psutil/fixtures/cgroup/memory/memory.limit_in_bytes
rename to resources/psutil/fixtures/cgroup/memory/memory.limit_in_bytes
diff --git a/psutil/fixtures/cgroup/memory/memory.usage_in_bytes b/resources/psutil/fixtures/cgroup/memory/memory.usage_in_bytes
similarity index 100%
rename from psutil/fixtures/cgroup/memory/memory.usage_in_bytes
rename to resources/psutil/fixtures/cgroup/memory/memory.usage_in_bytes
diff --git a/psutil/fixtures/cgroup2-limited/cpu.max b/resources/psutil/fixtures/cgroup2-limited/cpu.max
similarity index 100%
rename from psutil/fixtures/cgroup2-limited/cpu.max
rename to resources/psutil/fixtures/cgroup2-limited/cpu.max
diff --git a/psutil/fixtures/cgroup2-limited/cpu.stat b/resources/psutil/fixtures/cgroup2-limited/cpu.stat
similarity index 100%
rename from psutil/fixtures/cgroup2-limited/cpu.stat
rename to resources/psutil/fixtures/cgroup2-limited/cpu.stat
diff --git a/psutil/fixtures/cgroup2-limited/memory.current b/resources/psutil/fixtures/cgroup2-limited/memory.current
similarity index 100%
rename from psutil/fixtures/cgroup2-limited/memory.current
rename to resources/psutil/fixtures/cgroup2-limited/memory.current
diff --git a/psutil/fixtures/cgroup2-limited/memory.max b/resources/psutil/fixtures/cgroup2-limited/memory.max
similarity index 100%
rename from psutil/fixtures/cgroup2-limited/memory.max
rename to resources/psutil/fixtures/cgroup2-limited/memory.max
diff --git a/psutil/fixtures/cgroup2/cpu.max b/resources/psutil/fixtures/cgroup2/cpu.max
similarity index 100%
rename from psutil/fixtures/cgroup2/cpu.max
rename to resources/psutil/fixtures/cgroup2/cpu.max
diff --git a/psutil/fixtures/cgroup2/cpu.stat b/resources/psutil/fixtures/cgroup2/cpu.stat
similarity index 100%
rename from psutil/fixtures/cgroup2/cpu.stat
rename to resources/psutil/fixtures/cgroup2/cpu.stat
diff --git a/psutil/fixtures/cgroup2/memory.current b/resources/psutil/fixtures/cgroup2/memory.current
similarity index 100%
rename from psutil/fixtures/cgroup2/memory.current
rename to resources/psutil/fixtures/cgroup2/memory.current
diff --git a/psutil/fixtures/cgroup2/memory.max b/resources/psutil/fixtures/cgroup2/memory.max
similarity index 100%
rename from psutil/fixtures/cgroup2/memory.max
rename to resources/psutil/fixtures/cgroup2/memory.max
diff --git a/resources/psutil/gpu/gpu.go b/resources/psutil/gpu/gpu.go
new file mode 100644
index 00000000..dee49f5e
--- /dev/null
+++ b/resources/psutil/gpu/gpu.go
@@ -0,0 +1,57 @@
+package gpu
+
+import "errors"
+
+type Process struct {
+ PID int32
+ Index int
+ Memory uint64 // bytes
+ Usage float64 // percent 0-100
+ Encoder float64 // percent 0-100
+ Decoder float64 // percent 0-100
+}
+
+type Stats struct {
+ Index int
+ ID string
+ Name string
+ Architecture string
+
+ MemoryTotal uint64 // bytes
+ MemoryUsed uint64 // bytes
+
+ Usage float64 // percent 0-100
+ Encoder float64 // percent 0-100
+ Decoder float64 // percent 0-100
+
+ Process []Process
+
+ Extension interface{}
+}
+
+type GPU interface {
+	// Count returns the number of GPUs in the system.
+ Count() (int, error)
+
+ // Stats returns current GPU stats.
+ Stats() ([]Stats, error)
+
+	// Process returns the GPU stats for the process with the given PID.
+ Process(pid int32) (Process, error)
+
+	// Close stops all GPU data collection processes.
+ Close()
+}
+
+var ErrProcessNotFound = errors.New("process not found")
+
+type dummy struct{}
+
+func (d *dummy) Count() (int, error) { return 0, nil }
+func (d *dummy) Stats() ([]Stats, error) { return nil, nil }
+func (d *dummy) Process(pid int32) (Process, error) { return Process{}, ErrProcessNotFound }
+func (d *dummy) Close() {}
+
+func NewNilGPU() GPU {
+ return &dummy{}
+}
diff --git a/resources/psutil/gpu/nvidia/fixtures/process.txt b/resources/psutil/gpu/nvidia/fixtures/process.txt
new file mode 100644
index 00000000..55d7bcf4
--- /dev/null
+++ b/resources/psutil/gpu/nvidia/fixtures/process.txt
@@ -0,0 +1,54 @@
+# gpu pid type sm mem enc dec fb command
+# Idx # C/G % % % % MB name
+ 0 7372 C 2 0 2 - 136 ffmpeg
+ 0 12176 C 5 2 3 7 782 ffmpeg
+ 0 20035 C 8 2 4 1 1145 ffmpeg
+ 0 20141 C 2 1 1 3 429 ffmpeg
+ 0 29591 C 2 1 - 2 435 ffmpeg
+ 0 7372 C 2 0 - - 136 ffmpeg
+ 0 12176 C 8 3 7 9 782 ffmpeg
+ 0 20035 C 8 2 3 1 1145 ffmpeg
+ 0 20141 C - - 1 1 429 ffmpeg
+ 0 29591 C 3 1 - 2 435 ffmpeg
+ 0 7372 C 2 1 1 - 136 ffmpeg
+ 0 12176 C 5 1 5 7 782 ffmpeg
+ 0 20035 C 8 3 1 4 1145 ffmpeg
+ 0 20141 C 2 0 1 - 429 ffmpeg
+ 0 29591 C 2 0 1 3 435 ffmpeg
+ 0 7372 C 2 0 - - 136 ffmpeg
+ 0 12176 C 5 1 5 3 782 ffmpeg
+ 0 20035 C 8 2 5 4 1145 ffmpeg
+ 0 20141 C 3 1 - 5 429 ffmpeg
+ 0 29591 C 2 0 - 1 435 ffmpeg
+ 0 7372 C 2 1 - - 136 ffmpeg
+ 0 12176 C 10 3 6 8 782 ffmpeg
+ 0 20035 C 3 1 1 1 1145 ffmpeg
+ 0 20141 C - - 4 1 429 ffmpeg
+ 0 29591 C 5 2 - 2 435 ffmpeg
+ 0 7372 C 5 1 2 - 136 ffmpeg
+ 0 12176 C 6 2 4 7 782 ffmpeg
+ 0 20035 C - - - - 1145 ffmpeg
+ 0 20141 C 5 1 1 3 429 ffmpeg
+ 0 29591 C 5 2 2 4 435 ffmpeg
+ 0 7372 C - - 1 - 136 ffmpeg
+ 0 12176 C 7 2 3 4 782 ffmpeg
+ 0 20035 C 2 0 - 1 1145 ffmpeg
+ 0 20141 C 7 2 4 4 429 ffmpeg
+ 0 29591 C 5 1 2 3 435 ffmpeg
+ 0 7372 C 2 0 1 - 136 ffmpeg
+ 0 12176 C 9 3 3 6 782 ffmpeg
+ 0 20035 C 2 1 - 1 1145 ffmpeg
+ 0 20141 C 4 1 4 5 429 ffmpeg
+ 0 29591 C 2 0 2 1 435 ffmpeg
+ 0 7372 C - - - - 136 ffmpeg
+ 0 12176 C 10 3 4 8 782 ffmpeg
+ 0 20035 C 4 1 2 1 1145 ffmpeg
+ 0 20141 C 7 2 3 3 429 ffmpeg
+# gpu pid type sm mem enc dec fb command
+# Idx # C/G % % % % MB name
+ 0 29591 C - - 1 1 435 ffmpeg
+ 0 7372 C 2 0 2 - 136 ffmpeg
+ 0 12176 C 7 2 2 6 782 ffmpeg
+ 0 20035 C 7 2 4 3 1145 ffmpeg
+ 0 20141 C 5 1 1 3 429 ffmpeg
+ 0 29591 C - - 1 1 435 ffmpeg
\ No newline at end of file
diff --git a/resources/psutil/gpu/nvidia/fixtures/process_noprocesses.txt b/resources/psutil/gpu/nvidia/fixtures/process_noprocesses.txt
new file mode 100644
index 00000000..7d9f98ed
--- /dev/null
+++ b/resources/psutil/gpu/nvidia/fixtures/process_noprocesses.txt
@@ -0,0 +1,3 @@
+# gpu pid type sm mem enc dec fb command
+# Idx # C/G % % % % MB name
+ 0 - - - - - - - -
\ No newline at end of file
diff --git a/resources/psutil/gpu/nvidia/fixtures/query1.xml b/resources/psutil/gpu/nvidia/fixtures/query1.xml
new file mode 100644
index 00000000..10b35010
--- /dev/null
+++ b/resources/psutil/gpu/nvidia/fixtures/query1.xml
@@ -0,0 +1,725 @@
+
+
+
+ Mon Jul 15 13:50:34 2024
+ 495.29.05
+ 11.5
+ 1
+
+ NVIDIA GeForce GTX 1080
+ GeForce
+ Pascal
+ Disabled
+ Disabled
+ Disabled
+
+ N/A
+ N/A
+
+
+ None
+
+ Disabled
+ 4000
+
+ N/A
+ N/A
+
+ N/A
+ GPU-d8249424-2ed0-0499-2d47-8c6905e3ef5b
+ 0
+ 86.04.17.00.01
+ No
+ 0x100
+ N/A
+ 0
+
+ G001.0000.01.03
+ 1.1
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+ N/A
+
+ None
+ N/A
+
+
+ N/A
+
+
+ 01
+ 00
+ 0000
+ 1B8010DE
+ 00000000:01:00.0
+ 119E10DE
+
+
+ 3
+ 3
+
+
+ 16x
+ 16x
+
+
+
+ N/A
+ N/A
+
+ 0
+ 0
+ 106000 KB/s
+ 309000 KB/s
+
+ 44 %
+ P2
+
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+
+
+ 8119 MiB
+ 918 MiB
+ 7201 MiB
+
+
+ 256 MiB
+ 2 MiB
+ 254 MiB
+
+ Default
+
+ 15 %
+ 7 %
+ 3 %
+ 0 %
+
+
+ 0
+ 0
+ 0
+
+
+ 0
+ 0
+ 0
+
+
+ N/A
+ N/A
+
+
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+
+
+
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+ N/A
+ N/A
+
+ N/A
+
+ 55 C
+ 99 C
+ 96 C
+ N/A
+ 83 C
+ N/A
+ N/A
+
+
+ 60 C
+ 92 C
+
+
+ P2
+ Supported
+ 42.64 W
+ 180.00 W
+ 180.00 W
+ 180.00 W
+ 90.00 W
+ 180.00 W
+
+
+ 1607 MHz
+ 1607 MHz
+ 4513 MHz
+ 1442 MHz
+
+
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+
+ 1911 MHz
+ 1911 MHz
+ 5005 MHz
+ 1708 MHz
+
+
+ N/A
+
+
+ N/A
+ N/A
+
+
+ N/A
+
+
+
+ 5005 MHz
+ 1911 MHz
+ 1898 MHz
+ 1885 MHz
+ 1873 MHz
+ 1860 MHz
+ 1847 MHz
+ 1835 MHz
+ 1822 MHz
+ 1809 MHz
+ 1797 MHz
+ 1784 MHz
+ 1771 MHz
+ 1759 MHz
+ 1746 MHz
+ 1733 MHz
+ 1721 MHz
+ 1708 MHz
+ 1695 MHz
+ 1683 MHz
+ 1670 MHz
+ 1657 MHz
+ 1645 MHz
+ 1632 MHz
+ 1620 MHz
+ 1607 MHz
+ 1594 MHz
+ 1582 MHz
+ 1569 MHz
+ 1556 MHz
+ 1544 MHz
+ 1531 MHz
+ 1518 MHz
+ 1506 MHz
+ 1493 MHz
+ 1480 MHz
+ 1468 MHz
+ 1455 MHz
+ 1442 MHz
+ 1430 MHz
+ 1417 MHz
+ 1404 MHz
+ 1392 MHz
+ 1379 MHz
+ 1366 MHz
+ 1354 MHz
+ 1341 MHz
+ 1328 MHz
+ 1316 MHz
+ 1303 MHz
+ 1290 MHz
+ 1278 MHz
+ 1265 MHz
+ 1252 MHz
+ 1240 MHz
+ 1227 MHz
+ 1215 MHz
+ 1202 MHz
+ 1189 MHz
+ 1177 MHz
+ 1164 MHz
+ 1151 MHz
+ 1139 MHz
+ 1126 MHz
+ 1113 MHz
+ 1101 MHz
+ 1088 MHz
+ 1075 MHz
+ 1063 MHz
+ 1050 MHz
+ 1037 MHz
+ 1025 MHz
+ 1012 MHz
+ 999 MHz
+ 987 MHz
+ 974 MHz
+ 961 MHz
+ 949 MHz
+ 936 MHz
+ 923 MHz
+ 911 MHz
+ 898 MHz
+ 885 MHz
+ 873 MHz
+ 860 MHz
+ 847 MHz
+ 835 MHz
+ 822 MHz
+ 810 MHz
+ 797 MHz
+ 784 MHz
+ 772 MHz
+ 759 MHz
+ 746 MHz
+ 734 MHz
+ 721 MHz
+ 708 MHz
+ 696 MHz
+ 683 MHz
+ 670 MHz
+ 658 MHz
+ 645 MHz
+ 632 MHz
+ 620 MHz
+ 607 MHz
+ 594 MHz
+ 582 MHz
+ 569 MHz
+ 556 MHz
+ 544 MHz
+ 531 MHz
+ 518 MHz
+ 506 MHz
+ 493 MHz
+ 480 MHz
+ 468 MHz
+ 455 MHz
+ 442 MHz
+ 430 MHz
+ 417 MHz
+ 405 MHz
+ 392 MHz
+ 379 MHz
+ 367 MHz
+ 354 MHz
+ 341 MHz
+ 329 MHz
+ 316 MHz
+ 303 MHz
+ 291 MHz
+ 278 MHz
+ 265 MHz
+ 253 MHz
+ 240 MHz
+ 227 MHz
+ 215 MHz
+ 202 MHz
+ 189 MHz
+ 177 MHz
+ 164 MHz
+ 151 MHz
+ 139 MHz
+
+
+ 4513 MHz
+ 1911 MHz
+ 1898 MHz
+ 1885 MHz
+ 1873 MHz
+ 1860 MHz
+ 1847 MHz
+ 1835 MHz
+ 1822 MHz
+ 1809 MHz
+ 1797 MHz
+ 1784 MHz
+ 1771 MHz
+ 1759 MHz
+ 1746 MHz
+ 1733 MHz
+ 1721 MHz
+ 1708 MHz
+ 1695 MHz
+ 1683 MHz
+ 1670 MHz
+ 1657 MHz
+ 1645 MHz
+ 1632 MHz
+ 1620 MHz
+ 1607 MHz
+ 1594 MHz
+ 1582 MHz
+ 1569 MHz
+ 1556 MHz
+ 1544 MHz
+ 1531 MHz
+ 1518 MHz
+ 1506 MHz
+ 1493 MHz
+ 1480 MHz
+ 1468 MHz
+ 1455 MHz
+ 1442 MHz
+ 1430 MHz
+ 1417 MHz
+ 1404 MHz
+ 1392 MHz
+ 1379 MHz
+ 1366 MHz
+ 1354 MHz
+ 1341 MHz
+ 1328 MHz
+ 1316 MHz
+ 1303 MHz
+ 1290 MHz
+ 1278 MHz
+ 1265 MHz
+ 1252 MHz
+ 1240 MHz
+ 1227 MHz
+ 1215 MHz
+ 1202 MHz
+ 1189 MHz
+ 1177 MHz
+ 1164 MHz
+ 1151 MHz
+ 1139 MHz
+ 1126 MHz
+ 1113 MHz
+ 1101 MHz
+ 1088 MHz
+ 1075 MHz
+ 1063 MHz
+ 1050 MHz
+ 1037 MHz
+ 1025 MHz
+ 1012 MHz
+ 999 MHz
+ 987 MHz
+ 974 MHz
+ 961 MHz
+ 949 MHz
+ 936 MHz
+ 923 MHz
+ 911 MHz
+ 898 MHz
+ 885 MHz
+ 873 MHz
+ 860 MHz
+ 847 MHz
+ 835 MHz
+ 822 MHz
+ 810 MHz
+ 797 MHz
+ 784 MHz
+ 772 MHz
+ 759 MHz
+ 746 MHz
+ 734 MHz
+ 721 MHz
+ 708 MHz
+ 696 MHz
+ 683 MHz
+ 670 MHz
+ 658 MHz
+ 645 MHz
+ 632 MHz
+ 620 MHz
+ 607 MHz
+ 594 MHz
+ 582 MHz
+ 569 MHz
+ 556 MHz
+ 544 MHz
+ 531 MHz
+ 518 MHz
+ 506 MHz
+ 493 MHz
+ 480 MHz
+ 468 MHz
+ 455 MHz
+ 442 MHz
+ 430 MHz
+ 417 MHz
+ 405 MHz
+ 392 MHz
+ 379 MHz
+ 367 MHz
+ 354 MHz
+ 341 MHz
+ 329 MHz
+ 316 MHz
+ 303 MHz
+ 291 MHz
+ 278 MHz
+ 265 MHz
+ 253 MHz
+ 240 MHz
+ 227 MHz
+ 215 MHz
+ 202 MHz
+ 189 MHz
+ 177 MHz
+ 164 MHz
+ 151 MHz
+ 139 MHz
+
+
+ 810 MHz
+ 1911 MHz
+ 1898 MHz
+ 1885 MHz
+ 1873 MHz
+ 1860 MHz
+ 1847 MHz
+ 1835 MHz
+ 1822 MHz
+ 1809 MHz
+ 1797 MHz
+ 1784 MHz
+ 1771 MHz
+ 1759 MHz
+ 1746 MHz
+ 1733 MHz
+ 1721 MHz
+ 1708 MHz
+ 1695 MHz
+ 1683 MHz
+ 1670 MHz
+ 1657 MHz
+ 1645 MHz
+ 1632 MHz
+ 1620 MHz
+ 1607 MHz
+ 1594 MHz
+ 1582 MHz
+ 1569 MHz
+ 1556 MHz
+ 1544 MHz
+ 1531 MHz
+ 1518 MHz
+ 1506 MHz
+ 1493 MHz
+ 1480 MHz
+ 1468 MHz
+ 1455 MHz
+ 1442 MHz
+ 1430 MHz
+ 1417 MHz
+ 1404 MHz
+ 1392 MHz
+ 1379 MHz
+ 1366 MHz
+ 1354 MHz
+ 1341 MHz
+ 1328 MHz
+ 1316 MHz
+ 1303 MHz
+ 1290 MHz
+ 1278 MHz
+ 1265 MHz
+ 1252 MHz
+ 1240 MHz
+ 1227 MHz
+ 1215 MHz
+ 1202 MHz
+ 1189 MHz
+ 1177 MHz
+ 1164 MHz
+ 1151 MHz
+ 1139 MHz
+ 1126 MHz
+ 1113 MHz
+ 1101 MHz
+ 1088 MHz
+ 1075 MHz
+ 1063 MHz
+ 1050 MHz
+ 1037 MHz
+ 1025 MHz
+ 1012 MHz
+ 999 MHz
+ 987 MHz
+ 974 MHz
+ 961 MHz
+ 949 MHz
+ 936 MHz
+ 923 MHz
+ 911 MHz
+ 898 MHz
+ 885 MHz
+ 873 MHz
+ 860 MHz
+ 847 MHz
+ 835 MHz
+ 822 MHz
+ 810 MHz
+ 797 MHz
+ 784 MHz
+ 772 MHz
+ 759 MHz
+ 746 MHz
+ 734 MHz
+ 721 MHz
+ 708 MHz
+ 696 MHz
+ 683 MHz
+ 670 MHz
+ 658 MHz
+ 645 MHz
+ 632 MHz
+ 620 MHz
+ 607 MHz
+ 594 MHz
+ 582 MHz
+ 569 MHz
+ 556 MHz
+ 544 MHz
+ 531 MHz
+ 518 MHz
+ 506 MHz
+ 493 MHz
+ 480 MHz
+ 468 MHz
+ 455 MHz
+ 442 MHz
+ 430 MHz
+ 417 MHz
+ 405 MHz
+ 392 MHz
+ 379 MHz
+ 367 MHz
+ 354 MHz
+ 341 MHz
+ 329 MHz
+ 316 MHz
+ 303 MHz
+ 291 MHz
+ 278 MHz
+ 265 MHz
+ 253 MHz
+ 240 MHz
+ 227 MHz
+ 215 MHz
+ 202 MHz
+ 189 MHz
+ 177 MHz
+ 164 MHz
+ 151 MHz
+ 139 MHz
+
+
+ 405 MHz
+ 607 MHz
+ 594 MHz
+ 582 MHz
+ 569 MHz
+ 556 MHz
+ 544 MHz
+ 531 MHz
+ 518 MHz
+ 506 MHz
+ 493 MHz
+ 480 MHz
+ 468 MHz
+ 455 MHz
+ 442 MHz
+ 430 MHz
+ 417 MHz
+ 405 MHz
+ 392 MHz
+ 379 MHz
+ 367 MHz
+ 354 MHz
+ 341 MHz
+ 329 MHz
+ 316 MHz
+ 303 MHz
+ 291 MHz
+ 278 MHz
+ 265 MHz
+ 253 MHz
+ 240 MHz
+ 227 MHz
+ 215 MHz
+ 202 MHz
+ 189 MHz
+ 177 MHz
+ 164 MHz
+ 151 MHz
+ 139 MHz
+
+
+
+
+ N/A
+ N/A
+ 18179
+ C
+ /usr/local/bin/ffmpeg
+ 916 MiB
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/resources/psutil/gpu/nvidia/fixtures/query2.xml b/resources/psutil/gpu/nvidia/fixtures/query2.xml
new file mode 100644
index 00000000..4d93cac0
--- /dev/null
+++ b/resources/psutil/gpu/nvidia/fixtures/query2.xml
@@ -0,0 +1,908 @@
+
+
+
+ Mon Jul 15 13:41:56 2024
+ 555.42.06
+ 12.5
+ 2
+
+ NVIDIA L4
+ NVIDIA
+ Ada Lovelace
+ Enabled
+ Disabled
+ Disabled
+ None
+
+ N/A
+ N/A
+
+
+ None
+
+ Disabled
+ 4000
+
+ N/A
+ N/A
+
+ 1654523003308
+ GPU-c5533cd4-5a60-059e-348d-b6d7466932e4
+ 1
+ 95.04.29.00.06
+ No
+ 0x100
+ 900-2G193-0000-001
+ 27B8-895-A1
+ N/A
+ 1
+
+ G193.0200.00.01
+ 2.1
+ 6.16
+ N/A
+
+
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+ N/A
+
+ None
+ N/A
+ N/A
+
+
+ No
+ N/A
+
+ 555.42.06
+
+ N/A
+
+
+ 01
+ 00
+ 0000
+ 3
+ 2
+ 27B810DE
+ 00000000:01:00.0
+ 16CA10DE
+
+
+ 4
+ 4
+ 4
+ 4
+ 5
+
+
+ 16x
+ 16x
+
+
+
+ N/A
+ N/A
+
+ 0
+ 0
+ 0 KB/s
+ 0 KB/s
+ N/A
+ N/A
+
+ N/A
+ P0
+
+ Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+
+ N/A
+
+ 23034 MiB
+ 434 MiB
+ 1 MiB
+ 22601 MiB
+
+
+ 32768 MiB
+ 1 MiB
+ 32767 MiB
+
+
+ 0 MiB
+ 0 MiB
+ 0 MiB
+
+ Default
+
+ 2 %
+ 0 %
+ 0 %
+ 0 %
+ 0 %
+ 0 %
+
+
+ 0
+ 0
+ 0
+
+
+ 0
+ 0
+ 0
+
+
+ Enabled
+ Enabled
+
+
+
+ 0
+ 0
+ 0
+ 0
+ 0
+
+
+ 0
+ 0
+ 0
+ 0
+ 0
+ No
+
+
+ 0
+ 0
+ 0
+ 0
+ 0
+
+
+
+
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+ N/A
+ N/A
+
+
+ 0
+ 0
+ No
+ No
+
+ 96 bank(s)
+ 0 bank(s)
+ 0 bank(s)
+ 0 bank(s)
+ 0 bank(s)
+
+
+
+ 45 C
+ 39 C
+ -5 C
+ -2 C
+ 0 C
+ N/A
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+
+ P0
+ 27.22 W
+ 72.00 W
+ 72.00 W
+ 72.00 W
+ 40.00 W
+ 72.00 W
+
+
+ N/A
+
+
+ P0
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+ 2040 MHz
+ 2040 MHz
+ 6250 MHz
+ 1770 MHz
+
+
+ 2040 MHz
+ 6251 MHz
+
+
+ 2040 MHz
+ 6251 MHz
+
+
+ N/A
+
+
+ 2040 MHz
+ 2040 MHz
+ 6251 MHz
+ 1770 MHz
+
+
+ 2040 MHz
+
+
+ N/A
+ N/A
+
+
+ 885.000 mV
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+
+ N/A
+
+
+
+
+ 6251 MHz
+ 2040 MHz
+ 2025 MHz
+ 2010 MHz
+ 1995 MHz
+ 1980 MHz
+ 1965 MHz
+ 1950 MHz
+ 1935 MHz
+ 1920 MHz
+ 1905 MHz
+ 1890 MHz
+ 1875 MHz
+ 1860 MHz
+ 1845 MHz
+ 1830 MHz
+ 1815 MHz
+ 1800 MHz
+ 1785 MHz
+ 1770 MHz
+ 1755 MHz
+ 1740 MHz
+ 1725 MHz
+ 1710 MHz
+ 1695 MHz
+ 1680 MHz
+ 1665 MHz
+ 1650 MHz
+ 1635 MHz
+ 1620 MHz
+ 1605 MHz
+ 1590 MHz
+ 1575 MHz
+ 1560 MHz
+ 1545 MHz
+ 1530 MHz
+ 1515 MHz
+ 1500 MHz
+ 1485 MHz
+ 1470 MHz
+ 1455 MHz
+ 1440 MHz
+ 1425 MHz
+ 1410 MHz
+ 1395 MHz
+ 1380 MHz
+ 1365 MHz
+ 1350 MHz
+ 1335 MHz
+ 1320 MHz
+ 1305 MHz
+ 1290 MHz
+ 1275 MHz
+ 1260 MHz
+ 1245 MHz
+ 1230 MHz
+ 1215 MHz
+ 1200 MHz
+ 1185 MHz
+ 1170 MHz
+ 1155 MHz
+ 1140 MHz
+ 1125 MHz
+ 1110 MHz
+ 1095 MHz
+ 1080 MHz
+ 1065 MHz
+ 1050 MHz
+ 1035 MHz
+ 1020 MHz
+ 1005 MHz
+ 990 MHz
+ 975 MHz
+ 960 MHz
+ 945 MHz
+ 930 MHz
+ 915 MHz
+ 900 MHz
+ 885 MHz
+ 870 MHz
+ 855 MHz
+ 840 MHz
+ 825 MHz
+ 810 MHz
+ 795 MHz
+ 780 MHz
+ 765 MHz
+ 750 MHz
+ 735 MHz
+ 720 MHz
+ 705 MHz
+ 690 MHz
+ 675 MHz
+ 660 MHz
+ 645 MHz
+ 630 MHz
+ 615 MHz
+ 600 MHz
+ 585 MHz
+ 570 MHz
+ 555 MHz
+ 540 MHz
+ 525 MHz
+ 510 MHz
+ 495 MHz
+ 480 MHz
+ 465 MHz
+ 450 MHz
+ 435 MHz
+ 420 MHz
+ 405 MHz
+ 390 MHz
+ 375 MHz
+ 360 MHz
+ 345 MHz
+ 330 MHz
+ 315 MHz
+ 300 MHz
+ 285 MHz
+ 270 MHz
+ 255 MHz
+ 240 MHz
+ 225 MHz
+ 210 MHz
+
+
+ 405 MHz
+ 645 MHz
+ 630 MHz
+ 615 MHz
+ 600 MHz
+ 585 MHz
+ 570 MHz
+ 555 MHz
+ 540 MHz
+ 525 MHz
+ 510 MHz
+ 495 MHz
+ 480 MHz
+ 465 MHz
+ 450 MHz
+ 435 MHz
+ 420 MHz
+ 405 MHz
+ 390 MHz
+ 375 MHz
+ 360 MHz
+ 345 MHz
+ 330 MHz
+ 315 MHz
+ 300 MHz
+ 285 MHz
+ 270 MHz
+ 255 MHz
+ 240 MHz
+ 225 MHz
+ 210 MHz
+
+
+
+
+ 10131
+ C
+ ffmpeg
+ 389 MiB
+
+
+ 13597
+ C
+ ffmpeg
+ 1054 MiB
+
+
+
+
+
+ disabled
+
+
+
+
+ NVIDIA L4
+ NVIDIA
+ Ada Lovelace
+ Enabled
+ Disabled
+ Disabled
+ None
+
+ N/A
+ N/A
+
+
+ None
+
+ Disabled
+ 4000
+
+ N/A
+ N/A
+
+ 1654523001128
+ GPU-128ab6fb-6ec9-fd74-b479-4a5fd14f55bd
+ 0
+ 95.04.29.00.06
+ No
+ 0xc100
+ 900-2G193-0000-001
+ 27B8-895-A1
+ N/A
+ 1
+
+ G193.0200.00.01
+ 2.1
+ 6.16
+ N/A
+
+
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+ N/A
+
+ None
+ N/A
+ N/A
+
+
+ No
+ N/A
+
+ 555.42.06
+
+ N/A
+
+
+ C1
+ 00
+ 0000
+ 3
+ 2
+ 27B810DE
+ 00000000:C1:00.0
+ 16CA10DE
+
+
+ 4
+ 4
+ 4
+ 4
+ 5
+
+
+ 16x
+ 1x
+
+
+
+ N/A
+ N/A
+
+ 0
+ 0
+ 0 KB/s
+ 0 KB/s
+ N/A
+ N/A
+
+ N/A
+ P0
+
+ Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+
+ N/A
+
+ 23034 MiB
+ 434 MiB
+ 1 MiB
+ 22601 MiB
+
+
+ 32768 MiB
+ 1 MiB
+ 32767 MiB
+
+
+ 0 MiB
+ 0 MiB
+ 0 MiB
+
+ Default
+
+ 3 %
+ 0 %
+ 0 %
+ 0 %
+ 0 %
+ 0 %
+
+
+ 0
+ 0
+ 0
+
+
+ 0
+ 0
+ 0
+
+
+ Enabled
+ Enabled
+
+
+
+ 0
+ 0
+ 0
+ 0
+ 0
+
+
+ 0
+ 0
+ 0
+ 0
+ 0
+ No
+
+
+ 0
+ 0
+ 0
+ 0
+ 0
+
+
+
+
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+ N/A
+ N/A
+
+
+ 0
+ 0
+ No
+ No
+
+ 96 bank(s)
+ 0 bank(s)
+ 0 bank(s)
+ 0 bank(s)
+ 0 bank(s)
+
+
+
+ 40 C
+ 43 C
+ -5 C
+ -2 C
+ 0 C
+ N/A
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+
+ P0
+ 29.54 W
+ 72.00 W
+ 72.00 W
+ 72.00 W
+ 40.00 W
+ 72.00 W
+
+
+ N/A
+
+
+ P0
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+ 2040 MHz
+ 2040 MHz
+ 6250 MHz
+ 1770 MHz
+
+
+ 2040 MHz
+ 6251 MHz
+
+
+ 2040 MHz
+ 6251 MHz
+
+
+ N/A
+
+
+ 2040 MHz
+ 2040 MHz
+ 6251 MHz
+ 1770 MHz
+
+
+ 2040 MHz
+
+
+ N/A
+ N/A
+
+
+ 910.000 mV
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+
+ N/A
+
+
+
+
+ 6251 MHz
+ 2040 MHz
+ 2025 MHz
+ 2010 MHz
+ 1995 MHz
+ 1980 MHz
+ 1965 MHz
+ 1950 MHz
+ 1935 MHz
+ 1920 MHz
+ 1905 MHz
+ 1890 MHz
+ 1875 MHz
+ 1860 MHz
+ 1845 MHz
+ 1830 MHz
+ 1815 MHz
+ 1800 MHz
+ 1785 MHz
+ 1770 MHz
+ 1755 MHz
+ 1740 MHz
+ 1725 MHz
+ 1710 MHz
+ 1695 MHz
+ 1680 MHz
+ 1665 MHz
+ 1650 MHz
+ 1635 MHz
+ 1620 MHz
+ 1605 MHz
+ 1590 MHz
+ 1575 MHz
+ 1560 MHz
+ 1545 MHz
+ 1530 MHz
+ 1515 MHz
+ 1500 MHz
+ 1485 MHz
+ 1470 MHz
+ 1455 MHz
+ 1440 MHz
+ 1425 MHz
+ 1410 MHz
+ 1395 MHz
+ 1380 MHz
+ 1365 MHz
+ 1350 MHz
+ 1335 MHz
+ 1320 MHz
+ 1305 MHz
+ 1290 MHz
+ 1275 MHz
+ 1260 MHz
+ 1245 MHz
+ 1230 MHz
+ 1215 MHz
+ 1200 MHz
+ 1185 MHz
+ 1170 MHz
+ 1155 MHz
+ 1140 MHz
+ 1125 MHz
+ 1110 MHz
+ 1095 MHz
+ 1080 MHz
+ 1065 MHz
+ 1050 MHz
+ 1035 MHz
+ 1020 MHz
+ 1005 MHz
+ 990 MHz
+ 975 MHz
+ 960 MHz
+ 945 MHz
+ 930 MHz
+ 915 MHz
+ 900 MHz
+ 885 MHz
+ 870 MHz
+ 855 MHz
+ 840 MHz
+ 825 MHz
+ 810 MHz
+ 795 MHz
+ 780 MHz
+ 765 MHz
+ 750 MHz
+ 735 MHz
+ 720 MHz
+ 705 MHz
+ 690 MHz
+ 675 MHz
+ 660 MHz
+ 645 MHz
+ 630 MHz
+ 615 MHz
+ 600 MHz
+ 585 MHz
+ 570 MHz
+ 555 MHz
+ 540 MHz
+ 525 MHz
+ 510 MHz
+ 495 MHz
+ 480 MHz
+ 465 MHz
+ 450 MHz
+ 435 MHz
+ 420 MHz
+ 405 MHz
+ 390 MHz
+ 375 MHz
+ 360 MHz
+ 345 MHz
+ 330 MHz
+ 315 MHz
+ 300 MHz
+ 285 MHz
+ 270 MHz
+ 255 MHz
+ 240 MHz
+ 225 MHz
+ 210 MHz
+
+
+ 405 MHz
+ 645 MHz
+ 630 MHz
+ 615 MHz
+ 600 MHz
+ 585 MHz
+ 570 MHz
+ 555 MHz
+ 540 MHz
+ 525 MHz
+ 510 MHz
+ 495 MHz
+ 480 MHz
+ 465 MHz
+ 450 MHz
+ 435 MHz
+ 420 MHz
+ 405 MHz
+ 390 MHz
+ 375 MHz
+ 360 MHz
+ 345 MHz
+ 330 MHz
+ 315 MHz
+ 300 MHz
+ 285 MHz
+ 270 MHz
+ 255 MHz
+ 240 MHz
+ 225 MHz
+ 210 MHz
+
+
+
+
+ 16870
+ C
+ ffmpeg
+ 549 MiB
+
+
+
+
+
+ disabled
+
+
+
+
\ No newline at end of file
diff --git a/resources/psutil/gpu/nvidia/fixtures/query3.xml b/resources/psutil/gpu/nvidia/fixtures/query3.xml
new file mode 100644
index 00000000..86d6ec33
--- /dev/null
+++ b/resources/psutil/gpu/nvidia/fixtures/query3.xml
@@ -0,0 +1,242 @@
+
+
+
+ Mon Jul 15 15:24:14 2024
+ 440.33.01
+ 10.2
+ 1
+
+ GeForce GTX 1080
+ GeForce
+ Disabled
+ Disabled
+ Disabled
+ Disabled
+ 4000
+
+ N/A
+ N/A
+
+ N/A
+ GPU-bf6e9a3a-e0bb-c253-45b4-34c99ec25512
+ 0
+ 86.04.17.00.01
+ No
+ 0x100
+ N/A
+
+ G001.0000.01.03
+ 1.1
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+
+ None
+ N/A
+
+
+ N/A
+
+
+ 01
+ 00
+ 0000
+ 1B8010DE
+ 00000000:01:00.0
+ 119E10DE
+
+
+ 3
+ 3
+
+
+ 16x
+ 16x
+
+
+
+ N/A
+ N/A
+
+ 0
+ 0
+ 783000 KB/s
+ 1269000 KB/s
+
+ 53 %
+ P2
+
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+ Not Active
+
+
+ 8119 MiB
+ 2006 MiB
+ 6113 MiB
+
+
+ 256 MiB
+ 2 MiB
+ 254 MiB
+
+ Default
+
+ 32 %
+ 11 %
+ 17 %
+ 25 %
+
+
+ 0
+ 0
+ 0
+
+
+ 0
+ 0
+ 0
+
+
+ N/A
+ N/A
+
+
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+ N/A
+
+
+
+
+
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+ N/A
+ N/A
+
+
+ 65 C
+ 99 C
+ 96 C
+ N/A
+ N/A
+ N/A
+
+
+ P2
+ Supported
+ 89.74 W
+ 180.00 W
+ 180.00 W
+ 180.00 W
+ 90.00 W
+ 180.00 W
+
+
+ 1885 MHz
+ 1885 MHz
+ 4513 MHz
+ 1695 MHz
+
+
+ N/A
+ N/A
+
+
+ N/A
+ N/A
+
+
+ 1911 MHz
+ 1911 MHz
+ 5005 MHz
+ 1708 MHz
+
+
+ N/A
+
+
+ N/A
+ N/A
+
+ N/A
+
+
+ 10131
+ C
+ ffmpeg
+ 389 MiB
+
+
+ 13597
+ C
+ ffmpeg
+ 1054 MiB
+
+
+ 16870
+ C
+ ffmpeg
+ 549 MiB
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/resources/psutil/gpu/nvidia/nvidia.go b/resources/psutil/gpu/nvidia/nvidia.go
new file mode 100644
index 00000000..c46bbce1
--- /dev/null
+++ b/resources/psutil/gpu/nvidia/nvidia.go
@@ -0,0 +1,519 @@
+package nvidia
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "fmt"
+ "os/exec"
+ "regexp"
+ "slices"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/datarhei/core/v16/resources/psutil/gpu"
+)
+
+type Megabytes uint64
+
+func (m *Megabytes) UnmarshalText(text []byte) error {
+ value := uint64(0)
+ _, err := fmt.Sscanf(string(text), "%d MiB", &value)
+ if err != nil {
+ return err
+ }
+
+ *m = Megabytes(value * 1024 * 1024)
+
+ return nil
+}
+
+type Utilization float64
+
+func (u *Utilization) UnmarshalText(text []byte) error {
+ value := float64(0)
+ _, err := fmt.Sscanf(string(text), "%f %%", &value)
+ if err != nil {
+ return err
+ }
+
+ *u = Utilization(value)
+
+ return nil
+}
+
+type Process struct {
+ Index int
+ PID int32
+ Memory uint64 // bytes
+
+ Usage float64 // percent 0-100
+ Encoder float64 // percent 0-100
+ Decoder float64 // percent 0-100
+
+ lastSeen time.Time
+}
+
+type GPUStats struct {
+ ID string `xml:"id,attr"`
+ Name string `xml:"product_name"`
+ Architecture string `xml:"product_architecture"`
+
+ MemoryTotal Megabytes `xml:"fb_memory_usage>total"`
+ MemoryUsed Megabytes `xml:"fb_memory_usage>used"`
+
+ Usage Utilization `xml:"utilization>gpu_util"`
+ UsageEncoder Utilization `xml:"utilization>encoder_util"`
+ UsageDecoder Utilization `xml:"utilization>decoder_util"`
+}
+
+type Stats struct {
+ GPU []GPUStats `xml:"gpu"`
+}
+
+type nvidia struct {
+ wrQuery *writerQuery
+ wrProcess *writerProcess
+
+ lock sync.RWMutex
+ cancel context.CancelFunc
+ stats Stats
+ process map[int32]Process
+ err error
+}
+
+type dummy struct{}
+
+func (d *dummy) Count() (int, error) { return 0, nil }
+func (d *dummy) Stats() ([]gpu.Stats, error) { return nil, nil }
+func (d *dummy) Process(pid int32) (gpu.Process, error) { return gpu.Process{}, gpu.ErrProcessNotFound }
+func (d *dummy) Close() {}
+
+type writerQuery struct {
+ buf bytes.Buffer
+ ch chan Stats
+ terminator []byte
+}
+
+func (w *writerQuery) Write(data []byte) (int, error) {
+ n, err := w.buf.Write(data)
+ if err != nil {
+ return n, err
+ }
+
+ for {
+ idx := bytes.Index(w.buf.Bytes(), w.terminator)
+ if idx == -1 {
+ break
+ }
+
+ content := make([]byte, idx+len(w.terminator))
+ n, err := w.buf.Read(content)
+ if err != nil || n != len(content) {
+ break
+ }
+
+ s, err := parseQuery(content)
+ if err != nil {
+ continue
+ }
+
+ w.ch <- s
+ }
+
+ return n, nil
+}
+
+func parseQuery(data []byte) (Stats, error) {
+ nv := Stats{}
+
+ err := xml.Unmarshal(data, &nv)
+ if err != nil {
+ return nv, fmt.Errorf("parsing report: %w", err)
+ }
+
+ return nv, nil
+}
+
+type writerProcess struct {
+ buf bytes.Buffer
+ ch chan Process
+ terminator []byte
+}
+
+func (w *writerProcess) Write(data []byte) (int, error) {
+ n, err := w.buf.Write(data)
+ if err != nil {
+ return n, err
+ }
+
+ for {
+ idx := bytes.Index(w.buf.Bytes(), w.terminator)
+ if idx == -1 {
+ break
+ }
+
+ content := make([]byte, idx+len(w.terminator))
+ n, err := w.buf.Read(content)
+ if err != nil || n != len(content) {
+ break
+ }
+
+ s, err := parseProcess(content)
+ if err != nil {
+ continue
+ }
+
+ w.ch <- s
+ }
+
+ return n, nil
+}
+
+const processMatcher = `^\s*([0-9]+)\s+([0-9]+)\s+[A-Z]\s+([0-9-]+)\s+[0-9-]+\s+([0-9-]+)\s+([0-9-]+)\s+([0-9]+).*`
+
+// # gpu pid type sm mem enc dec fb command
+// # Idx # C/G % % % % MB name
+//
+// 0 7372 C 2 0 2 - 136 ffmpeg
+// 0 12176 C 5 2 3 7 782 ffmpeg
+// 0 20035 C 8 2 4 1 1145 ffmpeg
+// 0 20141 C 2 1 1 3 429 ffmpeg
+// 0 29591 C 2 1 - 2 435 ffmpeg
+var reProcessMatcher = regexp.MustCompile(processMatcher)
+
+func parseProcess(data []byte) (Process, error) {
+ p := Process{}
+
+ if len(data) == 0 {
+ return p, fmt.Errorf("empty line")
+ }
+
+ if data[0] == '#' {
+ return p, fmt.Errorf("comment")
+ }
+
+ matches := reProcessMatcher.FindStringSubmatch(string(data))
+ if matches == nil {
+ return p, fmt.Errorf("no matches found")
+ }
+
+ if len(matches) != 7 {
+ return p, fmt.Errorf("not the expected number of matches found")
+ }
+
+ if d, err := strconv.ParseInt(matches[1], 10, 0); err == nil {
+ p.Index = int(d)
+ }
+
+ if d, err := strconv.ParseInt(matches[2], 10, 32); err == nil {
+ p.PID = int32(d)
+ }
+
+ if matches[3][0] != '-' {
+ if d, err := strconv.ParseFloat(matches[3], 64); err == nil {
+ p.Usage = d
+ }
+ }
+
+ if matches[4][0] != '-' {
+ if d, err := strconv.ParseFloat(matches[4], 64); err == nil {
+ p.Encoder = d
+ }
+ }
+
+ if matches[5][0] != '-' {
+ if d, err := strconv.ParseFloat(matches[5], 64); err == nil {
+ p.Decoder = d
+ }
+ }
+
+ if d, err := strconv.ParseUint(matches[6], 10, 64); err == nil {
+ p.Memory = d * 1024 * 1024
+ }
+
+ return p, nil
+}
+
+func New(path string) gpu.GPU {
+ if len(path) == 0 {
+ path = "nvidia-smi"
+ }
+
+ path, err := exec.LookPath(path)
+ if err != nil {
+ return &dummy{}
+ }
+
+ n := &nvidia{
+ process: map[int32]Process{},
+ }
+
+ stats, err := n.runQueryOnce(path)
+ if err != nil {
+ return &dummy{}
+ }
+
+ n.stats = stats
+
+ process, err := n.runProcessOnce(path)
+ if err != nil {
+ return &dummy{}
+ }
+
+ n.process = process
+
+ n.wrQuery = &writerQuery{
+ ch: make(chan Stats, 1),
+ terminator: []byte("\n"),
+ }
+ n.wrProcess = &writerProcess{
+ ch: make(chan Process, 32),
+ terminator: []byte("\n"),
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ n.cancel = cancel
+
+ go n.reader(ctx)
+ go n.runnerQuery(ctx, path)
+ go n.runnerProcess(ctx, path)
+
+ return n
+}
+
+func (n *nvidia) reader(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case stats := <-n.wrQuery.ch:
+ n.lock.Lock()
+ n.stats = stats
+ n.lock.Unlock()
+ case process := <-n.wrProcess.ch:
+ process.lastSeen = time.Now()
+ n.lock.Lock()
+ n.process[process.PID] = process
+
+ for pid, p := range n.process {
+ if time.Since(p.lastSeen) > 11*time.Second {
+ delete(n.process, pid)
+ }
+ }
+ n.lock.Unlock()
+ }
+ }
+}
+
+func (n *nvidia) runQueryOnce(path string) (Stats, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ data := &bytes.Buffer{}
+
+ cmd := exec.CommandContext(ctx, path, "-q", "-x")
+ cmd.Stdout = data
+ err := cmd.Start()
+ if err != nil {
+ return Stats{}, err
+ }
+
+ err = cmd.Wait()
+ if err != nil {
+ return Stats{}, err
+ }
+
+ stats, err := parseQuery(data.Bytes())
+ if err != nil {
+ return Stats{}, err
+ }
+
+ return stats, nil
+}
+
+func (n *nvidia) runnerQuery(ctx context.Context, path string) {
+ for {
+ cmd := exec.CommandContext(ctx, path, "-q", "-x", "-l", "1")
+ cmd.Stdout = n.wrQuery
+ err := cmd.Start()
+ if err != nil {
+ n.lock.Lock()
+ n.err = err
+ n.lock.Unlock()
+
+ time.Sleep(3 * time.Second)
+ continue
+ }
+
+ err = cmd.Wait()
+
+ n.lock.Lock()
+ n.err = err
+ n.lock.Unlock()
+
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ }
+}
+
+func (n *nvidia) runProcessOnce(path string) (map[int32]Process, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ data := &bytes.Buffer{}
+
+ cmd := exec.CommandContext(ctx, path, "pmon", "-s", "um", "-c", "1")
+ cmd.Stdout = data
+ err := cmd.Start()
+ if err != nil {
+ return nil, err
+ }
+
+ err = cmd.Wait()
+ if err != nil {
+ return nil, err
+ }
+
+ lines := bytes.Split(data.Bytes(), []byte{'\n'})
+
+ process := map[int32]Process{}
+
+ for _, line := range lines {
+ p, err := parseProcess(line)
+ if err != nil {
+ continue
+ }
+
+ process[p.PID] = p
+ }
+
+ return process, nil
+}
+
+func (n *nvidia) runnerProcess(ctx context.Context, path string) {
+ for {
+ cmd := exec.CommandContext(ctx, path, "pmon", "-s", "um", "-d", "5")
+ cmd.Stdout = n.wrProcess
+ err := cmd.Start()
+ if err != nil {
+ n.lock.Lock()
+ n.err = err
+ n.lock.Unlock()
+
+ time.Sleep(3 * time.Second)
+ continue
+ }
+
+ err = cmd.Wait()
+
+ n.lock.Lock()
+ n.err = err
+ n.lock.Unlock()
+
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ }
+}
+
+func (n *nvidia) Count() (int, error) {
+ n.lock.RLock()
+ defer n.lock.RUnlock()
+
+ if n.err != nil {
+ return 0, n.err
+ }
+
+ return len(n.stats.GPU), nil
+}
+
+func (n *nvidia) Stats() ([]gpu.Stats, error) {
+ stats := []gpu.Stats{}
+
+ n.lock.RLock()
+ defer n.lock.RUnlock()
+
+ if n.err != nil {
+ return stats, n.err
+ }
+
+ for i, nv := range n.stats.GPU {
+ s := gpu.Stats{
+ Index: i,
+ ID: nv.ID,
+ Name: nv.Name,
+ Architecture: nv.Architecture,
+ MemoryTotal: uint64(nv.MemoryTotal),
+ MemoryUsed: uint64(nv.MemoryUsed),
+ Usage: float64(nv.Usage),
+ Encoder: float64(nv.UsageEncoder),
+ Decoder: float64(nv.UsageDecoder),
+ Process: []gpu.Process{},
+ }
+
+ stats = append(stats, s)
+ }
+
+ for _, p := range n.process {
+ if p.Index >= len(stats) {
+ continue
+ }
+
+ stats[p.Index].Process = append(stats[p.Index].Process, gpu.Process{
+ PID: p.PID,
+ Index: p.Index,
+ Memory: p.Memory,
+ Usage: p.Usage,
+ Encoder: p.Encoder,
+ Decoder: p.Decoder,
+ })
+ }
+
+ for i := range stats {
+ p := stats[i].Process
+ slices.SortFunc(p, func(a, b gpu.Process) int {
+ return int(a.PID - b.PID)
+ })
+ stats[i].Process = p
+ }
+
+ return stats, nil
+}
+
+func (n *nvidia) Process(pid int32) (gpu.Process, error) {
+ n.lock.RLock()
+ defer n.lock.RUnlock()
+
+ p, hasProcess := n.process[pid]
+ if hasProcess {
+ return gpu.Process{
+ PID: p.PID,
+ Index: p.Index,
+ Memory: p.Memory,
+ Usage: p.Usage,
+ Encoder: p.Encoder,
+ Decoder: p.Decoder,
+ }, nil
+ }
+
+ return gpu.Process{Index: -1}, gpu.ErrProcessNotFound
+}
+
+func (n *nvidia) Close() {
+ n.lock.Lock()
+ defer n.lock.Unlock()
+
+ if n.cancel == nil {
+ return
+ }
+
+ n.cancel()
+ n.cancel = nil
+}
diff --git a/resources/psutil/gpu/nvidia/nvidia_test.go b/resources/psutil/gpu/nvidia/nvidia_test.go
new file mode 100644
index 00000000..918d235e
--- /dev/null
+++ b/resources/psutil/gpu/nvidia/nvidia_test.go
@@ -0,0 +1,418 @@
+package nvidia
+
+import (
+ "bytes"
+ "os"
+ "sync"
+ "testing"
+
+ "github.com/datarhei/core/v16/internal/testhelper"
+ "github.com/datarhei/core/v16/resources/psutil/gpu"
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseQuery(t *testing.T) {
+ data, err := os.ReadFile("./fixtures/query1.xml")
+ require.NoError(t, err)
+
+ nv, err := parseQuery(data)
+ require.NoError(t, err)
+
+ require.Equal(t, Stats{
+ GPU: []GPUStats{
+ {
+ ID: "00000000:01:00.0",
+ Name: "NVIDIA GeForce GTX 1080",
+ Architecture: "Pascal",
+ MemoryTotal: 8119 * 1024 * 1024,
+ MemoryUsed: 918 * 1024 * 1024,
+ Usage: 15,
+ UsageEncoder: 3,
+ UsageDecoder: 0,
+ },
+ },
+ }, nv)
+
+ data, err = os.ReadFile("./fixtures/query2.xml")
+ require.NoError(t, err)
+
+ nv, err = parseQuery(data)
+ require.NoError(t, err)
+
+ require.Equal(t, Stats{
+ GPU: []GPUStats{
+ {
+ ID: "00000000:01:00.0",
+ Name: "NVIDIA L4",
+ Architecture: "Ada Lovelace",
+ MemoryTotal: 23034 * 1024 * 1024,
+ MemoryUsed: 1 * 1024 * 1024,
+ Usage: 2,
+ UsageEncoder: 0,
+ UsageDecoder: 0,
+ },
+ {
+ ID: "00000000:C1:00.0",
+ Name: "NVIDIA L4",
+ Architecture: "Ada Lovelace",
+ MemoryTotal: 23034 * 1024 * 1024,
+ MemoryUsed: 1 * 1024 * 1024,
+ Usage: 3,
+ UsageEncoder: 0,
+ UsageDecoder: 0,
+ },
+ },
+ }, nv)
+
+ data, err = os.ReadFile("./fixtures/query3.xml")
+ require.NoError(t, err)
+
+ nv, err = parseQuery(data)
+ require.NoError(t, err)
+
+ require.Equal(t, Stats{
+ GPU: []GPUStats{
+ {
+ ID: "00000000:01:00.0",
+ Name: "GeForce GTX 1080",
+ MemoryTotal: 8119 * 1024 * 1024,
+ MemoryUsed: 2006 * 1024 * 1024,
+ Usage: 32,
+ UsageEncoder: 17,
+ UsageDecoder: 25,
+ },
+ },
+ }, nv)
+}
+
+func TestParseProcess(t *testing.T) {
+ data, err := os.ReadFile("./fixtures/process.txt")
+ require.NoError(t, err)
+
+ lines := bytes.Split(data, []byte("\n"))
+ process := map[int32]Process{}
+
+ for _, line := range lines {
+ p, err := parseProcess(line)
+ if err != nil {
+ continue
+ }
+
+ process[p.PID] = p
+ }
+
+ require.Equal(t, map[int32]Process{
+ 7372: {
+ Index: 0,
+ PID: 7372,
+ Memory: 136 * 1024 * 1024,
+ Usage: 2,
+ Encoder: 2,
+ Decoder: 0,
+ },
+ 12176: {
+ Index: 0,
+ PID: 12176,
+ Memory: 782 * 1024 * 1024,
+ Usage: 7,
+ Encoder: 2,
+ Decoder: 6,
+ },
+ 20035: {
+ Index: 0,
+ PID: 20035,
+ Memory: 1145 * 1024 * 1024,
+ Usage: 7,
+ Encoder: 4,
+ Decoder: 3,
+ },
+ 20141: {
+ Index: 0,
+ PID: 20141,
+ Memory: 429 * 1024 * 1024,
+ Usage: 5,
+ Encoder: 1,
+ Decoder: 3,
+ },
+ 29591: {
+ Index: 0,
+ PID: 29591,
+ Memory: 435 * 1024 * 1024,
+ Usage: 0,
+ Encoder: 1,
+ Decoder: 1,
+ },
+ }, process)
+}
+
+func TestParseProcessNoProcesses(t *testing.T) {
+ data, err := os.ReadFile("./fixtures/process_noprocesses.txt")
+ require.NoError(t, err)
+
+ lines := bytes.Split(data, []byte("\n"))
+ process := map[int32]Process{}
+
+ for _, line := range lines {
+ p, err := parseProcess(line)
+ if err != nil {
+ continue
+ }
+
+ process[p.PID] = p
+ }
+
+ require.Equal(t, map[int32]Process{}, process)
+}
+
+func TestWriterQuery(t *testing.T) {
+ data, err := os.ReadFile("./fixtures/query2.xml")
+ require.NoError(t, err)
+
+ wr := &writerQuery{
+ ch: make(chan Stats, 1),
+ terminator: []byte(""),
+ }
+
+ stats := Stats{}
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+
+ for s := range wr.ch {
+ stats = s
+ }
+ }()
+
+ _, err = wr.Write(data)
+ require.NoError(t, err)
+
+ close(wr.ch)
+
+ wg.Wait()
+
+ require.Equal(t, Stats{
+ GPU: []GPUStats{
+ {
+ ID: "00000000:01:00.0",
+ Name: "NVIDIA L4",
+ Architecture: "Ada Lovelace",
+ MemoryTotal: 23034 * 1024 * 1024,
+ MemoryUsed: 1 * 1024 * 1024,
+ Usage: 2,
+ UsageEncoder: 0,
+ UsageDecoder: 0,
+ },
+ {
+ ID: "00000000:C1:00.0",
+ Name: "NVIDIA L4",
+ Architecture: "Ada Lovelace",
+ MemoryTotal: 23034 * 1024 * 1024,
+ MemoryUsed: 1 * 1024 * 1024,
+ Usage: 3,
+ UsageEncoder: 0,
+ UsageDecoder: 0,
+ },
+ },
+ }, stats)
+}
+
+func TestWriterProcess(t *testing.T) {
+ data, err := os.ReadFile("./fixtures/process.txt")
+ require.NoError(t, err)
+
+ wr := &writerProcess{
+ ch: make(chan Process, 32),
+ terminator: []byte("\n"),
+ }
+
+ process := map[int32]Process{}
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+ for p := range wr.ch {
+ process[p.PID] = p
+ }
+ }()
+
+ _, err = wr.Write(data)
+ require.NoError(t, err)
+
+ close(wr.ch)
+
+ wg.Wait()
+
+ require.Equal(t, map[int32]Process{
+ 7372: {
+ Index: 0,
+ PID: 7372,
+ Memory: 136 * 1024 * 1024,
+ Usage: 2,
+ Encoder: 2,
+ Decoder: 0,
+ },
+ 12176: {
+ Index: 0,
+ PID: 12176,
+ Memory: 782 * 1024 * 1024,
+ Usage: 7,
+ Encoder: 2,
+ Decoder: 6,
+ },
+ 20035: {
+ Index: 0,
+ PID: 20035,
+ Memory: 1145 * 1024 * 1024,
+ Usage: 7,
+ Encoder: 4,
+ Decoder: 3,
+ },
+ 20141: {
+ Index: 0,
+ PID: 20141,
+ Memory: 429 * 1024 * 1024,
+ Usage: 5,
+ Encoder: 1,
+ Decoder: 3,
+ },
+ 29591: {
+ Index: 0,
+ PID: 29591,
+ Memory: 435 * 1024 * 1024,
+ Usage: 0,
+ Encoder: 1,
+ Decoder: 1,
+ },
+ }, process)
+}
+
+func TestNvidiaGPUCount(t *testing.T) {
+ binary, err := testhelper.BuildBinary("nvidia-smi")
+ require.NoError(t, err, "Failed to build helper program")
+
+ nv := New(binary)
+
+ t.Cleanup(func() {
+ nv.Close()
+ })
+
+ _, ok := nv.(*dummy)
+ require.False(t, ok)
+
+ count, err := nv.Count()
+ require.NoError(t, err)
+ require.NotEqual(t, 0, count)
+}
+
+func TestNvidiaGPUStats(t *testing.T) {
+ binary, err := testhelper.BuildBinary("nvidia-smi")
+ require.NoError(t, err, "Failed to build helper program")
+
+ nv := New(binary)
+
+ t.Cleanup(func() {
+ nv.Close()
+ })
+
+ _, ok := nv.(*dummy)
+ require.False(t, ok)
+
+ stats, err := nv.Stats()
+ require.NoError(t, err)
+ require.Equal(t, []gpu.Stats{
+ {
+ Index: 0,
+ ID: "00000000:01:00.0",
+ Name: "NVIDIA L4",
+ Architecture: "Ada Lovelace",
+ MemoryTotal: 23034 * 1024 * 1024,
+ MemoryUsed: 1 * 1024 * 1024,
+ Usage: 2,
+ Encoder: 0,
+ Decoder: 0,
+ Process: []gpu.Process{
+ {
+ Index: 0,
+ PID: 7372,
+ Memory: 136 * 1024 * 1024,
+ Usage: 2,
+ Encoder: 2,
+ Decoder: 0,
+ },
+ {
+ Index: 0,
+ PID: 12176,
+ Memory: 782 * 1024 * 1024,
+ Usage: 5,
+ Encoder: 3,
+ Decoder: 7,
+ },
+ {
+ Index: 0,
+ PID: 29591,
+ Memory: 435 * 1024 * 1024,
+ Usage: 2,
+ Encoder: 0,
+ Decoder: 2,
+ },
+ },
+ },
+ {
+ Index: 1,
+ ID: "00000000:C1:00.0",
+ Name: "NVIDIA L4",
+ Architecture: "Ada Lovelace",
+ MemoryTotal: 23034 * 1024 * 1024,
+ MemoryUsed: 1 * 1024 * 1024,
+ Usage: 3,
+ Encoder: 0,
+ Decoder: 0,
+ Process: []gpu.Process{
+ {
+ Index: 1,
+ PID: 20035,
+ Memory: 1145 * 1024 * 1024,
+ Usage: 8,
+ Encoder: 4,
+ Decoder: 1,
+ },
+ {
+ Index: 1,
+ PID: 20141,
+ Memory: 429 * 1024 * 1024,
+ Usage: 2,
+ Encoder: 1,
+ Decoder: 3,
+ },
+ },
+ },
+ }, stats)
+}
+
+func TestNvidiaGPUProcess(t *testing.T) {
+ binary, err := testhelper.BuildBinary("nvidia-smi")
+ require.NoError(t, err, "Failed to build helper program")
+
+ nv := New(binary)
+
+ t.Cleanup(func() {
+ nv.Close()
+ })
+
+ _, ok := nv.(*dummy)
+ require.False(t, ok)
+
+ proc, err := nv.Process(12176)
+ require.NoError(t, err)
+ require.Equal(t, gpu.Process{
+ Index: 0,
+ PID: 12176,
+ Memory: 782 * 1024 * 1024,
+ Usage: 5,
+ Encoder: 3,
+ Decoder: 7,
+ }, proc)
+}
diff --git a/psutil/process.go b/resources/psutil/process.go
similarity index 74%
rename from psutil/process.go
rename to resources/psutil/process.go
index 0789f553..bb2f9064 100644
--- a/psutil/process.go
+++ b/resources/psutil/process.go
@@ -5,24 +5,28 @@ import (
"sync"
"time"
+ "github.com/datarhei/core/v16/resources/psutil/gpu"
psprocess "github.com/shirou/gopsutil/v3/process"
)
type Process interface {
- // CPUPercent returns the current CPU load for this process only. The values
+ // CPU returns the current CPU load for this process only. The values
// are normed to the range of 0 to 100.
- CPUPercent() (*CPUInfoStat, error)
+ CPU() (*CPUInfo, error)
- // VirtualMemory returns the current memory usage in bytes of this process only.
- VirtualMemory() (uint64, error)
+ // Memory returns the current memory usage in bytes of this process only.
+ Memory() (uint64, error)
- // Stop will stop collecting CPU and memory data for this process.
- Stop()
+ // GPU returns the current GPU memory in bytes and usage in percent (0-100) of this process only.
+ GPU() (*GPUInfo, error)
- // Suspend will send SIGSTOP to the process
+ // Cancel will stop collecting CPU and memory data for this process.
+ Cancel()
+
+ // Suspend will send SIGSTOP to the process.
Suspend() error
- // Resume will send SIGCONT to the process
+ // Resume will send SIGCONT to the process.
Resume() error
}
@@ -42,6 +46,8 @@ type process struct {
statPreviousTime time.Time
nTicks uint64
memRSS uint64
+
+ gpu gpu.GPU
}
func (u *util) Process(pid int32) (Process, error) {
@@ -50,6 +56,7 @@ func (u *util) Process(pid int32) (Process, error) {
hasCgroup: u.hasCgroup,
cpuLimit: u.cpuLimit,
ncpu: u.ncpu,
+ gpu: u.gpu,
}
proc, err := psprocess.NewProcess(pid)
@@ -67,10 +74,6 @@ func (u *util) Process(pid int32) (Process, error) {
return p, nil
}
-func NewProcess(pid int32, limit bool) (Process, error) {
- return DefaultUtil.Process(pid)
-}
-
func (p *process) tickCPU(ctx context.Context, interval time.Duration) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
@@ -130,7 +133,7 @@ func (p *process) collectMemory() uint64 {
return info.RSS
}
-func (p *process) Stop() {
+func (p *process) Cancel() {
p.stopTicker()
}
@@ -142,7 +145,7 @@ func (p *process) Resume() error {
return p.proc.Resume()
}
-func (p *process) CPUPercent() (*CPUInfoStat, error) {
+func (p *process) CPU() (*CPUInfo, error) {
var diff float64
for {
@@ -167,7 +170,7 @@ func (p *process) CPUPercent() (*CPUInfoStat, error) {
diff = p.statCurrentTime.Sub(p.statPreviousTime).Seconds() * p.ncpu
}
- s := &CPUInfoStat{
+ s := &CPUInfo{
System: 0,
User: 0,
Idle: 0,
@@ -186,9 +189,28 @@ func (p *process) CPUPercent() (*CPUInfoStat, error) {
return s, nil
}
-func (p *process) VirtualMemory() (uint64, error) {
+func (p *process) Memory() (uint64, error) {
p.lock.RLock()
defer p.lock.RUnlock()
return p.memRSS, nil
}
+
+func (p *process) GPU() (*GPUInfo, error) {
+ info := &GPUInfo{
+ Index: -1,
+ }
+
+ proc, err := p.gpu.Process(p.pid)
+ if err != nil {
+ return info, nil
+ }
+
+ info.Index = proc.Index
+ info.MemoryUsed = proc.Memory
+ info.Usage = proc.Usage
+ info.Encoder = proc.Encoder
+ info.Decoder = proc.Decoder
+
+ return info, nil
+}
diff --git a/psutil/process_linux.go b/resources/psutil/process_linux.go
similarity index 100%
rename from psutil/process_linux.go
rename to resources/psutil/process_linux.go
diff --git a/psutil/process_other.go b/resources/psutil/process_other.go
similarity index 100%
rename from psutil/process_other.go
rename to resources/psutil/process_other.go
diff --git a/psutil/psutil.go b/resources/psutil/psutil.go
similarity index 74%
rename from psutil/psutil.go
rename to resources/psutil/psutil.go
index 0af65387..89070762 100644
--- a/psutil/psutil.go
+++ b/resources/psutil/psutil.go
@@ -13,6 +13,8 @@ import (
"sync"
"time"
+ psutilgpu "github.com/datarhei/core/v16/resources/psutil/gpu"
+
"github.com/shirou/gopsutil/v3/cpu"
"github.com/shirou/gopsutil/v3/disk"
"github.com/shirou/gopsutil/v3/mem"
@@ -39,25 +41,47 @@ var cgroup2Files = []string{
// https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/sect-cpu-example_usage
// https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
-var DefaultUtil Util
-
-func init() {
- DefaultUtil, _ = New("/sys/fs/cgroup")
+type DiskInfo struct {
+ Path string
+ Fstype string
+ Total uint64
+ Used uint64
+ InodesTotal uint64
+ InodesUsed uint64
}
-type MemoryInfoStat struct {
+type MemoryInfo struct {
Total uint64 // bytes
Available uint64 // bytes
Used uint64 // bytes
}
-type CPUInfoStat struct {
+type NetworkInfo struct {
+ Name string // interface name
+ BytesSent uint64 // number of bytes sent
+ BytesRecv uint64 // number of bytes received
+}
+
+type CPUInfo struct {
System float64 // percent 0-100
User float64 // percent 0-100
Idle float64 // percent 0-100
Other float64 // percent 0-100
}
+type GPUInfo struct {
+ Index int // Index of the GPU
+ ID string // Physical ID of the GPU (not populated for a specific process)
+ Name string // Name of the GPU (not populated for a specific process)
+
+ MemoryTotal uint64 // bytes (not populated for a specific process)
+ MemoryUsed uint64 // bytes
+
+ Usage float64 // percent 0-100
+ Encoder float64 // percent 0-100
+ Decoder float64 // percent 0-100
+}
+
type cpuTimesStat struct {
total float64 // seconds
system float64 // seconds
@@ -67,18 +91,26 @@ type cpuTimesStat struct {
}
type Util interface {
- Start()
- Stop()
+ Cancel()
// CPUCounts returns the number of cores, either logical or physical.
- CPUCounts(logical bool) (float64, error)
+ CPUCounts() (float64, error)
- // CPUPercent returns the current CPU load in percent. The values range
+ // CPU returns the current CPU load in percent. The values range
// from 0 to 100, independently of the number of logical cores.
- CPUPercent() (*CPUInfoStat, error)
- DiskUsage(path string) (*disk.UsageStat, error)
- VirtualMemory() (*MemoryInfoStat, error)
- NetIOCounters(pernic bool) ([]net.IOCountersStat, error)
+ CPU() (*CPUInfo, error)
+
+ // Disk returns the current usage of the partition specified by the path.
+ Disk(path string) (*DiskInfo, error)
+
+	// Memory returns the current memory usage.
+ Memory() (*MemoryInfo, error)
+
+ // Network returns the current network interface statistics per network adapter.
+ Network() ([]NetworkInfo, error)
+
+	// GPU returns the current usage for each GPU.
+ GPU() ([]GPUInfo, error)
// Process returns a process observer for a process with the given pid.
Process(pid int32) (Process, error)
@@ -102,11 +134,17 @@ type util struct {
statPrevious cpuTimesStat
statPreviousTime time.Time
nTicks uint64
- mem MemoryInfoStat
+ mem MemoryInfo
+
+ gpu psutilgpu.GPU
}
// New returns a new util, it will be started automatically
-func New(root string) (Util, error) {
+func New(root string, gpu psutilgpu.GPU) (Util, error) {
+ if len(root) == 0 {
+ root = "/sys/fs/cgroup"
+ }
+
u := &util{
root: os.DirFS(root),
}
@@ -122,7 +160,7 @@ func New(root string) (Util, error) {
if u.ncpu == 0 {
var err error
- u.ncpu, err = u.CPUCounts(true)
+ u.ncpu, err = u.CPUCounts()
if err != nil {
return nil, err
}
@@ -135,24 +173,23 @@ func New(root string) (Util, error) {
u.mem = *mem
- u.stopOnce.Do(func() {})
+ u.gpu = gpu
+ if u.gpu == nil {
+ u.gpu = psutilgpu.NewNilGPU()
+ }
- u.Start()
+ ctx, cancel := context.WithCancel(context.Background())
+ u.stopTicker = cancel
- return u, nil
-}
+ go u.tickCPU(ctx, time.Second)
+ go u.tickMemory(ctx, time.Second)
-func (u *util) Start() {
- u.startOnce.Do(func() {
- ctx, cancel := context.WithCancel(context.Background())
- u.stopTicker = cancel
+ u.stopOnce = sync.Once{}
- go u.tickCPU(ctx, time.Second)
- go u.tickMemory(ctx, time.Second)
- })
+ return u, nil
}
-func (u *util) Stop() {
+func (u *util) Cancel() {
u.stopOnce.Do(func() {
u.stopTicker()
@@ -293,7 +330,7 @@ func (u *util) tickMemory(ctx context.Context, interval time.Duration) {
}
}
-func (u *util) collectMemory() *MemoryInfoStat {
+func (u *util) collectMemory() *MemoryInfo {
stat, err := u.virtualMemory()
if err != nil {
return nil
@@ -302,12 +339,12 @@ func (u *util) collectMemory() *MemoryInfoStat {
return stat
}
-func (u *util) CPUCounts(logical bool) (float64, error) {
+func (u *util) CPUCounts() (float64, error) {
if u.hasCgroup && u.ncpu > 0 {
return u.ncpu, nil
}
- ncpu, err := cpu.Counts(logical)
+ ncpu, err := cpu.Counts(true)
if err != nil {
return 0, err
}
@@ -315,10 +352,6 @@ func (u *util) CPUCounts(logical bool) (float64, error) {
return float64(ncpu), nil
}
-func CPUCounts(logical bool) (float64, error) {
- return DefaultUtil.CPUCounts(logical)
-}
-
// cpuTimes returns the current cpu usage times in seconds.
func (u *util) cpuTimes() (*cpuTimesStat, error) {
if u.hasCgroup && u.cpuLimit > 0 {
@@ -353,7 +386,7 @@ func (u *util) cpuTimes() (*cpuTimesStat, error) {
return s, nil
}
-func (u *util) CPUPercent() (*CPUInfoStat, error) {
+func (u *util) CPU() (*CPUInfo, error) {
var total float64
for {
@@ -378,7 +411,7 @@ func (u *util) CPUPercent() (*CPUInfoStat, error) {
total = (u.statCurrent.total - u.statPrevious.total)
}
- s := &CPUInfoStat{
+ s := &CPUInfo{
System: 0,
User: 0,
Idle: 100,
@@ -401,10 +434,6 @@ func (u *util) CPUPercent() (*CPUInfoStat, error) {
return s, nil
}
-func CPUPercent() (*CPUInfoStat, error) {
- return DefaultUtil.CPUPercent()
-}
-
func (u *util) cgroupCPUTimes(version int) (*cpuTimesStat, error) {
info := &cpuTimesStat{}
@@ -438,15 +467,25 @@ func (u *util) cgroupCPUTimes(version int) (*cpuTimesStat, error) {
return info, nil
}
-func (u *util) DiskUsage(path string) (*disk.UsageStat, error) {
- return disk.Usage(path)
-}
+func (u *util) Disk(path string) (*DiskInfo, error) {
+ usage, err := disk.Usage(path)
+ if err != nil {
+ return nil, err
+ }
-func DiskUsage(path string) (*disk.UsageStat, error) {
- return DefaultUtil.DiskUsage(path)
+ info := &DiskInfo{
+ Path: usage.Path,
+ Fstype: usage.Fstype,
+ Total: usage.Total,
+ Used: usage.Used,
+ InodesTotal: usage.InodesTotal,
+ InodesUsed: usage.InodesUsed,
+ }
+
+ return info, nil
}
-func (u *util) virtualMemory() (*MemoryInfoStat, error) {
+func (u *util) virtualMemory() (*MemoryInfo, error) {
info, err := mem.VirtualMemory()
if err != nil {
return nil, err
@@ -461,18 +500,18 @@ func (u *util) virtualMemory() (*MemoryInfoStat, error) {
}
}
- return &MemoryInfoStat{
+ return &MemoryInfo{
Total: info.Total,
Available: info.Available,
Used: info.Used,
}, nil
}
-func (u *util) VirtualMemory() (*MemoryInfoStat, error) {
+func (u *util) Memory() (*MemoryInfo, error) {
u.lock.RLock()
defer u.lock.RUnlock()
- stat := &MemoryInfoStat{
+ stat := &MemoryInfo{
Total: u.mem.Total,
Available: u.mem.Available,
Used: u.mem.Used,
@@ -481,12 +520,8 @@ func (u *util) VirtualMemory() (*MemoryInfoStat, error) {
return stat, nil
}
-func VirtualMemory() (*MemoryInfoStat, error) {
- return DefaultUtil.VirtualMemory()
-}
-
-func (u *util) cgroupVirtualMemory(version int) (*MemoryInfoStat, error) {
- info := &MemoryInfoStat{}
+func (u *util) cgroupVirtualMemory(version int) (*MemoryInfo, error) {
+ info := &MemoryInfo{}
if version == 1 {
lines, err := u.readFile("memory/memory.limit_in_bytes")
@@ -541,12 +576,23 @@ func (u *util) cgroupVirtualMemory(version int) (*MemoryInfoStat, error) {
return info, nil
}
-func (u *util) NetIOCounters(pernic bool) ([]net.IOCountersStat, error) {
- return net.IOCounters(pernic)
-}
+func (u *util) Network() ([]NetworkInfo, error) {
+ netio, err := net.IOCounters(true)
+ if err != nil {
+ return nil, err
+ }
+
+ info := []NetworkInfo{}
-func NetIOCounters(pernic bool) ([]net.IOCountersStat, error) {
- return DefaultUtil.NetIOCounters(pernic)
+ for _, io := range netio {
+ info = append(info, NetworkInfo{
+ Name: io.Name,
+ BytesSent: io.BytesSent,
+ BytesRecv: io.BytesRecv,
+ })
+ }
+
+ return info, nil
}
func (u *util) readFile(path string) ([]string, error) {
@@ -584,3 +630,27 @@ func cpuTotal(c *cpu.TimesStat) float64 {
return c.User + c.System + c.Idle + c.Nice + c.Iowait + c.Irq +
c.Softirq + c.Steal + c.Guest + c.GuestNice
}
+
+func (u *util) GPU() ([]GPUInfo, error) {
+ nvstats, err := u.gpu.Stats()
+ if err != nil {
+ return nil, err
+ }
+
+ stats := []GPUInfo{}
+
+ for _, nv := range nvstats {
+ stats = append(stats, GPUInfo{
+ Index: nv.Index,
+ ID: nv.ID,
+ Name: nv.Name,
+ MemoryTotal: nv.MemoryTotal,
+ MemoryUsed: nv.MemoryUsed,
+ Usage: nv.Usage,
+ Encoder: nv.Encoder,
+ Decoder: nv.Decoder,
+ })
+ }
+
+ return stats, nil
+}
diff --git a/psutil/psutil_test.go b/resources/psutil/psutil_test.go
similarity index 98%
rename from psutil/psutil_test.go
rename to resources/psutil/psutil_test.go
index b4aaae9f..ae5b3095 100644
--- a/psutil/psutil_test.go
+++ b/resources/psutil/psutil_test.go
@@ -8,7 +8,7 @@ import (
)
func getUtil(path string) *util {
- u, _ := New(path)
+ u, _ := New(path, nil)
return u.(*util)
}
diff --git a/resources/resources.go b/resources/resources.go
index d7255f05..33edd2f2 100644
--- a/resources/resources.go
+++ b/resources/resources.go
@@ -8,12 +8,29 @@ import (
"time"
"github.com/datarhei/core/v16/log"
- "github.com/datarhei/core/v16/psutil"
+ "github.com/datarhei/core/v16/resources/psutil"
+ "github.com/datarhei/core/v16/slices"
)
type Info struct {
Mem MemoryInfo
CPU CPUInfo
+ GPU GPUInfo
+}
+
+type DiskInfo struct {
+ Path string
+ Fstype string
+ Total uint64
+ Used uint64
+ InodesTotal uint64
+ InodesUsed uint64
+}
+
+type NetworkInfo struct {
+ Name string // interface name
+ BytesSent uint64 // number of bytes sent
+ BytesRecv uint64 // number of bytes received
}
type MemoryInfo struct {
@@ -38,6 +55,45 @@ type CPUInfo struct {
Error error
}
+type GPUInfo struct {
+ NGPU float64 // number of gpus
+ GPU []GPUInfoStat
+ Error error
+}
+
+type GPUInfoStat struct {
+ Index int
+ ID string
+ Name string
+
+ // Memory
+ MemoryTotal uint64 // bytes
+ MemoryUsed uint64 // bytes
+ MemoryAvailable uint64 // bytes
+ MemoryLimit uint64 // bytes
+
+ // GPU
+ Usage float64 // percent 0-100
+ Encoder float64 // percent 0-100
+ Decoder float64 // percent 0-100
+ UsageLimit float64 // percent 0-100
+
+ Throttling bool
+}
+
+type Request struct {
+ CPU float64 // percent 0-100*ncpu
+ Memory uint64 // bytes
+ GPUUsage float64 // percent 0-100
+ GPUEncoder float64 // percent 0-100
+ GPUDecoder float64 // percent 0-100
+ GPUMemory uint64 // bytes
+}
+
+type Response struct {
+ GPU int // GPU number, hwdevice
+}
+
type resources struct {
psutil psutil.Util
@@ -45,52 +101,80 @@ type resources struct {
maxCPU float64 // percent 0-100*ncpu
maxMemory uint64 // bytes
+ ngpu int
+ maxGPU float64 // general usage, percent 0-100
+ maxGPUMemory float64 // memory usage, percent 0-100
+
isUnlimited bool
isCPULimiting bool
isMemoryLimiting bool
+ isGPULimiting []bool
self psutil.Process
cancelObserver context.CancelFunc
- lock sync.RWMutex
- startOnce sync.Once
- stopOnce sync.Once
+ lock sync.RWMutex
+ stopOnce sync.Once
logger log.Logger
}
type Resources interface {
- Start()
- Stop()
+ Cancel()
// HasLimits returns whether any limits have been set.
HasLimits() bool
- // Limits returns the CPU (percent 0-100) and memory (bytes) limits.
- Limits() (float64, uint64)
+	// Limits returns the CPU limit (percent 0-100), the memory limit (bytes), and the GPU usage and GPU memory limits (each percent 0-100).
+ Limits() (float64, uint64, float64, float64)
- // ShouldLimit returns whether cpu and/or memory is currently limited.
- ShouldLimit() (bool, bool)
+ // ShouldLimit returns whether cpu, memory, and/or GPU is currently limited.
+ ShouldLimit() (bool, bool, []bool)
// Request checks whether the requested resources are available.
- Request(cpu float64, memory uint64) error
+ Request(req Request) (Response, error)
- // Info returns the current resource usage
+ // Info returns the current resource usage.
Info() Info
+
+ Disk(path string) (*DiskInfo, error)
+ Network() ([]NetworkInfo, error)
+
+ Process(pid int32) (Process, error)
}
type Config struct {
- MaxCPU float64 // percent 0-100
- MaxMemory float64 // percent 0-100
- PSUtil psutil.Util
- Logger log.Logger
+ MaxCPU float64 // percent 0-100
+ MaxMemory float64 // percent 0-100
+	MaxGPU       float64 // general, encoder, and decoder usage, percent 0-100
+ MaxGPUMemory float64 // memory usage, percent 0-100
+ PSUtil psutil.Util
+ Logger log.Logger
}
func New(config Config) (Resources, error) {
+ if config.PSUtil == nil {
+ psutil, err := psutil.New("", nil)
+ if err != nil {
+ return nil, fmt.Errorf("unable to initialize psutils: %w", err)
+ }
+ config.PSUtil = psutil
+ }
+
+ gpu, err := config.PSUtil.GPU()
+ if err != nil {
+ return nil, fmt.Errorf("unable to determine number of GPUs: %w", err)
+ }
+
+ if len(gpu) == 0 {
+ config.MaxGPU = 0
+ config.MaxGPUMemory = 0
+ }
+
isUnlimited := false
- if config.MaxCPU <= 0 && config.MaxMemory <= 0 {
+ if config.MaxCPU <= 0 && config.MaxMemory <= 0 && config.MaxGPU <= 0 && config.MaxGPUMemory <= 0 {
isUnlimited = true
}
@@ -102,31 +186,39 @@ func New(config Config) (Resources, error) {
config.MaxMemory = 100
}
- if config.MaxCPU > 100 || config.MaxMemory > 100 {
- return nil, fmt.Errorf("both MaxCPU and MaxMemory must have a range of 0-100")
+ if config.MaxGPU <= 0 {
+ config.MaxGPU = 100
+ }
+
+ if config.MaxGPUMemory <= 0 {
+ config.MaxGPUMemory = 100
+ }
+
+ if config.MaxCPU > 100 || config.MaxMemory > 100 || config.MaxGPU > 100 || config.MaxGPUMemory > 100 {
+ return nil, fmt.Errorf("all Max... values must have a range of 0-100")
}
r := &resources{
- maxCPU: config.MaxCPU,
- psutil: config.PSUtil,
- isUnlimited: isUnlimited,
- logger: config.Logger,
+ maxCPU: config.MaxCPU,
+ maxGPU: config.MaxGPU,
+ maxGPUMemory: config.MaxGPUMemory,
+ psutil: config.PSUtil,
+ isUnlimited: isUnlimited,
+ ngpu: len(gpu),
+ isGPULimiting: make([]bool, len(gpu)),
+ logger: config.Logger,
}
if r.logger == nil {
r.logger = log.New("")
}
- if r.psutil == nil {
- r.psutil = psutil.DefaultUtil
- }
-
- vmstat, err := r.psutil.VirtualMemory()
+ vmstat, err := r.psutil.Memory()
if err != nil {
return nil, fmt.Errorf("unable to determine available memory: %w", err)
}
- ncpu, err := r.psutil.CPUCounts(true)
+ ncpu, err := r.psutil.CPUCounts()
if err != nil {
return nil, fmt.Errorf("unable to determine number of logical CPUs: %w", err)
}
@@ -137,42 +229,38 @@ func New(config Config) (Resources, error) {
r.maxMemory = uint64(float64(vmstat.Total) * config.MaxMemory / 100)
r.logger = r.logger.WithFields(log.Fields{
- "ncpu": r.ncpu,
- "max_cpu": r.maxCPU,
- "max_memory": r.maxMemory,
+ "ncpu": r.ncpu,
+ "max_cpu": r.maxCPU,
+ "max_memory": r.maxMemory,
+ "ngpu": len(gpu),
+ "max_gpu": r.maxGPU,
+ "max_gpu_memory": r.maxGPUMemory,
})
- r.self, err = psutil.NewProcess(int32(os.Getpid()), false)
+ r.self, err = r.psutil.Process(int32(os.Getpid()))
if err != nil {
return nil, fmt.Errorf("unable to create process observer for self: %w", err)
}
r.logger.Debug().Log("Created")
- r.stopOnce.Do(func() {})
+ ctx, cancel := context.WithCancel(context.Background())
+ r.cancelObserver = cancel
- return r, nil
-}
-
-func (r *resources) Start() {
- r.startOnce.Do(func() {
- ctx, cancel := context.WithCancel(context.Background())
- r.cancelObserver = cancel
+ go r.observe(ctx, time.Second)
- go r.observe(ctx, time.Second)
+ r.stopOnce = sync.Once{}
- r.stopOnce = sync.Once{}
+ r.logger.Info().Log("Started")
- r.logger.Info().Log("Started")
- })
+ return r, nil
}
-func (r *resources) Stop() {
+func (r *resources) Cancel() {
r.stopOnce.Do(func() {
r.cancelObserver()
- r.self.Stop()
-
- r.startOnce = sync.Once{}
+ r.psutil.Cancel()
+ r.self.Cancel()
r.logger.Info().Log("Stopped")
})
@@ -189,7 +277,12 @@ func (r *resources) observe(ctx context.Context, interval time.Duration) {
case <-ctx.Done():
return
case <-ticker.C:
- cpustat, err := r.psutil.CPUPercent()
+ if r.isUnlimited {
+ // If there aren't any limits imposed, don't do anything
+ continue
+ }
+
+ cpustat, err := r.psutil.CPU()
if err != nil {
r.logger.Warn().WithError(err).Log("Failed to determine system CPU usage")
continue
@@ -197,12 +290,18 @@ func (r *resources) observe(ctx context.Context, interval time.Duration) {
cpuload := (cpustat.User + cpustat.System + cpustat.Other) * r.ncpu
- vmstat, err := r.psutil.VirtualMemory()
+ vmstat, err := r.psutil.Memory()
if err != nil {
r.logger.Warn().WithError(err).Log("Failed to determine system memory usage")
continue
}
+ gpustat, err := r.psutil.GPU()
+ if err != nil {
+ r.logger.Warn().WithError(err).Log("Failed to determine GPU usage")
+ continue
+ }
+
r.logger.Debug().WithFields(log.Fields{
"cur_cpu": cpuload,
"cur_memory": vmstat.Used,
@@ -210,34 +309,46 @@ func (r *resources) observe(ctx context.Context, interval time.Duration) {
doCPULimit := false
- if !r.isUnlimited {
- if !r.isCPULimiting {
- if cpuload >= r.maxCPU {
- r.logger.Debug().WithField("cpu", cpuload).Log("CPU limit reached")
- doCPULimit = true
- }
- } else {
+ if !r.isCPULimiting {
+ if cpuload >= r.maxCPU {
+ r.logger.Debug().WithField("cpu", cpuload).Log("CPU limit reached")
doCPULimit = true
- if cpuload < r.maxCPU {
- r.logger.Debug().WithField("cpu", cpuload).Log("CPU limit released")
- doCPULimit = false
- }
+ }
+ } else {
+ doCPULimit = true
+ if cpuload < r.maxCPU {
+ r.logger.Debug().WithField("cpu", cpuload).Log("CPU limit released")
+ doCPULimit = false
}
}
doMemoryLimit := false
- if !r.isUnlimited {
- if !r.isMemoryLimiting {
- if vmstat.Used >= r.maxMemory {
- r.logger.Debug().WithField("memory", vmstat.Used).Log("Memory limit reached")
- doMemoryLimit = true
+ if !r.isMemoryLimiting {
+ if vmstat.Used >= r.maxMemory {
+ r.logger.Debug().WithField("memory", vmstat.Used).Log("Memory limit reached")
+ doMemoryLimit = true
+ }
+ } else {
+ doMemoryLimit = true
+ if vmstat.Used < r.maxMemory {
+ r.logger.Debug().WithField("memory", vmstat.Used).Log("Memory limit released")
+ doMemoryLimit = false
+ }
+ }
+
+ doGPULimit := make([]bool, r.ngpu)
+
+ for i, limiting := range r.isGPULimiting {
+ maxMemory := uint64(r.maxGPUMemory * float64(gpustat[i].MemoryTotal) / 100)
+ if !limiting {
+ if gpustat[i].MemoryUsed >= maxMemory || (gpustat[i].Usage >= r.maxGPU && gpustat[i].Encoder >= r.maxGPU && gpustat[i].Decoder >= r.maxGPU) {
+ doGPULimit[i] = true
}
} else {
- doMemoryLimit = true
- if vmstat.Used < r.maxMemory {
- r.logger.Debug().WithField("memory", vmstat.Used).Log("Memory limit released")
- doMemoryLimit = false
+ doGPULimit[i] = true
+ if gpustat[i].MemoryUsed < maxMemory && (gpustat[i].Usage < r.maxGPU || gpustat[i].Encoder < r.maxGPU || gpustat[i].Decoder < r.maxGPU) {
+ doGPULimit[i] = false
}
}
}
@@ -247,17 +358,26 @@ func (r *resources) observe(ctx context.Context, interval time.Duration) {
r.logger.Warn().WithFields(log.Fields{
"enabled": doCPULimit,
}).Log("Limiting CPU")
-
- r.isCPULimiting = doCPULimit
}
+ r.isCPULimiting = doCPULimit
if r.isMemoryLimiting != doMemoryLimit {
r.logger.Warn().WithFields(log.Fields{
"enabled": doMemoryLimit,
}).Log("Limiting memory")
-
- r.isMemoryLimiting = doMemoryLimit
}
+ r.isMemoryLimiting = doMemoryLimit
+
+ for i, limiting := range r.isGPULimiting {
+ if limiting != doGPULimit[i] {
+ r.logger.Warn().WithFields(log.Fields{
+ "enabled": doGPULimit,
+ "index": i,
+ }).Log("Limiting GPU")
+ }
+ }
+ r.isGPULimiting = doGPULimit
+
r.lock.Unlock()
}
}
@@ -267,60 +387,136 @@ func (r *resources) HasLimits() bool {
return !r.isUnlimited
}
-func (r *resources) Limits() (float64, uint64) {
- return r.maxCPU / r.ncpu, r.maxMemory
+func (r *resources) Limits() (float64, uint64, float64, float64) {
+ return r.maxCPU / r.ncpu, r.maxMemory, r.maxGPU, r.maxGPUMemory
}
-func (r *resources) ShouldLimit() (bool, bool) {
+func (r *resources) ShouldLimit() (bool, bool, []bool) {
r.lock.RLock()
defer r.lock.RUnlock()
- return r.isCPULimiting, r.isMemoryLimiting
+ return r.isCPULimiting, r.isMemoryLimiting, slices.Copy(r.isGPULimiting)
}
-func (r *resources) Request(cpu float64, memory uint64) error {
+func (r *resources) Request(req Request) (Response, error) {
+ res := Response{
+ GPU: -1,
+ }
+
r.lock.RLock()
defer r.lock.RUnlock()
logger := r.logger.WithFields(log.Fields{
- "req_cpu": cpu,
- "req_memory": memory,
+ "req_cpu": req.CPU,
+ "req_memory": req.Memory,
+ "req_gpu": req.GPUUsage,
+ "req_gpu_encoder": req.GPUEncoder,
+ "req_gpu_decoder": req.GPUDecoder,
+ "req_gpu_memory": req.GPUMemory,
})
logger.Debug().Log("Request for acquiring resources")
+ // Check if anything is currently limiting.
if r.isCPULimiting || r.isMemoryLimiting {
logger.Debug().Log("Rejected, currently limiting")
- return fmt.Errorf("resources are currenlty actively limited")
+ return res, fmt.Errorf("resources are currenlty actively limited")
}
- if cpu <= 0 || memory == 0 {
+ // Check if the requested resources are valid.
+ if req.CPU <= 0 || req.Memory == 0 {
logger.Debug().Log("Rejected, invalid values")
- return fmt.Errorf("the cpu and/or memory values are invalid: cpu=%f, memory=%d", cpu, memory)
+ return res, fmt.Errorf("the cpu and/or memory values are invalid: cpu=%f, memory=%d", req.CPU, req.Memory)
}
- cpustat, err := r.psutil.CPUPercent()
+ // Get current CPU and memory values.
+ cpustat, err := r.psutil.CPU()
if err != nil {
r.logger.Warn().WithError(err).Log("Failed to determine system CPU usage")
- return fmt.Errorf("the system CPU usage couldn't be determined")
+ return res, fmt.Errorf("the system CPU usage couldn't be determined")
}
cpuload := (cpustat.User + cpustat.System + cpustat.Other) * r.ncpu
- vmstat, err := r.psutil.VirtualMemory()
+ vmstat, err := r.psutil.Memory()
if err != nil {
r.logger.Warn().WithError(err).Log("Failed to determine system memory usage")
- return fmt.Errorf("the system memory usage couldn't be determined")
+ return res, fmt.Errorf("the system memory usage couldn't be determined")
}
- if cpuload+cpu > r.maxCPU {
+ // Check if enough resources are available
+ if cpuload+req.CPU > r.maxCPU {
logger.Debug().WithField("cur_cpu", cpuload).Log("Rejected, CPU limit exceeded")
- return fmt.Errorf("the CPU limit would be exceeded: %f + %f > %f", cpuload, cpu, r.maxCPU)
+ return res, fmt.Errorf("the CPU limit would be exceeded: %f + %f > %f", cpuload, req.CPU, r.maxCPU)
}
- if vmstat.Used+memory > r.maxMemory {
+ if vmstat.Used+req.Memory > r.maxMemory {
logger.Debug().WithField("cur_memory", vmstat.Used).Log("Rejected, memory limit exceeded")
- return fmt.Errorf("the memory limit would be exceeded: %d + %d > %d", vmstat.Used, memory, r.maxMemory)
+ return res, fmt.Errorf("the memory limit would be exceeded: %d + %d > %d", vmstat.Used, req.Memory, r.maxMemory)
+ }
+
+ // Check if any GPU resources are requested
+ if req.GPUUsage > 0 || req.GPUEncoder > 0 || req.GPUDecoder > 0 || req.GPUMemory > 0 {
+ if req.GPUUsage < 0 || req.GPUEncoder < 0 || req.GPUDecoder < 0 || req.GPUMemory == 0 {
+ logger.Debug().Log("Rejected, invalid values")
+ return res, fmt.Errorf("the gpu usage and memory values are invalid: usage=%f, encoder=%f, decoder=%f, memory=%d", req.GPUUsage, req.GPUEncoder, req.GPUDecoder, req.GPUMemory)
+ }
+
+ // Get current GPU values
+ gpustat, err := r.psutil.GPU()
+ if err != nil {
+ r.logger.Warn().WithError(err).Log("Failed to determine GPU usage")
+ return res, fmt.Errorf("the GPU usage couldn't be determined")
+ }
+
+ if len(gpustat) == 0 {
+ r.logger.Debug().WithError(err).Log("GPU resources requested but no GPU available")
+ return res, fmt.Errorf("some GPU resources requested but no GPU available")
+ }
+
+ foundGPU := -1
+ for _, g := range gpustat {
+ if req.GPUUsage > 0 && g.Usage+req.GPUUsage > r.maxGPU {
+ logger.Debug().WithFields(log.Fields{"id": g.Index, "cur_gpu": g.Usage}).Log("Rejected, GPU usage limit exceeded")
+ continue
+ }
+
+ if req.GPUEncoder > 0 && g.Encoder+req.GPUEncoder > r.maxGPU {
+ logger.Debug().WithFields(log.Fields{"id": g.Index, "cur_gpu_encoder": g.Usage}).Log("Rejected, GPU encoder usage limit exceeded")
+ continue
+ }
+
+ if req.GPUDecoder > 0 && g.Decoder+req.GPUDecoder > r.maxGPU {
+ logger.Debug().WithFields(log.Fields{"id": g.Index, "cur_gpu_decoder": g.Usage}).Log("Rejected, GPU decoder usage limit exceeded")
+ continue
+ }
+
+ gpuMemoryUsage := float64(g.MemoryUsed) / float64(g.MemoryTotal) * 100
+ requestedGPUMemoryUsage := float64(req.GPUMemory) / float64(g.MemoryTotal) * 100
+
+ if gpuMemoryUsage+requestedGPUMemoryUsage > r.maxGPUMemory {
+ logger.Debug().WithFields(log.Fields{"id": g.Index, "cur_gpu_memory": gpuMemoryUsage}).Log("Rejected, GPU memory usage limit exceeded")
+ continue
+ }
+
+ foundGPU = g.Index
+
+ logger = logger.Debug().WithFields(log.Fields{
+ "cur_gpu": foundGPU,
+ "cur_gpu_general": g.Usage,
+ "cur_gpu_encoder": g.Encoder,
+ "cur_gpu_decoder": g.Decoder,
+ "cur_gpu_memory": gpuMemoryUsage,
+ })
+
+ break
+ }
+
+ if foundGPU < 0 {
+ return res, fmt.Errorf("all GPU usage limits are exceeded")
+ }
+
+ res.GPU = foundGPU
}
logger.Debug().WithFields(log.Fields{
@@ -328,17 +524,18 @@ func (r *resources) Request(cpu float64, memory uint64) error {
"cur_memory": vmstat.Used,
}).Log("Acquiring approved")
- return nil
+ return res, nil
}
func (r *resources) Info() Info {
- cpulimit, memlimit := r.Limits()
- cputhrottling, memthrottling := r.ShouldLimit()
+ cpulimit, memlimit, gpulimit, gpumemlimit := r.Limits()
+ cputhrottling, memthrottling, gputhrottling := r.ShouldLimit()
- cpustat, cpuerr := r.psutil.CPUPercent()
- memstat, memerr := r.psutil.VirtualMemory()
- selfcpu, _ := r.self.CPUPercent()
- selfmem, _ := r.self.VirtualMemory()
+ cpustat, cpuerr := r.psutil.CPU()
+ memstat, memerr := r.psutil.Memory()
+ gpustat, gpuerr := r.psutil.GPU()
+ selfcpu, _ := r.self.CPU()
+ selfmem, _ := r.self.Memory()
cpuinfo := CPUInfo{
NCPU: r.ncpu,
@@ -362,10 +559,174 @@ func (r *resources) Info() Info {
Error: memerr,
}
+ gpuinfo := GPUInfo{
+ NGPU: float64(len(gpustat)),
+ Error: gpuerr,
+ }
+
+ for i, g := range gpustat {
+ gpuinfo.GPU = append(gpuinfo.GPU, GPUInfoStat{
+ Index: g.Index,
+ ID: g.ID,
+ Name: g.Name,
+ MemoryTotal: g.MemoryTotal,
+ MemoryUsed: g.MemoryUsed,
+ MemoryAvailable: g.MemoryTotal - g.MemoryUsed,
+ MemoryLimit: uint64(float64(g.MemoryTotal) * gpumemlimit / 100),
+ Usage: g.Usage,
+ Encoder: g.Encoder,
+ Decoder: g.Decoder,
+ UsageLimit: gpulimit,
+ })
+
+ if i < len(gputhrottling) {
+ gpuinfo.GPU[i].Throttling = gputhrottling[i]
+ }
+ }
+
i := Info{
CPU: cpuinfo,
Mem: meminfo,
+ GPU: gpuinfo,
}
return i
}
+
+func (r *resources) Disk(path string) (*DiskInfo, error) {
+ info, err := r.psutil.Disk(path)
+ if err != nil {
+ return nil, err
+ }
+
+ diskinfo := &DiskInfo{
+ Path: info.Path,
+ Fstype: info.Fstype,
+ Total: info.Total,
+ Used: info.Used,
+ InodesTotal: info.InodesTotal,
+ InodesUsed: info.InodesUsed,
+ }
+
+ return diskinfo, nil
+}
+
+func (r *resources) Network() ([]NetworkInfo, error) {
+ netio, err := r.psutil.Network()
+ if err != nil {
+ return nil, err
+ }
+
+ info := []NetworkInfo{}
+
+ for _, io := range netio {
+ info = append(info, NetworkInfo{
+ Name: io.Name,
+ BytesSent: io.BytesSent,
+ BytesRecv: io.BytesRecv,
+ })
+ }
+
+ return info, nil
+}
+
+func (r *resources) Process(pid int32) (Process, error) {
+ proc, err := r.psutil.Process(pid)
+ if err != nil {
+ return nil, err
+ }
+
+ p := &process{
+ proc: proc,
+ }
+
+ return p, nil
+}
+
+type Process interface {
+ Info() (ProcessInfo, error)
+
+ // Cancel will stop collecting CPU and memory data for this process.
+ Cancel()
+
+ // Suspend will send SIGSTOP to the process.
+ Suspend() error
+
+ // Resume will send SIGCONT to the process.
+ Resume() error
+}
+
+type process struct {
+ proc psutil.Process
+}
+
+type ProcessInfoCPU struct {
+ System float64 // percent 0-100
+ User float64 // percent 0-100
+ Idle float64 // percent 0-100
+ Other float64 // percent 0-100
+}
+
+type ProcessInfoGPU struct {
+ Index int // Index of the GPU
+
+ MemoryUsed uint64 // bytes
+
+ Usage float64 // percent 0-100
+ Encoder float64 // percent 0-100
+ Decoder float64 // percent 0-100
+}
+
+type ProcessInfo struct {
+ CPU ProcessInfoCPU
+ Memory uint64
+ GPU ProcessInfoGPU
+}
+
+func (p *process) Info() (ProcessInfo, error) {
+ cpu, err := p.proc.CPU()
+ if err != nil {
+ return ProcessInfo{}, err
+ }
+
+ mem, err := p.proc.Memory()
+ if err != nil {
+ return ProcessInfo{}, err
+ }
+
+ gpu, err := p.proc.GPU()
+ if err != nil {
+ return ProcessInfo{}, err
+ }
+
+ pi := ProcessInfo{
+ CPU: ProcessInfoCPU{
+ System: cpu.System,
+ User: cpu.User,
+ Idle: cpu.Idle,
+ Other: cpu.Other,
+ },
+ Memory: mem,
+ GPU: ProcessInfoGPU{
+ Index: gpu.Index,
+ MemoryUsed: gpu.MemoryUsed,
+ Usage: gpu.Usage,
+ Encoder: gpu.Encoder,
+ Decoder: gpu.Decoder,
+ },
+ }
+
+ return pi, nil
+}
+
+func (p *process) Cancel() {
+ p.proc.Cancel()
+}
+
+func (p *process) Suspend() error {
+ return p.proc.Suspend()
+}
+
+func (p *process) Resume() error {
+ return p.proc.Resume()
+}
diff --git a/resources/resources_test.go b/resources/resources_test.go
index 0158c7f7..dba43503 100644
--- a/resources/resources_test.go
+++ b/resources/resources_test.go
@@ -1,60 +1,105 @@
package resources
import (
+ "slices"
"sync"
"testing"
"time"
- "github.com/datarhei/core/v16/psutil"
+ "github.com/datarhei/core/v16/internal/mock/psutil"
- "github.com/shirou/gopsutil/v3/disk"
- "github.com/shirou/gopsutil/v3/net"
"github.com/stretchr/testify/require"
)
-type util struct{}
+func TestConfigNoLimits(t *testing.T) {
+ _, err := New(Config{
+ PSUtil: psutil.New(0),
+ })
+ require.NoError(t, err)
+}
-func (u *util) Start() {}
-func (u *util) Stop() {}
+func TestConfigWrongLimits(t *testing.T) {
+ _, err := New(Config{
+ MaxCPU: 102,
+ MaxMemory: 573,
+ PSUtil: psutil.New(0),
+ })
+ require.Error(t, err)
-func (u *util) CPUCounts(logical bool) (float64, error) {
- return 2, nil
-}
+ _, err = New(Config{
+ MaxCPU: 0,
+ MaxMemory: 0,
+ MaxGPU: 101,
+ MaxGPUMemory: 103,
+ PSUtil: psutil.New(0),
+ })
+ require.NoError(t, err)
-func (u *util) CPUPercent() (*psutil.CPUInfoStat, error) {
- return &psutil.CPUInfoStat{
- System: 10,
- User: 50,
- Idle: 35,
- Other: 5,
- }, nil
+ _, err = New(Config{
+ MaxCPU: 0,
+ MaxMemory: 0,
+ MaxGPU: 101,
+ MaxGPUMemory: 103,
+ PSUtil: psutil.New(1),
+ })
+ require.Error(t, err)
}
-func (u *util) DiskUsage(path string) (*disk.UsageStat, error) {
- return &disk.UsageStat{}, nil
-}
+func TestMemoryLimit(t *testing.T) {
+ r, err := New(Config{
+ MaxCPU: 100,
+ MaxMemory: 150. / 200. * 100,
+ PSUtil: psutil.New(0),
+ Logger: nil,
+ })
+ require.NoError(t, err)
-func (u *util) VirtualMemory() (*psutil.MemoryInfoStat, error) {
- return &psutil.MemoryInfoStat{
- Total: 200,
- Available: 40,
- Used: 160,
- }, nil
-}
+ wg := sync.WaitGroup{}
+ wg.Add(1)
-func (u *util) NetIOCounters(pernic bool) ([]net.IOCountersStat, error) {
- return nil, nil
-}
+ limit := false
+
+ go func() {
+ defer func() {
+ wg.Done()
+ }()
+
+ timer := time.NewTimer(10 * time.Second)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ _, limit, _ = r.ShouldLimit()
+ if limit {
+ return
+ }
+ case <-timer.C:
+ return
+ }
+ }
+ }()
+
+ wg.Wait()
+
+ require.True(t, limit)
+
+ _, err = r.Request(Request{CPU: 5, Memory: 10})
+ require.Error(t, err)
-func (u *util) Process(pid int32) (psutil.Process, error) {
- return nil, nil
+ r.Cancel()
}
-func TestMemoryLimit(t *testing.T) {
+func TestMemoryUnlimit(t *testing.T) {
+ util := psutil.New(0)
+
r, err := New(Config{
MaxCPU: 100,
MaxMemory: 150. / 200. * 100,
- PSUtil: &util{},
+ PSUtil: util,
Logger: nil,
})
require.NoError(t, err)
@@ -78,7 +123,7 @@ func TestMemoryLimit(t *testing.T) {
for {
select {
case <-ticker.C:
- _, limit = r.ShouldLimit()
+ _, limit, _ = r.ShouldLimit()
if limit {
return
}
@@ -88,20 +133,55 @@ func TestMemoryLimit(t *testing.T) {
}
}()
- r.Start()
-
wg.Wait()
require.True(t, limit)
- r.Stop()
+ _, limit, _ = r.ShouldLimit()
+ require.True(t, limit)
+
+ util.Lock.Lock()
+ util.MemInfo.Used = 140
+ util.Lock.Unlock()
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ wg.Done()
+ }()
+
+ timer := time.NewTimer(10 * time.Second)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ _, limit, _ = r.ShouldLimit()
+ if !limit {
+ return
+ }
+ case <-timer.C:
+ return
+ }
+ }
+ }()
+
+ wg.Wait()
+
+ require.False(t, limit)
+
+ r.Cancel()
}
func TestCPULimit(t *testing.T) {
r, err := New(Config{
MaxCPU: 50.,
MaxMemory: 100,
- PSUtil: &util{},
+ PSUtil: psutil.New(0),
Logger: nil,
})
require.NoError(t, err)
@@ -125,7 +205,7 @@ func TestCPULimit(t *testing.T) {
for {
select {
case <-ticker.C:
- limit, _ = r.ShouldLimit()
+ limit, _, _ = r.ShouldLimit()
if limit {
return
}
@@ -135,42 +215,533 @@ func TestCPULimit(t *testing.T) {
}
}()
- r.Start()
+ wg.Wait()
+
+ require.True(t, limit)
+
+ _, err = r.Request(Request{CPU: 5, Memory: 10})
+ require.Error(t, err)
+
+ r.Cancel()
+}
+
+func TestCPUUnlimit(t *testing.T) {
+ util := psutil.New(0)
+
+ r, err := New(Config{
+ MaxCPU: 50.,
+ MaxMemory: 100,
+ PSUtil: util,
+ Logger: nil,
+ })
+ require.NoError(t, err)
+
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+
+ limit := false
+
+ go func() {
+ defer func() {
+ wg.Done()
+ }()
+
+ timer := time.NewTimer(10 * time.Second)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ limit, _, _ = r.ShouldLimit()
+ if limit {
+ return
+ }
+ case <-timer.C:
+ return
+ }
+ }
+ }()
wg.Wait()
require.True(t, limit)
- r.Stop()
+ limit, _, _ = r.ShouldLimit()
+ require.True(t, limit)
+
+ util.Lock.Lock()
+ util.CPUInfo.User = 20
+ util.Lock.Unlock()
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ wg.Done()
+ }()
+
+ timer := time.NewTimer(10 * time.Second)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ limit, _, _ = r.ShouldLimit()
+ if !limit {
+ return
+ }
+ case <-timer.C:
+ return
+ }
+ }
+ }()
+
+ wg.Wait()
+
+ require.False(t, limit)
+
+ r.Cancel()
}
-func TestRequest(t *testing.T) {
+func TestGPULimitMemory(t *testing.T) {
+ r, err := New(Config{
+ MaxCPU: 100,
+ MaxMemory: 100,
+ MaxGPU: 100,
+ MaxGPUMemory: 20,
+ PSUtil: psutil.New(2),
+ Logger: nil,
+ })
+ require.NoError(t, err)
+
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+
+ limit := []bool{}
+
+ go func() {
+ defer func() {
+ wg.Done()
+ }()
+
+ timer := time.NewTimer(10 * time.Second)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ _, _, limit = r.ShouldLimit()
+ if slices.Contains(limit, true) {
+ return
+ }
+ case <-timer.C:
+ return
+ }
+ }
+ }()
+
+ wg.Wait()
+
+ require.Contains(t, limit, true)
+
+ _, err = r.Request(Request{CPU: 5, Memory: 10, GPUUsage: 10, GPUMemory: 10})
+ require.Error(t, err)
+
+ r.Cancel()
+}
+
+func TestGPUUnlimitMemory(t *testing.T) {
+ util := psutil.New(2)
+
+ r, err := New(Config{
+ MaxCPU: 100,
+ MaxMemory: 100,
+ MaxGPU: 100,
+ MaxGPUMemory: 20,
+ PSUtil: util,
+ Logger: nil,
+ })
+ require.NoError(t, err)
+
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+
+ limit := []bool{}
+
+ go func() {
+ defer func() {
+ wg.Done()
+ }()
+
+ timer := time.NewTimer(10 * time.Second)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ _, _, limit = r.ShouldLimit()
+ if slices.Contains(limit, true) {
+ return
+ }
+ case <-timer.C:
+ return
+ }
+ }
+ }()
+
+ wg.Wait()
+
+ require.Contains(t, limit, true)
+
+ util.Lock.Lock()
+ util.GPUInfo[0].MemoryUsed = 10
+ util.GPUInfo[1].MemoryUsed = 10
+ util.Lock.Unlock()
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ wg.Done()
+ }()
+
+ timer := time.NewTimer(10 * time.Second)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ _, _, limit = r.ShouldLimit()
+ if !slices.Contains(limit, true) {
+ return
+ }
+ case <-timer.C:
+ return
+ }
+ }
+ }()
+
+ wg.Wait()
+
+ require.NotContains(t, limit, true)
+
+ r.Cancel()
+}
+
+func TestGPULimitMemorySome(t *testing.T) {
+ r, err := New(Config{
+ MaxCPU: 100,
+ MaxMemory: 100,
+ MaxGPU: 100,
+ MaxGPUMemory: 14. / 24. * 100.,
+ PSUtil: psutil.New(4),
+ Logger: nil,
+ })
+ require.NoError(t, err)
+
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+
+ limit := []bool{}
+
+ go func() {
+ defer func() {
+ wg.Done()
+ }()
+
+ timer := time.NewTimer(10 * time.Second)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ _, _, limit = r.ShouldLimit()
+ if slices.Contains(limit, true) {
+ return
+ }
+ case <-timer.C:
+ return
+ }
+ }
+ }()
+
+ wg.Wait()
+
+ require.Equal(t, []bool{false, false, true, true}, limit)
+
+ _, err = r.Request(Request{CPU: 5, Memory: 10, GPUUsage: 10, GPUMemory: 10})
+ require.NoError(t, err)
+
+ r.Cancel()
+}
+
+func TestGPULimitUsage(t *testing.T) {
+ r, err := New(Config{
+ MaxCPU: 100,
+ MaxMemory: 100,
+ MaxGPU: 40,
+ MaxGPUMemory: 100,
+ PSUtil: psutil.New(3),
+ Logger: nil,
+ })
+ require.NoError(t, err)
+
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+
+ limit := []bool{}
+
+ go func() {
+ defer func() {
+ wg.Done()
+ }()
+
+ timer := time.NewTimer(10 * time.Second)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ _, _, limit = r.ShouldLimit()
+ if slices.Contains(limit, true) {
+ return
+ }
+ case <-timer.C:
+ return
+ }
+ }
+ }()
+
+ wg.Wait()
+
+ require.Equal(t, []bool{true, false, false}, limit)
+
+ _, err = r.Request(Request{CPU: 5, Memory: 10, GPUUsage: 10, GPUMemory: 10})
+ require.Error(t, err)
+
+ _, err = r.Request(Request{CPU: 5, Memory: 10, GPUEncoder: 10, GPUMemory: 10})
+ require.NoError(t, err)
+
+ r.Cancel()
+}
+
+func TestGPUUnlimitUsage(t *testing.T) {
+ util := psutil.New(3)
+
+ r, err := New(Config{
+ MaxCPU: 100,
+ MaxMemory: 100,
+ MaxGPU: 40,
+ MaxGPUMemory: 100,
+ PSUtil: util,
+ Logger: nil,
+ })
+ require.NoError(t, err)
+
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+
+ limit := []bool{}
+
+ go func() {
+ defer func() {
+ wg.Done()
+ }()
+
+ timer := time.NewTimer(10 * time.Second)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ _, _, limit = r.ShouldLimit()
+ if slices.Contains(limit, true) {
+ return
+ }
+ case <-timer.C:
+ return
+ }
+ }
+ }()
+
+ wg.Wait()
+
+ require.Equal(t, []bool{true, false, false}, limit)
+
+ util.Lock.Lock()
+ util.GPUInfo[0].Usage = 30
+ util.GPUInfo[0].Encoder = 30
+ util.GPUInfo[0].Decoder = 30
+ util.Lock.Unlock()
+
+ wg.Add(1)
+
+ go func() {
+ defer func() {
+ wg.Done()
+ }()
+
+ timer := time.NewTimer(10 * time.Second)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(time.Second)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ _, _, limit = r.ShouldLimit()
+ if !slices.Contains(limit, true) {
+ return
+ }
+ case <-timer.C:
+ return
+ }
+ }
+ }()
+
+ wg.Wait()
+
+ require.Equal(t, []bool{false, false, false}, limit)
+
+ r.Cancel()
+}
+
+func TestRequestCPU(t *testing.T) {
+ r, err := New(Config{
+ MaxCPU: 70.,
+ PSUtil: psutil.New(0),
+ })
+ require.NoError(t, err)
+
+ _, err = r.Request(Request{CPU: 0, Memory: 0})
+ require.Error(t, err)
+
+ _, err = r.Request(Request{CPU: 5, Memory: 10})
+ require.NoError(t, err)
+
+ _, err = r.Request(Request{CPU: 30, Memory: 10})
+ require.Error(t, err)
+}
+
+func TestRequestMemory(t *testing.T) {
r, err := New(Config{
- MaxCPU: 70.,
MaxMemory: 170. / 200. * 100,
- PSUtil: &util{},
- Logger: nil,
+ PSUtil: psutil.New(0),
})
require.NoError(t, err)
- err = r.Request(-1, 0)
+ _, err = r.Request(Request{CPU: 5, Memory: 0})
require.Error(t, err)
- err = r.Request(5, 10)
+ _, err = r.Request(Request{CPU: 5, Memory: 10})
require.NoError(t, err)
- err = r.Request(5, 20)
+ _, err = r.Request(Request{CPU: 50, Memory: 20})
require.Error(t, err)
+}
- err = r.Request(10, 10)
+func TestRequestNoGPU(t *testing.T) {
+ r, err := New(Config{
+ MaxCPU: 100,
+ MaxMemory: 100,
+ PSUtil: psutil.New(0),
+ })
require.NoError(t, err)
+
+ _, err = r.Request(Request{CPU: 10, Memory: 10, GPUEncoder: 30, GPUMemory: 10})
+ require.Error(t, err)
+}
+
+func TestRequestInvalidGPURequest(t *testing.T) {
+ r, err := New(Config{
+ MaxCPU: 100,
+ MaxMemory: 100,
+ PSUtil: psutil.New(1),
+ })
+ require.NoError(t, err)
+
+ _, err = r.Request(Request{CPU: 10, Memory: 10, GPUEncoder: 30, GPUMemory: 0})
+ require.Error(t, err)
+
+ _, err = r.Request(Request{CPU: 10, Memory: 10, GPUUsage: -1, GPUEncoder: 30, GPUMemory: 0})
+ require.Error(t, err)
+}
+
+func TestRequestGPULimitsOneGPU(t *testing.T) {
+ r, err := New(Config{
+ MaxCPU: 100,
+ MaxMemory: 100,
+ MaxGPU: 50,
+ MaxGPUMemory: 60,
+ PSUtil: psutil.New(1),
+ })
+ require.NoError(t, err)
+
+ _, err = r.Request(Request{CPU: 10, Memory: 10, GPUUsage: 50, GPUMemory: 10})
+ require.Error(t, err)
+
+ _, err = r.Request(Request{CPU: 10, Memory: 10, GPUEncoder: 50, GPUMemory: 10})
+ require.Error(t, err)
+
+ _, err = r.Request(Request{CPU: 10, Memory: 10, GPUDecoder: 50, GPUMemory: 10})
+ require.Error(t, err)
+
+ _, err = r.Request(Request{CPU: 10, Memory: 10, GPUEncoder: 10, GPUMemory: 5 * 1024 * 1024 * 1024})
+ require.Error(t, err)
+
+ res, err := r.Request(Request{CPU: 10, Memory: 10, GPUEncoder: 10, GPUMemory: 10})
+ require.NoError(t, err)
+ require.Equal(t, 0, res.GPU)
+}
+
+func TestRequestGPULimitsMoreGPU(t *testing.T) {
+ r, err := New(Config{
+ MaxCPU: 100,
+ MaxMemory: 100,
+ MaxGPU: 60,
+ MaxGPUMemory: 60,
+ PSUtil: psutil.New(2),
+ })
+ require.NoError(t, err)
+
+ _, err = r.Request(Request{CPU: 10, Memory: 10, GPUEncoder: 50, GPUMemory: 10})
+ require.Error(t, err)
+
+ res, err := r.Request(Request{CPU: 10, Memory: 10, GPUEncoder: 30, GPUMemory: 10})
+ require.NoError(t, err)
+ require.Equal(t, 1, res.GPU)
}
func TestHasLimits(t *testing.T) {
r, err := New(Config{
MaxCPU: 70.,
MaxMemory: 170. / 200. * 100,
- PSUtil: &util{},
+ PSUtil: psutil.New(0),
Logger: nil,
})
require.NoError(t, err)
@@ -180,7 +751,7 @@ func TestHasLimits(t *testing.T) {
r, err = New(Config{
MaxCPU: 100,
MaxMemory: 100,
- PSUtil: &util{},
+ PSUtil: psutil.New(0),
Logger: nil,
})
require.NoError(t, err)
@@ -190,10 +761,97 @@ func TestHasLimits(t *testing.T) {
r, err = New(Config{
MaxCPU: 0,
MaxMemory: 0,
- PSUtil: &util{},
+ PSUtil: psutil.New(0),
Logger: nil,
})
require.NoError(t, err)
require.False(t, r.HasLimits())
+
+ r, err = New(Config{
+ MaxCPU: 0,
+ MaxMemory: 0,
+ MaxGPU: 10,
+ PSUtil: psutil.New(1),
+ Logger: nil,
+ })
+ require.NoError(t, err)
+
+ require.True(t, r.HasLimits())
+
+ r, err = New(Config{
+ MaxCPU: 0,
+ MaxMemory: 0,
+ MaxGPU: 10,
+ PSUtil: psutil.New(0),
+ Logger: nil,
+ })
+ require.NoError(t, err)
+
+ require.False(t, r.HasLimits())
+}
+
+func TestInfo(t *testing.T) {
+ r, err := New(Config{
+ MaxCPU: 90,
+ MaxMemory: 90,
+ MaxGPU: 11,
+ MaxGPUMemory: 50,
+ PSUtil: psutil.New(2),
+ })
+ require.NoError(t, err)
+
+ info := r.Info()
+
+ require.Equal(t, Info{
+ Mem: MemoryInfo{
+ Total: 200,
+ Available: 40,
+ Used: 160,
+ Limit: 180,
+ Core: 42,
+ Throttling: false,
+ Error: nil,
+ },
+ CPU: CPUInfo{
+ NCPU: 2,
+ System: 10,
+ User: 50,
+ Idle: 35,
+ Other: 5,
+ Limit: 90,
+ Core: 6,
+ Throttling: false,
+ Error: nil,
+ },
+ GPU: GPUInfo{
+ NGPU: 2,
+ GPU: []GPUInfoStat{{
+ Index: 0,
+ ID: "00000000:01:00.0",
+ Name: "L4",
+ MemoryTotal: 24 * 1024 * 1024 * 1024,
+ MemoryUsed: 12 * 1024 * 1024 * 1024,
+ MemoryAvailable: 12 * 1024 * 1024 * 1024,
+ MemoryLimit: 12 * 1024 * 1024 * 1024,
+ Usage: 45,
+ Encoder: 40,
+ Decoder: 47,
+ UsageLimit: 11,
+ }, {
+ Index: 1,
+ ID: "00000000:01:00.0",
+ Name: "L4",
+ MemoryTotal: 24 * 1024 * 1024 * 1024,
+ MemoryUsed: 13 * 1024 * 1024 * 1024,
+ MemoryAvailable: 11 * 1024 * 1024 * 1024,
+ MemoryLimit: 12 * 1024 * 1024 * 1024,
+ Usage: 40,
+ Encoder: 30,
+ Decoder: 44,
+ UsageLimit: 11,
+ }},
+ Error: nil,
+ },
+ }, info)
}
diff --git a/restream/app/avstream.go b/restream/app/avstream.go
index 2a5deb5e..9ff088ff 100644
--- a/restream/app/avstream.go
+++ b/restream/app/avstream.go
@@ -67,6 +67,18 @@ type AVstream struct {
Mode string // "file" or "live"
Debug interface{}
Swap AVStreamSwap
+
+ // Codec parameter
+ Codec string
+ Profile int
+ Level int
+ Pixfmt string
+ Width uint64
+ Height uint64
+ Samplefmt string
+ Sampling uint64
+ Layout string
+ Channels uint64
}
func (a *AVstream) UnmarshalParser(p *parse.AVstream) {
@@ -74,6 +86,9 @@ func (a *AVstream) UnmarshalParser(p *parse.AVstream) {
return
}
+ a.Input.UnmarshalParser(&p.Input)
+ a.Output.UnmarshalParser(&p.Output)
+
a.Aqueue = p.Aqueue
a.Queue = p.Queue
a.Dup = p.Dup
@@ -85,8 +100,17 @@ func (a *AVstream) UnmarshalParser(p *parse.AVstream) {
a.GOP = p.GOP
a.Mode = p.Mode
a.Swap.UnmarshalParser(&p.Swap)
- a.Input.UnmarshalParser(&p.Input)
- a.Output.UnmarshalParser(&p.Output)
+
+ a.Codec = p.Codec
+ a.Profile = p.Profile
+ a.Level = p.Level
+ a.Pixfmt = p.Pixfmt
+ a.Width = p.Width
+ a.Height = p.Height
+ a.Samplefmt = p.Samplefmt
+ a.Sampling = p.Sampling
+ a.Layout = p.Layout
+ a.Channels = p.Channels
}
func (a *AVstream) MarshalParser() *parse.AVstream {
@@ -105,6 +129,16 @@ func (a *AVstream) MarshalParser() *parse.AVstream {
Mode: a.Mode,
Debug: a.Debug,
Swap: a.Swap.MarshalParser(),
+ Codec: a.Codec,
+ Profile: a.Profile,
+ Level: a.Level,
+ Pixfmt: a.Pixfmt,
+ Width: a.Width,
+ Height: a.Height,
+ Samplefmt: a.Samplefmt,
+ Sampling: a.Sampling,
+ Layout: a.Layout,
+ Channels: a.Channels,
}
return p
diff --git a/restream/app/avstream_test.go b/restream/app/avstream_test.go
index 834aad0a..ea6a229d 100644
--- a/restream/app/avstream_test.go
+++ b/restream/app/avstream_test.go
@@ -68,6 +68,16 @@ func TestAVstream(t *testing.T) {
LastURL: "fjfd",
LastError: "none",
},
+ Codec: "h264",
+ Profile: 858,
+ Level: 64,
+ Pixfmt: "yuv420p",
+ Width: 1920,
+ Height: 1080,
+ Samplefmt: "fltp",
+ Sampling: 44100,
+ Layout: "stereo",
+ Channels: 42,
}
p := AVstream{}
diff --git a/restream/app/process.go b/restream/app/process.go
index 8d58d746..e02cd69a 100644
--- a/restream/app/process.go
+++ b/restream/app/process.go
@@ -9,6 +9,7 @@ import (
"sync"
"github.com/datarhei/core/v16/ffmpeg/parse"
+ "github.com/datarhei/core/v16/mem"
"github.com/datarhei/core/v16/process"
)
@@ -78,13 +79,21 @@ type Config struct {
Reconnect bool
ReconnectDelay uint64 // seconds
Autostart bool
- StaleTimeout uint64 // seconds
- Timeout uint64 // seconds
- Scheduler string // crontab pattern or RFC3339 timestamp
- LogPatterns []string // will be interpreted as regular expressions
- LimitCPU float64 // percent
- LimitMemory uint64 // bytes
- LimitWaitFor uint64 // seconds
+ StaleTimeout uint64 // seconds
+ Timeout uint64 // seconds
+ Scheduler string // crontab pattern or RFC3339 timestamp
+ LogPatterns []string // will be interpreted as regular expressions
+ LimitCPU float64 // percent
+ LimitMemory uint64 // bytes
+ LimitGPU ConfigLimitGPU // GPU limits
+ LimitWaitFor uint64 // seconds
+}
+
+type ConfigLimitGPU struct {
+ Usage float64 // percent 0-100
+ Encoder float64 // percent 0-100
+ Decoder float64 // percent 0-100
+ Memory uint64 // bytes
}
func (config *Config) Clone() *Config {
@@ -102,6 +111,7 @@ func (config *Config) Clone() *Config {
Scheduler: config.Scheduler,
LimitCPU: config.LimitCPU,
LimitMemory: config.LimitMemory,
+ LimitGPU: config.LimitGPU,
LimitWaitFor: config.LimitWaitFor,
}
@@ -156,7 +166,8 @@ func (config *Config) String() string {
}
func (config *Config) Hash() []byte {
- b := bytes.Buffer{}
+ b := mem.Get()
+ defer mem.Put(b)
b.WriteString(config.ID)
b.WriteString(config.Reference)
@@ -173,6 +184,10 @@ func (config *Config) Hash() []byte {
b.WriteString(strconv.FormatUint(config.LimitMemory, 10))
b.WriteString(strconv.FormatUint(config.LimitWaitFor, 10))
b.WriteString(strconv.FormatFloat(config.LimitCPU, 'f', -1, 64))
+ b.WriteString(strconv.FormatFloat(config.LimitGPU.Usage, 'f', -1, 64))
+ b.WriteString(strconv.FormatFloat(config.LimitGPU.Encoder, 'f', -1, 64))
+ b.WriteString(strconv.FormatFloat(config.LimitGPU.Decoder, 'f', -1, 64))
+ b.WriteString(strconv.FormatUint(config.LimitGPU.Memory, 10))
for _, x := range config.Input {
b.WriteString(x.HashString())
@@ -292,7 +307,7 @@ type State struct {
Memory uint64 // Current memory consumption in bytes
CPU float64 // Current CPU consumption in percent
LimitMode string // How the process is limited (hard or soft)
- Resources ProcessUsage // Current resource usage, include CPU and memory consumption
+ Resources ProcessUsage // Current resource usage, including CPU, memory and GPU consumption
Command []string // ffmpeg command line parameters
}
@@ -324,10 +339,10 @@ func (p *ProcessUsageCPU) MarshalParser() parse.UsageCPU {
}
type ProcessUsageMemory struct {
- Current uint64 // bytes
- Average float64 // bytes
- Max uint64 // bytes
- Limit uint64 // bytes
+ Current uint64 // bytes
+ Average uint64 // bytes
+ Max uint64 // bytes
+ Limit uint64 // bytes
}
func (p *ProcessUsageMemory) UnmarshalParser(pp *parse.UsageMemory) {
@@ -346,20 +361,97 @@ func (p *ProcessUsageMemory) MarshalParser() parse.UsageMemory {
return pp
}
+type ProcessUsageGPU struct {
+ Index int
+ Usage ProcessUsageGPUUsage
+ Encoder ProcessUsageGPUUsage
+ Decoder ProcessUsageGPUUsage
+ Memory ProcessUsageGPUMemory
+}
+
+func (p *ProcessUsageGPU) UnmarshalParser(pp *parse.UsageGPU) {
+ p.Index = pp.Index
+ p.Usage.UnmarshalParser(&pp.Usage)
+ p.Encoder.UnmarshalParser(&pp.Encoder)
+ p.Decoder.UnmarshalParser(&pp.Decoder)
+ p.Memory.UnmarshalParser(&pp.Memory)
+}
+
+func (p *ProcessUsageGPU) MarshalParser() parse.UsageGPU {
+ pp := parse.UsageGPU{
+ Index: p.Index,
+ Usage: p.Usage.MarshalParser(),
+ Encoder: p.Encoder.MarshalParser(),
+ Decoder: p.Decoder.MarshalParser(),
+ Memory: p.Memory.MarshalParser(),
+ }
+
+ return pp
+}
+
+type ProcessUsageGPUUsage struct {
+ Current float64 // percent 0-100
+ Average float64 // percent 0-100
+ Max float64 // percent 0-100
+ Limit float64 // percent 0-100
+}
+
+func (p *ProcessUsageGPUUsage) UnmarshalParser(pp *parse.UsageGPUUsage) {
+ p.Average = pp.Average
+ p.Max = pp.Max
+ p.Limit = pp.Limit
+}
+
+func (p *ProcessUsageGPUUsage) MarshalParser() parse.UsageGPUUsage {
+ pp := parse.UsageGPUUsage{
+ Average: p.Average,
+ Max: p.Max,
+ Limit: p.Limit,
+ }
+
+ return pp
+}
+
+type ProcessUsageGPUMemory struct {
+ Current uint64 // bytes
+ Average uint64 // bytes
+ Max uint64 // bytes
+ Limit uint64 // bytes
+}
+
+func (p *ProcessUsageGPUMemory) UnmarshalParser(pp *parse.UsageGPUMemory) {
+ p.Average = pp.Average
+ p.Max = pp.Max
+ p.Limit = pp.Limit
+}
+
+func (p *ProcessUsageGPUMemory) MarshalParser() parse.UsageGPUMemory {
+ pp := parse.UsageGPUMemory{
+ Average: p.Average,
+ Max: p.Max,
+ Limit: p.Limit,
+ }
+
+ return pp
+}
+
type ProcessUsage struct {
CPU ProcessUsageCPU
Memory ProcessUsageMemory
+ GPU ProcessUsageGPU
}
func (p *ProcessUsage) UnmarshalParser(pp *parse.Usage) {
p.CPU.UnmarshalParser(&pp.CPU)
p.Memory.UnmarshalParser(&pp.Memory)
+ p.GPU.UnmarshalParser(&pp.GPU)
}
func (p *ProcessUsage) MarshalParser() parse.Usage {
pp := parse.Usage{
CPU: p.CPU.MarshalParser(),
Memory: p.Memory.MarshalParser(),
+ GPU: p.GPU.MarshalParser(),
}
return pp
diff --git a/restream/app/process_test.go b/restream/app/process_test.go
index 96889697..2aa6168b 100644
--- a/restream/app/process_test.go
+++ b/restream/app/process_test.go
@@ -46,12 +46,18 @@ func TestConfigHash(t *testing.T) {
LogPatterns: []string{"^libx264"},
LimitCPU: 50,
LimitMemory: 3 * 1024 * 1024,
- LimitWaitFor: 20,
+ LimitGPU: ConfigLimitGPU{
+ Usage: 10,
+ Encoder: 42,
+ Decoder: 14,
+ Memory: 500 * 1024 * 1024,
+ },
+ LimitWaitFor: 20,
}
hash1 := config.Hash()
- require.Equal(t, []byte{0x7e, 0xae, 0x5b, 0xc3, 0xad, 0xe3, 0x9a, 0xfc, 0xd3, 0x49, 0x15, 0x28, 0x93, 0x17, 0xc5, 0xbf}, hash1)
+ require.Equal(t, []byte{0x5e, 0x85, 0xc3, 0xc5, 0x44, 0xfd, 0x3e, 0x10, 0x13, 0x76, 0x36, 0x8b, 0xbe, 0x7e, 0xa6, 0xbb}, hash1)
config.Reconnect = false
diff --git a/restream/app/progress.go b/restream/app/progress.go
index d747ba67..e0681b01 100644
--- a/restream/app/progress.go
+++ b/restream/app/progress.go
@@ -19,6 +19,8 @@ type ProgressIO struct {
Type string
Codec string
Coder string
+ Profile int
+ Level int
Frame uint64 // counter
Keyframe uint64 // counter
Framerate ProgressIOFramerate
@@ -36,9 +38,10 @@ type ProgressIO struct {
Height uint64
// Audio
- Sampling uint64
- Layout string
- Channels uint64
+ Samplefmt string
+ Sampling uint64
+ Layout string
+ Channels uint64
// avstream
AVstream *AVstream
@@ -52,6 +55,8 @@ func (p *ProgressIO) UnmarshalParser(pp *parse.ProgressIO) {
p.Type = pp.Type
p.Codec = pp.Codec
p.Coder = pp.Coder
+ p.Profile = pp.Profile
+ p.Level = pp.Level
p.Frame = pp.Frame
p.Keyframe = pp.Keyframe
p.Framerate = pp.Framerate
@@ -65,6 +70,7 @@ func (p *ProgressIO) UnmarshalParser(pp *parse.ProgressIO) {
p.Quantizer = pp.Quantizer
p.Width = pp.Width
p.Height = pp.Height
+ p.Samplefmt = pp.Samplefmt
p.Sampling = pp.Sampling
p.Layout = pp.Layout
p.Channels = pp.Channels
@@ -86,6 +92,8 @@ func (p *ProgressIO) MarshalParser() parse.ProgressIO {
Type: p.Type,
Codec: p.Codec,
Coder: p.Coder,
+ Profile: p.Profile,
+ Level: p.Level,
Frame: p.Frame,
Keyframe: p.Keyframe,
Framerate: p.Framerate,
@@ -99,6 +107,7 @@ func (p *ProgressIO) MarshalParser() parse.ProgressIO {
Quantizer: p.Quantizer,
Width: p.Width,
Height: p.Height,
+ Samplefmt: p.Samplefmt,
Sampling: p.Sampling,
Layout: p.Layout,
Channels: p.Channels,
diff --git a/restream/app/progress_test.go b/restream/app/progress_test.go
index bfffa293..b8cce1ba 100644
--- a/restream/app/progress_test.go
+++ b/restream/app/progress_test.go
@@ -16,6 +16,8 @@ func TestProgressIO(t *testing.T) {
Type: "video",
Codec: "h264",
Coder: "libx264",
+ Profile: 848,
+ Level: 48,
Frame: 39,
Keyframe: 433,
Framerate: struct {
@@ -37,6 +39,7 @@ func TestProgressIO(t *testing.T) {
Quantizer: 2.3,
Width: 4848,
Height: 9373,
+ Samplefmt: "fltp",
Sampling: 4733,
Layout: "atmos",
Channels: 83,
@@ -60,6 +63,8 @@ func TestProgressIOWithAVstream(t *testing.T) {
Type: "video",
Codec: "h264",
Coder: "libx264",
+ Profile: 848,
+ Level: 48,
Frame: 39,
Keyframe: 433,
Framerate: struct {
@@ -81,6 +86,7 @@ func TestProgressIOWithAVstream(t *testing.T) {
Quantizer: 2.3,
Width: 4848,
Height: 9373,
+ Samplefmt: "fltp",
Sampling: 4733,
Layout: "atmos",
Channels: 83,
@@ -217,6 +223,8 @@ func TestProgress(t *testing.T) {
Type: "video",
Codec: "h264",
Coder: "libx264",
+ Profile: 848,
+ Level: 48,
Frame: 39,
Keyframe: 433,
Framerate: struct {
@@ -238,6 +246,7 @@ func TestProgress(t *testing.T) {
Quantizer: 2.3,
Width: 4848,
Height: 9373,
+ Samplefmt: "fltp",
Sampling: 4733,
Layout: "atmos",
Channels: 83,
@@ -283,6 +292,8 @@ func TestProgress(t *testing.T) {
Type: "video",
Codec: "h264",
Coder: "libx264",
+ Profile: 848,
+ Level: 48,
Frame: 39,
Keyframe: 433,
Framerate: struct {
@@ -304,6 +315,7 @@ func TestProgress(t *testing.T) {
Quantizer: 2.3,
Width: 4848,
Height: 9373,
+ Samplefmt: "fltp",
Sampling: 4733,
Layout: "atmos",
Channels: 83,
diff --git a/restream/core.go b/restream/core.go
index e9effb7a..6824f83c 100644
--- a/restream/core.go
+++ b/restream/core.go
@@ -187,7 +187,7 @@ func (r *restream) Start() {
t.Restore()
// The filesystem cleanup rules can be set
- r.setCleanup(id, t.Config())
+ r.setCleanup(id, t.config)
return true
})
@@ -279,13 +279,14 @@ func (r *restream) resourceObserver(ctx context.Context, rsc resources.Resources
defer ticker.Stop()
limitCPU, limitMemory := false, false
+ var limitGPUs []bool = nil
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
- cpu, memory := rsc.ShouldLimit()
+ cpu, memory, gpu := rsc.ShouldLimit()
hasChanges := false
@@ -299,17 +300,34 @@ func (r *restream) resourceObserver(ctx context.Context, rsc resources.Resources
hasChanges = true
}
+ if limitGPUs == nil {
+ limitGPUs = make([]bool, len(gpu))
+ }
+
+ for i, g := range gpu {
+ if g != limitGPUs[i] {
+ limitGPUs[i] = g
+ hasChanges = true
+ }
+ }
+
if !hasChanges {
break
}
r.tasks.Range(func(id app.ProcessID, t *task) bool {
- if t.Limit(limitCPU, limitMemory) {
+ limitGPU := false
+ gpuindex := t.GetHWDevice()
+ if gpuindex >= 0 {
+ limitGPU = limitGPUs[gpuindex]
+ }
+ if t.Limit(limitCPU, limitMemory, limitGPU) {
r.logger.Debug().WithFields(log.Fields{
"limit_cpu": limitCPU,
"limit_memory": limitMemory,
+ "limit_gpu": limitGPU,
"id": id,
- }).Log("Limiting process CPU and memory consumption")
+ }).Log("Limiting process CPU, memory, and GPU consumption")
}
return true
@@ -391,7 +409,11 @@ func (r *restream) load() error {
// Validate config with all placeholders replaced. However, we need to take care
// that the config with the task keeps its dynamic placeholders for process starts.
config := t.config.Clone()
- resolveDynamicPlaceholder(config, r.replace)
+ resolveDynamicPlaceholder(config, r.replace, map[string]string{
+ "hwdevice": "0",
+ }, map[string]string{
+ "timestamp": time.Now().UTC().Format(time.RFC3339),
+ })
t.usesDisk, err = validateConfig(config, r.fs.list, r.ffmpeg)
if err != nil {
@@ -414,30 +436,23 @@ func (r *restream) load() error {
}
ffmpeg, err := r.ffmpeg.New(ffmpeg.ProcessConfig{
- Reconnect: t.config.Reconnect,
- ReconnectDelay: time.Duration(t.config.ReconnectDelay) * time.Second,
- StaleTimeout: time.Duration(t.config.StaleTimeout) * time.Second,
- Timeout: time.Duration(t.config.Timeout) * time.Second,
- LimitCPU: t.config.LimitCPU,
- LimitMemory: t.config.LimitMemory,
- LimitDuration: time.Duration(t.config.LimitWaitFor) * time.Second,
- LimitMode: limitMode,
- Scheduler: t.config.Scheduler,
- Args: t.command,
- Parser: t.parser,
- Logger: t.logger,
- OnArgs: r.onArgs(t.config.Clone()),
- OnBeforeStart: func() error {
- if !r.enableSoftLimit {
- return nil
- }
-
- if err := r.resources.Request(t.config.LimitCPU, t.config.LimitMemory); err != nil {
- return err
- }
-
- return nil
- },
+ Reconnect: t.config.Reconnect,
+ ReconnectDelay: time.Duration(t.config.ReconnectDelay) * time.Second,
+ StaleTimeout: time.Duration(t.config.StaleTimeout) * time.Second,
+ Timeout: time.Duration(t.config.Timeout) * time.Second,
+ LimitCPU: t.config.LimitCPU,
+ LimitMemory: t.config.LimitMemory,
+ LimitGPUUsage: t.config.LimitGPU.Usage,
+ LimitGPUEncoder: t.config.LimitGPU.Encoder,
+ LimitGPUDecoder: t.config.LimitGPU.Decoder,
+ LimitGPUMemory: t.config.LimitGPU.Memory,
+ LimitDuration: time.Duration(t.config.LimitWaitFor) * time.Second,
+ LimitMode: limitMode,
+ Scheduler: t.config.Scheduler,
+ Args: t.command,
+ Parser: t.parser,
+ Logger: t.logger,
+ OnBeforeStart: r.onBeforeStart(t.config.Clone()),
})
if err != nil {
return true
@@ -578,7 +593,11 @@ func (r *restream) createTask(config *app.Config) (*task, error) {
// Validate config with all placeholders replaced. However, we need to take care
// that the config with the task keeps its dynamic placeholders for process starts.
config := t.config.Clone()
- resolveDynamicPlaceholder(config, r.replace)
+ resolveDynamicPlaceholder(config, r.replace, map[string]string{
+ "hwdevice": "0",
+ }, map[string]string{
+ "timestamp": time.Now().UTC().Format(time.RFC3339),
+ })
t.usesDisk, err = validateConfig(config, r.fs.list, r.ffmpeg)
if err != nil {
@@ -600,30 +619,23 @@ func (r *restream) createTask(config *app.Config) (*task, error) {
}
ffmpeg, err := r.ffmpeg.New(ffmpeg.ProcessConfig{
- Reconnect: t.config.Reconnect,
- ReconnectDelay: time.Duration(t.config.ReconnectDelay) * time.Second,
- StaleTimeout: time.Duration(t.config.StaleTimeout) * time.Second,
- Timeout: time.Duration(t.config.Timeout) * time.Second,
- LimitCPU: t.config.LimitCPU,
- LimitMemory: t.config.LimitMemory,
- LimitDuration: time.Duration(t.config.LimitWaitFor) * time.Second,
- LimitMode: limitMode,
- Scheduler: t.config.Scheduler,
- Args: t.command,
- Parser: t.parser,
- Logger: t.logger,
- OnArgs: r.onArgs(t.config.Clone()),
- OnBeforeStart: func() error {
- if !r.enableSoftLimit {
- return nil
- }
-
- if err := r.resources.Request(t.config.LimitCPU, t.config.LimitMemory); err != nil {
- return err
- }
-
- return nil
- },
+ Reconnect: t.config.Reconnect,
+ ReconnectDelay: time.Duration(t.config.ReconnectDelay) * time.Second,
+ StaleTimeout: time.Duration(t.config.StaleTimeout) * time.Second,
+ Timeout: time.Duration(t.config.Timeout) * time.Second,
+ LimitCPU: t.config.LimitCPU,
+ LimitMemory: t.config.LimitMemory,
+ LimitGPUUsage: t.config.LimitGPU.Usage,
+ LimitGPUEncoder: t.config.LimitGPU.Encoder,
+ LimitGPUDecoder: t.config.LimitGPU.Decoder,
+ LimitGPUMemory: t.config.LimitGPU.Memory,
+ LimitDuration: time.Duration(t.config.LimitWaitFor) * time.Second,
+ LimitMode: limitMode,
+ Scheduler: t.config.Scheduler,
+ Args: t.command,
+ Parser: t.parser,
+ Logger: t.logger,
+ OnBeforeStart: r.onBeforeStart(t.config.Clone()),
})
if err != nil {
return nil, err
@@ -636,21 +648,47 @@ func (r *restream) createTask(config *app.Config) (*task, error) {
return t, nil
}
-// onArgs is a callback that gets called by a process before it will be started.
-// It evalutes the dynamic placeholders in a process config and returns the
-// resulting command line to the process.
-func (r *restream) onArgs(cfg *app.Config) func([]string) []string {
- return func(args []string) []string {
+// onBeforeStart is a callback that gets called by a process before it will be started.
+// It evaluates the dynamic placeholders in a process config and returns the resulting command line to the process.
+func (r *restream) onBeforeStart(cfg *app.Config) func([]string) ([]string, error) {
+ return func(args []string) ([]string, error) {
+ selectedGPU := -1
+ if r.enableSoftLimit {
+ res, err := r.resources.Request(resources.Request{
+ CPU: cfg.LimitCPU,
+ Memory: cfg.LimitMemory,
+ GPUUsage: cfg.LimitGPU.Usage,
+ GPUEncoder: cfg.LimitGPU.Encoder,
+ GPUDecoder: cfg.LimitGPU.Decoder,
+ GPUMemory: cfg.LimitGPU.Memory,
+ })
+ if err != nil {
+ return []string{}, err
+ }
+
+ selectedGPU = res.GPU
+ } else {
+ selectedGPU = 0
+ }
+
+ if t, hasTask := r.tasks.Load(cfg.ProcessID()); hasTask {
+ t.SetHWDevice(selectedGPU)
+ }
+
config := cfg.Clone()
- resolveDynamicPlaceholder(config, r.replace)
+ resolveDynamicPlaceholder(config, r.replace, map[string]string{
+ "hwdevice": fmt.Sprintf("%d", selectedGPU),
+ }, map[string]string{
+ "timestamp": time.Now().UTC().Format(time.RFC3339),
+ })
_, err := validateConfig(config, r.fs.list, r.ffmpeg)
if err != nil {
- return []string{}
+ return []string{}, err
}
- return config.CreateCommand()
+ return config.CreateCommand(), nil
}
}
@@ -1168,7 +1206,7 @@ func (r *restream) updateProcess(id app.ProcessID, config *app.Config) error {
r.tasks.Store(tid, t)
// set filesystem cleanup rules
- r.setCleanup(tid, t.Config())
+ r.setCleanup(tid, t.config)
t.Restore()
@@ -1376,7 +1414,7 @@ func (r *restream) reloadProcess(id app.ProcessID) error {
r.tasks.Store(tid, t)
// set filesystem cleanup rules
- r.setCleanup(tid, t.Config())
+ r.setCleanup(tid, t.config)
t.Restore()
@@ -1448,7 +1486,11 @@ func (r *restream) Probe(config *app.Config, timeout time.Duration) app.Probe {
return probe
}
- resolveDynamicPlaceholder(config, r.replace)
+ resolveDynamicPlaceholder(config, r.replace, map[string]string{
+ "hwdevice": "0",
+ }, map[string]string{
+ "timestamp": time.Now().UTC().Format(time.RFC3339),
+ })
_, err = validateConfig(config, r.fs.list, r.ffmpeg)
if err != nil {
@@ -1712,22 +1754,26 @@ func resolveStaticPlaceholders(config *app.Config, r replace.Replacer) {
// resolveDynamicPlaceholder replaces placeholders in the config that should be replaced at process start.
// The config will be modified in place.
-func resolveDynamicPlaceholder(config *app.Config, r replace.Replacer) {
- vars := map[string]string{
- "timestamp": time.Now().UTC().Format(time.RFC3339),
- }
+func resolveDynamicPlaceholder(config *app.Config, r replace.Replacer, values map[string]string, vars map[string]string) {
+ placeholders := []string{"date", "hwdevice"}
for i, option := range config.Options {
- option = r.Replace(option, "date", "", vars, config, "global")
+ for _, placeholder := range placeholders {
+ option = r.Replace(option, placeholder, values[placeholder], vars, config, "global")
+ }
config.Options[i] = option
}
for i, input := range config.Input {
- input.Address = r.Replace(input.Address, "date", "", vars, config, "input")
+ for _, placeholder := range placeholders {
+ input.Address = r.Replace(input.Address, placeholder, values[placeholder], vars, config, "input")
+ }
for j, option := range input.Options {
- option = r.Replace(option, "date", "", vars, config, "input")
+ for _, placeholder := range placeholders {
+ option = r.Replace(option, placeholder, values[placeholder], vars, config, "input")
+ }
input.Options[j] = option
}
@@ -1736,16 +1782,22 @@ func resolveDynamicPlaceholder(config *app.Config, r replace.Replacer) {
}
for i, output := range config.Output {
- output.Address = r.Replace(output.Address, "date", "", vars, config, "output")
+ for _, placeholder := range placeholders {
+ output.Address = r.Replace(output.Address, placeholder, values[placeholder], vars, config, "output")
+ }
for j, option := range output.Options {
- option = r.Replace(option, "date", "", vars, config, "output")
+ for _, placeholder := range placeholders {
+ option = r.Replace(option, placeholder, values[placeholder], vars, config, "output")
+ }
output.Options[j] = option
}
for j, cleanup := range output.Cleanup {
- cleanup.Pattern = r.Replace(cleanup.Pattern, "date", "", vars, config, "output")
+ for _, placeholder := range placeholders {
+ cleanup.Pattern = r.Replace(cleanup.Pattern, placeholder, values[placeholder], vars, config, "output")
+ }
output.Cleanup[j] = cleanup
}
diff --git a/restream/core_test.go b/restream/core_test.go
index aad54bec..d0c4fb5e 100644
--- a/restream/core_test.go
+++ b/restream/core_test.go
@@ -1,6 +1,7 @@
package restream
import (
+ "bytes"
"fmt"
"math/rand"
"os"
@@ -12,10 +13,11 @@ import (
"github.com/datarhei/core/v16/iam"
iamidentity "github.com/datarhei/core/v16/iam/identity"
"github.com/datarhei/core/v16/iam/policy"
+ mock "github.com/datarhei/core/v16/internal/mock/resources"
"github.com/datarhei/core/v16/internal/testhelper"
"github.com/datarhei/core/v16/io/fs"
"github.com/datarhei/core/v16/net"
- "github.com/datarhei/core/v16/psutil"
+ "github.com/datarhei/core/v16/resources"
"github.com/datarhei/core/v16/restream/app"
rfs "github.com/datarhei/core/v16/restream/fs"
"github.com/datarhei/core/v16/restream/replace"
@@ -25,18 +27,26 @@ import (
"github.com/stretchr/testify/require"
)
-func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmpeg.Validator, replacer replace.Replacer) (Restreamer, error) {
- binary, err := testhelper.BuildBinary("ffmpeg", "../internal/testhelper")
+func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmpeg.Validator, replacer replace.Replacer, limits bool) (Restreamer, error) {
+ binary, err := testhelper.BuildBinary("ffmpeg")
if err != nil {
return nil, fmt.Errorf("failed to build helper program: %w", err)
}
+ var res resources.Resources
+ if limits {
+ res = mock.NewWithLimits()
+ } else {
+ res = mock.New()
+ }
+
ffmpeg, err := ffmpeg.New(ffmpeg.Config{
Binary: binary,
LogHistoryLength: 3,
Portrange: portrange,
ValidatorInput: validatorIn,
ValidatorOutput: validatorOut,
+ Resource: res,
})
if err != nil {
return nil, err
@@ -85,6 +95,7 @@ func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmp
Replace: replacer,
Filesystems: []fs.Filesystem{memfs},
Rewrite: rewriter,
+ Resources: res,
})
if err != nil {
return nil, err
@@ -131,7 +142,7 @@ func getDummyProcess() *app.Config {
}
func TestAddProcess(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -153,7 +164,7 @@ func TestAddProcess(t *testing.T) {
}
func TestAutostartProcess(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -170,7 +181,7 @@ func TestAutostartProcess(t *testing.T) {
}
func TestAddInvalidProcess(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
// Invalid process ID
@@ -238,7 +249,7 @@ func TestAddInvalidProcess(t *testing.T) {
}
func TestRemoveProcess(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -255,7 +266,7 @@ func TestRemoveProcess(t *testing.T) {
}
func TestUpdateProcess(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process1 := getDummyProcess()
@@ -306,7 +317,7 @@ func TestUpdateProcess(t *testing.T) {
}
func TestUpdateSameHashProcess(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
config := getDummyProcess()
@@ -335,7 +346,7 @@ func TestUpdateSameHashProcess(t *testing.T) {
}
func TestUpdateProcessLogHistoryTransfer(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
p := getDummyProcess()
@@ -389,7 +400,7 @@ func TestUpdateProcessLogHistoryTransfer(t *testing.T) {
}
func TestUpdateProcessMetadataTransfer(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
p := getDummyProcess()
@@ -424,7 +435,7 @@ func TestUpdateProcessMetadataTransfer(t *testing.T) {
}
func TestGetProcess(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process1 := getDummyProcess()
@@ -491,7 +502,7 @@ func TestGetProcess(t *testing.T) {
}
func TestStartProcess(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -517,8 +528,101 @@ func TestStartProcess(t *testing.T) {
rs.StopProcess(tid)
}
+func TestStartProcessWithLimits(t *testing.T) {
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, true)
+ require.NoError(t, err)
+
+ process := getDummyProcess()
+ process.LimitCPU = 1
+ process.LimitMemory = 1
+ process.LimitGPU = app.ConfigLimitGPU{
+ Usage: 1,
+ Encoder: 1,
+ Decoder: 1,
+ Memory: 1,
+ }
+ process.Options = append(process.Options, "-hwdevice", "{hwdevice}")
+ tid := app.ProcessID{ID: process.ID}
+
+ rs.AddProcess(process)
+
+ err = rs.StartProcess(tid)
+ require.Equal(t, nil, err, "should be able to start existing process")
+
+ state, _ := rs.GetProcessState(tid)
+ require.Equal(t, "start", state.Order, "Process should be started")
+
+ require.Equal(t, []string{
+ "-loglevel", "info", "-hwdevice", "0", "-f", "lavfi", "-re", "-i", "testsrc=size=1280x720:rate=25", "-codec", "copy", "-f", "null", "-",
+ }, state.Command)
+
+ rs.StopProcess(tid)
+}
+
+func TestProcessResources(t *testing.T) {
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
+ require.NoError(t, err)
+
+ process := getDummyProcess()
+ tid := app.ProcessID{ID: process.ID}
+
+ rs.AddProcess(process)
+
+ err = rs.StartProcess(tid)
+ require.Equal(t, nil, err, "should be able to start existing process")
+
+ time.Sleep(2 * time.Second)
+
+ state, _ := rs.GetProcessState(tid)
+ require.Equal(t, "start", state.Order, "Process should be started")
+
+ require.Equal(t, app.ProcessUsage{
+ CPU: app.ProcessUsageCPU{
+ NCPU: 2,
+ Current: 12,
+ Average: 12,
+ Max: 12,
+ Limit: 0,
+ IsThrottling: false,
+ },
+ Memory: app.ProcessUsageMemory{
+ Current: 42,
+ Average: 42,
+ Max: 42,
+ Limit: 0,
+ },
+ GPU: app.ProcessUsageGPU{
+ Index: 0,
+ Usage: app.ProcessUsageGPUUsage{
+ Current: 5,
+ Average: 5,
+ Max: 5,
+ Limit: 0,
+ },
+ Encoder: app.ProcessUsageGPUUsage{
+ Current: 9,
+ Average: 9,
+ Max: 9,
+ Limit: 0,
+ },
+ Decoder: app.ProcessUsageGPUUsage{
+ Current: 11,
+ Average: 11,
+ Max: 11,
+ Limit: 0,
+ },
+ Memory: app.ProcessUsageGPUMemory{
+ Current: 42,
+ Average: 42,
+ Max: 42,
+ Limit: 0,
+ },
+ },
+ }, state.Resources)
+}
+
func TestStopProcess(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -544,7 +648,7 @@ func TestStopProcess(t *testing.T) {
}
func TestRestartProcess(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -570,7 +674,7 @@ func TestRestartProcess(t *testing.T) {
}
func TestReloadProcess(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -602,7 +706,7 @@ func TestReloadProcess(t *testing.T) {
}
func TestParseProcessPattern(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -625,7 +729,7 @@ func TestParseProcessPattern(t *testing.T) {
}
func TestProbeProcess(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -635,7 +739,7 @@ func TestProbeProcess(t *testing.T) {
}
func TestProbeProcessWithReference(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -651,7 +755,7 @@ func TestProbeProcessWithReference(t *testing.T) {
}
func TestProcessMetadata(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -676,7 +780,7 @@ func TestProcessMetadata(t *testing.T) {
}
func TestLog(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -715,7 +819,7 @@ func TestLog(t *testing.T) {
}
func TestLogTransfer(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -741,7 +845,7 @@ func TestLogTransfer(t *testing.T) {
}
func TestPlayoutNoRange(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -765,7 +869,7 @@ func TestPlayoutRange(t *testing.T) {
portrange, err := net.NewPortrange(3000, 3001)
require.NoError(t, err)
- rs, err := getDummyRestreamer(portrange, nil, nil, nil)
+ rs, err := getDummyRestreamer(portrange, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -821,7 +925,7 @@ func TestParseAddressReference(t *testing.T) {
}
func TestAddressReference(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process1 := getDummyProcess()
@@ -852,7 +956,7 @@ func TestAddressReference(t *testing.T) {
}
func TestTeeAddressReference(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process1 := getDummyProcess()
@@ -898,7 +1002,7 @@ func TestTeeAddressReference(t *testing.T) {
}
func TestConfigValidation(t *testing.T) {
- rsi, err := getDummyRestreamer(nil, nil, nil, nil)
+ rsi, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
rs := rsi.(*restream)
@@ -946,7 +1050,7 @@ func TestConfigValidation(t *testing.T) {
}
func TestConfigValidationWithMkdir(t *testing.T) {
- rsi, err := getDummyRestreamer(nil, nil, nil, nil)
+ rsi, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
rs := rsi.(*restream)
@@ -990,7 +1094,7 @@ func TestConfigValidationFFmpeg(t *testing.T) {
valOut, err := ffmpeg.NewValidator([]string{"^https?://", "^rtmp://"}, nil)
require.NoError(t, err)
- rsi, err := getDummyRestreamer(nil, valIn, valOut, nil)
+ rsi, err := getDummyRestreamer(nil, valIn, valOut, nil, false)
require.NoError(t, err)
rs := rsi.(*restream)
@@ -1016,7 +1120,7 @@ func TestConfigValidationFFmpeg(t *testing.T) {
}
func TestOutputAddressValidation(t *testing.T) {
- rsi, err := getDummyRestreamer(nil, nil, nil, nil)
+ rsi, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
rs := rsi.(*restream)
@@ -1057,7 +1161,7 @@ func TestOutputAddressValidation(t *testing.T) {
}
func TestMetadata(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -1137,7 +1241,7 @@ func TestReplacer(t *testing.T) {
Input: []app.ConfigIO{
{
ID: "in_{processid}_{reference}",
- Address: "input:{inputid}_process:{processid}_reference:{reference}_diskfs:{diskfs}/disk.txt_memfs:{memfs}/mem.txt_fsdisk:{fs:disk}/fsdisk.txt_fsmem:{fs:mem}/fsmem.txt_rtmp:{rtmp,name=pmtr}_srt:{srt,name=trs}_rtmp:{rtmp,name=$inputid}",
+ Address: "input:{inputid}_process:{processid}_reference:{reference}_diskfs:{diskfs}/disk.txt_memfs:{memfs}/mem.txt_fsdisk:{fs:disk}/fsdisk.txt_fsmem:{fs:mem}/fsmem.txt_rtmp:{rtmp,name=pmtr}_srt:{srt,name=trs}_rtmp:{rtmp,name=$inputid}_hwdevice:{hwdevice}",
Options: []string{
"-f",
"lavfi",
@@ -1149,6 +1253,7 @@ func TestReplacer(t *testing.T) {
"memfs:{memfs}/mem.txt",
"fsdisk:{fs:disk}/fsdisk_{date,format=%Y%m%d_%H%M%S}.txt",
"fsmem:{fs:mem}/$inputid.txt",
+ "hwdevice:{hwdevice}",
},
},
},
@@ -1186,6 +1291,7 @@ func TestReplacer(t *testing.T) {
"{memfs}/foobar_in_mem.txt",
"{fs:disk}/foobar_on_disk_aswell.txt",
"{fs:mem}/foobar_in_mem_aswell.txt",
+ "hwdevice:{hwdevice}",
},
Reconnect: true,
ReconnectDelay: 10,
@@ -1202,7 +1308,7 @@ func TestReplacer(t *testing.T) {
Input: []app.ConfigIO{
{
ID: "in_314159265359_refref",
- Address: "input:in_314159265359_refref_process:314159265359_reference:refref_diskfs:/mnt/diskfs/disk.txt_memfs:http://localhost/mnt/memfs/mem.txt_fsdisk:/mnt/diskfs/fsdisk.txt_fsmem:http://localhost/mnt/memfs/fsmem.txt_rtmp:rtmp://localhost/app/pmtr?token=foobar_srt:srt://localhost:6000?mode=caller&transtype=live&latency=20000&streamid=trs,mode:request,token:abcfoobar&passphrase=secret_rtmp:rtmp://localhost/app/in_314159265359_refref?token=foobar",
+ Address: "input:in_314159265359_refref_process:314159265359_reference:refref_diskfs:/mnt/diskfs/disk.txt_memfs:http://localhost/mnt/memfs/mem.txt_fsdisk:/mnt/diskfs/fsdisk.txt_fsmem:http://localhost/mnt/memfs/fsmem.txt_rtmp:rtmp://localhost/app/pmtr?token=foobar_srt:srt://localhost:6000?mode=caller&transtype=live&latency=20000&streamid=trs,mode:request,token:abcfoobar&passphrase=secret_rtmp:rtmp://localhost/app/in_314159265359_refref?token=foobar_hwdevice:{hwdevice}",
Options: []string{
"-f",
"lavfi",
@@ -1214,6 +1320,7 @@ func TestReplacer(t *testing.T) {
"memfs:http://localhost/mnt/memfs/mem.txt",
"fsdisk:/mnt/diskfs/fsdisk_{date,format=%Y%m%d_%H%M%S}.txt",
"fsmem:http://localhost/mnt/memfs/$inputid.txt",
+ "hwdevice:{hwdevice}",
},
},
},
@@ -1251,6 +1358,7 @@ func TestReplacer(t *testing.T) {
"{memfs}/foobar_in_mem.txt",
"/mnt/diskfs/foobar_on_disk_aswell.txt",
"http://localhost/mnt/memfs/foobar_in_mem_aswell.txt",
+ "hwdevice:{hwdevice}",
},
Reconnect: true,
ReconnectDelay: 10,
@@ -1260,12 +1368,23 @@ func TestReplacer(t *testing.T) {
require.Equal(t, wantprocess, process)
- resolveDynamicPlaceholder(process, replacer)
+ resolveDynamicPlaceholder(process, replacer, map[string]string{
+ "hwdevice": fmt.Sprintf("%d", -1),
+ }, nil)
+ wantprocess.Options = []string{
+ "-loglevel",
+ "info",
+ "/mnt/diskfs/foobar_on_disk.txt",
+ "{memfs}/foobar_in_mem.txt",
+ "/mnt/diskfs/foobar_on_disk_aswell.txt",
+ "http://localhost/mnt/memfs/foobar_in_mem_aswell.txt",
+ "hwdevice:-1",
+ }
wantprocess.Input = []app.ConfigIO{
{
ID: "in_314159265359_refref",
- Address: "input:in_314159265359_refref_process:314159265359_reference:refref_diskfs:/mnt/diskfs/disk.txt_memfs:http://localhost/mnt/memfs/mem.txt_fsdisk:/mnt/diskfs/fsdisk.txt_fsmem:http://localhost/mnt/memfs/fsmem.txt_rtmp:rtmp://localhost/app/pmtr?token=foobar_srt:srt://localhost:6000?mode=caller&transtype=live&latency=20000&streamid=trs,mode:request,token:abcfoobar&passphrase=secret_rtmp:rtmp://localhost/app/in_314159265359_refref?token=foobar",
+ Address: "input:in_314159265359_refref_process:314159265359_reference:refref_diskfs:/mnt/diskfs/disk.txt_memfs:http://localhost/mnt/memfs/mem.txt_fsdisk:/mnt/diskfs/fsdisk.txt_fsmem:http://localhost/mnt/memfs/fsmem.txt_rtmp:rtmp://localhost/app/pmtr?token=foobar_srt:srt://localhost:6000?mode=caller&transtype=live&latency=20000&streamid=trs,mode:request,token:abcfoobar&passphrase=secret_rtmp:rtmp://localhost/app/in_314159265359_refref?token=foobar_hwdevice:-1",
Options: []string{
"-f",
"lavfi",
@@ -1277,6 +1396,7 @@ func TestReplacer(t *testing.T) {
"memfs:http://localhost/mnt/memfs/mem.txt",
"fsdisk:/mnt/diskfs/fsdisk_20191012_072050.txt",
"fsmem:http://localhost/mnt/memfs/$inputid.txt",
+ "hwdevice:-1",
},
},
}
@@ -1339,7 +1459,7 @@ func TestProcessReplacer(t *testing.T) {
"latency": "20000", // 20 milliseconds, FFmpeg requires microseconds
})
- rsi, err := getDummyRestreamer(nil, nil, nil, replacer)
+ rsi, err := getDummyRestreamer(nil, nil, nil, replacer, false)
require.NoError(t, err)
process := &app.Config{
@@ -1360,6 +1480,7 @@ func TestProcessReplacer(t *testing.T) {
"memfs:{memfs}/mem.txt",
"fsdisk:{fs:disk}/fsdisk_{date,format=%Y%m%d_%H%M%S}.txt",
"fsmem:{fs:mem}/$inputid.txt",
+ "hwdevice:{hwdevice}",
},
},
},
@@ -1397,6 +1518,7 @@ func TestProcessReplacer(t *testing.T) {
"{memfs}/foobar_in_mem.txt",
"{fs:disk}/foobar_on_disk_aswell.txt",
"{fs:mem}/foobar_in_mem_aswell.txt",
+ "hwdevice:{hwdevice}",
},
Reconnect: true,
ReconnectDelay: 10,
@@ -1428,6 +1550,7 @@ func TestProcessReplacer(t *testing.T) {
"memfs:http://localhost/mnt/memfs/mem.txt",
"fsdisk:/mnt/diskfs/fsdisk_{date,format=%Y%m%d_%H%M%S}.txt",
"fsmem:http://localhost/mnt/memfs/$inputid.txt",
+ "hwdevice:{hwdevice}",
},
Cleanup: []app.ConfigIOCleanup{},
},
@@ -1466,6 +1589,7 @@ func TestProcessReplacer(t *testing.T) {
"{memfs}/foobar_in_mem.txt",
"/mnt/diskfs/foobar_on_disk_aswell.txt",
"http://localhost/mnt/memfs/foobar_in_mem_aswell.txt",
+ "hwdevice:{hwdevice}",
},
Reconnect: true,
ReconnectDelay: 10,
@@ -1478,10 +1602,53 @@ func TestProcessReplacer(t *testing.T) {
require.True(t, ok)
require.Equal(t, process, task.config)
+
+ err = rsi.StartProcess(app.ProcessID{ID: "314159265359"})
+ require.NoError(t, err)
+
+ state, err := rsi.GetProcessState(app.ProcessID{ID: "314159265359"})
+ require.NoError(t, err)
+
+ require.Equal(t, []string{
+ "-loglevel",
+ "info",
+ "/mnt/diskfs/foobar_on_disk.txt",
+ "{memfs}/foobar_in_mem.txt",
+ "/mnt/diskfs/foobar_on_disk_aswell.txt",
+ "http://localhost/mnt/memfs/foobar_in_mem_aswell.txt",
+ "hwdevice:0",
+ "-f",
+ "lavfi",
+ "-re",
+ "input:in_314159265359_refref",
+ "process:314159265359",
+ "reference:refref",
+ "diskfs:/mnt/diskfs/disk.txt",
+ "memfs:http://localhost/mnt/memfs/mem.txt",
+ "fsdisk:/mnt/diskfs/fsdisk_20191012_072050.txt",
+ "fsmem:http://localhost/mnt/memfs/$inputid.txt",
+ "hwdevice:0",
+ "-i",
+ "input:in_314159265359_refref_process:314159265359_reference:refref_diskfs:/mnt/diskfs/disk.txt_memfs:http://localhost/mnt/memfs/mem.txt_fsdisk:/mnt/diskfs/fsdisk.txt_fsmem:http://localhost/mnt/memfs/fsmem.txt_rtmp:rtmp://localhost/app/pmtr?token=foobar_srt:srt://localhost:6000?mode=caller&transtype=live&latency=20000&streamid=trs,mode:request,token:abcfoobar&passphrase=secret_rtmp:rtmp://localhost/app/in_314159265359_refref?token=foobar",
+ "-codec",
+ "copy",
+ "-f",
+ "null",
+ "output:out_314159265359_refref",
+ "process:314159265359",
+ "reference:refref",
+ "diskfs:/mnt/diskfs/disk.txt",
+ "memfs:http://localhost/mnt/memfs/mem.txt",
+ "fsdisk:/mnt/diskfs/fsdisk.txt",
+ "fsmem:http://localhost/mnt/memfs/$outputid.txt",
+ "output:out_314159265359_refref_process:314159265359_reference:refref_diskfs:/mnt/diskfs/disk.txt_memfs:http://localhost/mnt/memfs/mem.txt_fsdisk:/mnt/diskfs/fsdisk.txt_fsmem:http://localhost/mnt/memfs/fsmem.txt_rtmp:rtmp://localhost/app/314159265359?token=foobar_srt:srt://localhost:6000?mode=caller&transtype=live&latency=42&streamid=refref,mode:publish,token:abcfoobar&passphrase=secret_rtmp:rtmp://localhost/app/out_314159265359_refref?token=foobar",
+ }, state.Command)
+
+ rsi.StopProcess(app.ProcessID{ID: "314159265359"})
}
func TestProcessLogPattern(t *testing.T) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -1512,7 +1679,7 @@ func TestProcessLogPattern(t *testing.T) {
}
func TestProcessLimit(t *testing.T) {
- rsi, err := getDummyRestreamer(nil, nil, nil, nil)
+ rsi, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(t, err)
process := getDummyProcess()
@@ -1530,15 +1697,14 @@ func TestProcessLimit(t *testing.T) {
status := task.ffmpeg.Status()
- ncpu, err := psutil.CPUCounts(true)
- require.NoError(t, err)
+ ncpu := rs.resources.Info().CPU.NCPU
require.Equal(t, ncpu*process.LimitCPU, status.CPU.Limit)
require.Equal(t, process.LimitMemory, status.Memory.Limit)
}
func BenchmarkGetProcessIDs(b *testing.B) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(b, err)
for i := 0; i < 1000; i++ {
@@ -1559,7 +1725,7 @@ func BenchmarkGetProcessIDs(b *testing.B) {
}
func BenchmarkGetProcess(b *testing.B) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(b, err)
for i := 0; i < 1000; i++ {
@@ -1583,7 +1749,7 @@ func BenchmarkGetProcess(b *testing.B) {
}
func BenchmarkGetProcessState(b *testing.B) {
- rs, err := getDummyRestreamer(nil, nil, nil, nil)
+ rs, err := getDummyRestreamer(nil, nil, nil, nil, false)
require.NoError(b, err)
n := 10
@@ -1614,3 +1780,97 @@ func BenchmarkGetProcessState(b *testing.B) {
rs.DeleteProcess(app.NewProcessID("test_"+strconv.Itoa(n), ""))
}
}
+
+func TestProcessCleanup(t *testing.T) {
+ rsi, err := getDummyRestreamer(nil, nil, nil, nil, false)
+ require.NoError(t, err)
+
+ rsi.Start()
+
+ rs := rsi.(*restream)
+
+ memfs, ok := rs.fs.list[0].(fs.Filesystem)
+ require.True(t, ok)
+
+ for i := 0; i < 10; i++ {
+ memfs.WriteFileReader(fmt.Sprintf("/foobar_%02d.dat", i), bytes.NewReader([]byte("hello")), -1)
+ }
+
+ files := memfs.List("/", fs.ListOptions{
+ Pattern: "/foobar_*",
+ })
+ require.Equal(t, 10, len(files))
+
+ process := getDummyProcess()
+ process.ID = "foobar"
+ output := process.Output[0]
+ output.Cleanup = append(output.Cleanup, app.ConfigIOCleanup{
+ Pattern: "mem:/{processid}_*",
+ MaxFiles: 5,
+ MaxFileAge: 0,
+ PurgeOnDelete: true,
+ })
+ process.Output[0] = output
+
+ err = rsi.AddProcess(process)
+ require.NoError(t, err)
+
+ require.Eventually(t, func() bool {
+ files := memfs.List("/", fs.ListOptions{
+ Pattern: "/foobar_*",
+ })
+
+ return len(files) == 5
+ }, 15*time.Second, time.Second)
+
+ rsi.Stop()
+
+ for i := 0; i < 10; i++ {
+ memfs.WriteFileReader(fmt.Sprintf("/foobar_%02d.dat", i), bytes.NewReader([]byte("hello")), -1)
+ }
+
+ files = memfs.List("/", fs.ListOptions{
+ Pattern: "/foobar_*",
+ })
+ require.Equal(t, 10, len(files))
+
+ rsi.ReloadProcess(app.ProcessID{ID: process.ID})
+
+ rsi.Start()
+
+ require.Eventually(t, func() bool {
+ files := memfs.List("/", fs.ListOptions{
+ Pattern: "/foobar_*",
+ })
+
+ return len(files) == 5
+ }, 15*time.Second, time.Second)
+
+ rsi.Stop()
+
+ for i := 0; i < 10; i++ {
+ memfs.WriteFileReader(fmt.Sprintf("/foobar_%02d.dat", i), bytes.NewReader([]byte("hello")), -1)
+ }
+
+ files = memfs.List("/", fs.ListOptions{
+ Pattern: "/foobar_*",
+ })
+ require.Equal(t, 10, len(files))
+
+ process.Reference = "foobar"
+ rsi.UpdateProcess(app.ProcessID{ID: process.ID}, process)
+
+ rsi.Start()
+
+ require.Eventually(t, func() bool {
+ files := memfs.List("/", fs.ListOptions{
+ Pattern: "/foobar_*",
+ })
+
+ return len(files) == 5
+ }, 15*time.Second, time.Second)
+
+ rsi.Stop()
+
+ //task, ok := rs.tasks.Load(app.ProcessID{ID: process.ID})
+}
diff --git a/restream/task.go b/restream/task.go
index 611702ef..40cb74c4 100644
--- a/restream/task.go
+++ b/restream/task.go
@@ -3,6 +3,7 @@ package restream
import (
"errors"
"maps"
+ "sync/atomic"
"time"
"github.com/datarhei/core/v16/ffmpeg/parse"
@@ -31,7 +32,8 @@ type task struct {
parser parse.Parser
playout map[string]int
logger log.Logger
- usesDisk bool // Whether this task uses the disk
+ usesDisk bool // Whether this task uses the disk
+ hwdevice atomic.Int32 // Index of the GPU this task uses
metadata map[string]interface{}
lock *xsync.RBMutex
@@ -234,8 +236,47 @@ func (t *task) State() (*app.State, error) {
state.Memory = status.Memory.Current
state.CPU = status.CPU.Current / status.CPU.NCPU
state.LimitMode = status.LimitMode
- state.Resources.CPU = status.CPU
- state.Resources.Memory = status.Memory
+ state.Resources.CPU = app.ProcessUsageCPU{
+ NCPU: status.CPU.NCPU,
+ Current: status.CPU.Current,
+ Average: status.CPU.Average,
+ Max: status.CPU.Max,
+ Limit: status.CPU.Limit,
+ IsThrottling: status.CPU.IsThrottling,
+ }
+ state.Resources.Memory = app.ProcessUsageMemory{
+ Current: status.Memory.Current,
+ Average: status.Memory.Average,
+ Max: status.Memory.Max,
+ Limit: status.Memory.Limit,
+ }
+ state.Resources.GPU = app.ProcessUsageGPU{
+ Index: status.GPU.Index,
+ Usage: app.ProcessUsageGPUUsage{
+ Current: status.GPU.Usage.Current,
+ Average: status.GPU.Usage.Average,
+ Max: status.GPU.Usage.Max,
+ Limit: status.GPU.Usage.Limit,
+ },
+ Encoder: app.ProcessUsageGPUUsage{
+ Current: status.GPU.Encoder.Current,
+ Average: status.GPU.Encoder.Average,
+ Max: status.GPU.Encoder.Max,
+ Limit: status.GPU.Encoder.Limit,
+ },
+ Decoder: app.ProcessUsageGPUUsage{
+ Current: status.GPU.Decoder.Current,
+ Average: status.GPU.Decoder.Average,
+ Max: status.GPU.Decoder.Max,
+ Limit: status.GPU.Decoder.Limit,
+ },
+ Memory: app.ProcessUsageGPUMemory{
+ Current: status.GPU.Memory.Current,
+ Average: status.GPU.Memory.Average,
+ Max: status.GPU.Memory.Max,
+ Limit: status.GPU.Memory.Limit,
+ },
+ }
state.Duration = status.Duration.Round(10 * time.Millisecond).Seconds()
state.Reconnect = -1
state.Command = status.CommandArgs
@@ -420,7 +461,7 @@ func (t *task) ExportMetadata() map[string]interface{} {
return t.metadata
}
-func (t *task) Limit(cpu, memory bool) bool {
+func (t *task) Limit(cpu, memory, gpu bool) bool {
token := t.lock.RLock()
defer t.lock.RUnlock(token)
@@ -428,11 +469,19 @@ func (t *task) Limit(cpu, memory bool) bool {
return false
}
- t.ffmpeg.Limit(cpu, memory)
+ t.ffmpeg.Limit(cpu, memory, gpu)
return true
}
+func (t *task) SetHWDevice(index int) {
+ t.hwdevice.Store(int32(index))
+}
+
+func (t *task) GetHWDevice() int {
+ return int(t.hwdevice.Load())
+}
+
func (t *task) Equal(config *app.Config) bool {
token := t.lock.RLock()
defer t.lock.RUnlock(token)
@@ -448,11 +497,11 @@ func (t *task) Config() *app.Config {
token := t.lock.RLock()
defer t.lock.RUnlock(token)
- if t.config == nil {
+ if t.process == nil {
return nil
}
- return t.config.Clone()
+ return t.process.Config.Clone()
}
func (t *task) Destroy() {
diff --git a/service/api/api.go b/service/api/api.go
index 0ec47d86..266824c4 100644
--- a/service/api/api.go
+++ b/service/api/api.go
@@ -1,7 +1,6 @@
package api
import (
- "bytes"
"errors"
"fmt"
"io"
@@ -11,6 +10,7 @@ import (
"github.com/datarhei/core/v16/encoding/json"
"github.com/datarhei/core/v16/log"
+ "github.com/datarhei/core/v16/mem"
)
type API interface {
@@ -87,13 +87,13 @@ func (e statusError) Is(target error) bool {
type copyReader struct {
reader io.Reader
- copy *bytes.Buffer
+ copy *mem.Buffer
}
func newCopyReader(r io.Reader) io.Reader {
c := &copyReader{
reader: r,
- copy: new(bytes.Buffer),
+ copy: mem.Get(),
}
return c
@@ -105,8 +105,8 @@ func (c *copyReader) Read(p []byte) (int, error) {
c.copy.Write(p)
if err == io.EOF {
- c.reader = c.copy
- c.copy = &bytes.Buffer{}
+ c.reader = c.copy.Reader()
+ c.copy = mem.Get()
}
return i, err
@@ -227,8 +227,10 @@ func (a *api) call(method, path string, body io.Reader) ([]byte, error) {
}
func (a *api) Monitor(id string, monitordata MonitorData) (MonitorResponse, error) {
- var data bytes.Buffer
- encoder := json.NewEncoder(&data)
+ data := mem.Get()
+ defer mem.Put(data)
+
+ encoder := json.NewEncoder(data)
if err := encoder.Encode(monitordata); err != nil {
return MonitorResponse{}, err
}
@@ -240,7 +242,7 @@ func (a *api) Monitor(id string, monitordata MonitorData) (MonitorResponse, erro
}
*/
- response, err := a.callWithRetry(http.MethodPut, "api/v1/core/monitor/"+id, &data)
+ response, err := a.callWithRetry(http.MethodPut, "api/v1/core/monitor/"+id, data.Reader())
if err != nil {
return MonitorResponse{}, fmt.Errorf("error sending request: %w", err)
}
diff --git a/session/collector.go b/session/collector.go
index 1dd0489d..98eda19b 100644
--- a/session/collector.go
+++ b/session/collector.go
@@ -9,9 +9,8 @@ import (
"github.com/datarhei/core/v16/encoding/json"
"github.com/datarhei/core/v16/log"
+ "github.com/datarhei/core/v16/math/average"
"github.com/datarhei/core/v16/net"
-
- "github.com/prep/average"
)
// Session represents an active session
@@ -239,8 +238,8 @@ type collector struct {
maxTxBitrate float64
maxSessions uint64
- rxBitrate *average.SlidingWindow
- txBitrate *average.SlidingWindow
+ rxBitrate *average.SMA
+ txBitrate *average.SMA
collectHistory bool
history history
@@ -410,8 +409,8 @@ func (c *collector) start() {
c.running = true
- c.rxBitrate, _ = average.New(averageWindow, averageGranularity)
- c.txBitrate, _ = average.New(averageWindow, averageGranularity)
+ c.rxBitrate, _ = average.NewSMA(averageWindow, averageGranularity)
+ c.txBitrate, _ = average.NewSMA(averageWindow, averageGranularity)
}
func (c *collector) stop() {
@@ -648,7 +647,7 @@ func (c *collector) Ingress(id string, size int64) {
}
if sess.Ingress(size) {
- c.rxBitrate.Add(size * 8)
+ c.rxBitrate.Add(float64(size) * 8)
c.rxBytes += uint64(size)
}
}
@@ -667,7 +666,7 @@ func (c *collector) Egress(id string, size int64) {
}
if sess.Egress(size) {
- c.txBitrate.Add(size * 8)
+ c.txBitrate.Add(float64(size) * 8)
c.txBytes += uint64(size)
}
}
@@ -709,11 +708,11 @@ func (c *collector) IsSessionsExceeded() bool {
}
func (c *collector) IngressBitrate() float64 {
- return c.rxBitrate.Average(averageWindow)
+ return c.rxBitrate.Average()
}
func (c *collector) EgressBitrate() float64 {
- return c.txBitrate.Average(averageWindow)
+ return c.txBitrate.Average()
}
func (c *collector) MaxIngressBitrate() float64 {
diff --git a/session/registry.go b/session/registry.go
index 0dd54af6..8a454013 100644
--- a/session/registry.go
+++ b/session/registry.go
@@ -196,15 +196,9 @@ func (r *registry) sessionPersister(pattern *strftime.Strftime, bufferDuration t
"buffer": bufferDuration,
}).Log("Session persister started")
- buffer := &bytes.Buffer{}
path := pattern.FormatString(time.Now())
- file := r.persist.fs.Open(path)
- if file != nil {
- buffer.ReadFrom(file)
- file.Close()
- }
-
+ buffer := &bytes.Buffer{}
enc := json.NewEncoder(buffer)
ticker := time.NewTicker(bufferDuration)
@@ -222,7 +216,7 @@ loop:
currentPath := pattern.FormatString(session.ClosedAt)
if currentPath != path && session.ClosedAt.After(splitTime) {
if buffer.Len() > 0 {
- _, _, err := r.persist.fs.WriteFileSafe(path, buffer.Bytes())
+ _, err := r.persist.fs.AppendFileReader(path, buffer, -1)
if err != nil {
r.logger.Error().WithError(err).WithField("path", path).Log("")
}
@@ -239,7 +233,7 @@ loop:
enc.Encode(&session)
case t := <-ticker.C:
if buffer.Len() > 0 {
- _, _, err := r.persist.fs.WriteFileSafe(path, buffer.Bytes())
+ _, err := r.persist.fs.AppendFileReader(path, buffer, -1)
if err != nil {
r.logger.Error().WithError(err).WithField("path", path).Log("")
} else {
@@ -260,7 +254,7 @@ loop:
}
if buffer.Len() > 0 {
- _, _, err := r.persist.fs.WriteFileSafe(path, buffer.Bytes())
+ _, err := r.persist.fs.AppendFileReader(path, buffer, -1)
if err != nil {
r.logger.Error().WithError(err).WithField("path", path).Log("")
} else {
diff --git a/session/registry_test.go b/session/registry_test.go
index 7b1d987d..5cba9ec1 100644
--- a/session/registry_test.go
+++ b/session/registry_test.go
@@ -8,6 +8,7 @@ import (
"time"
"github.com/datarhei/core/v16/io/fs"
+
"github.com/lestrrat-go/strftime"
"github.com/stretchr/testify/require"
)
diff --git a/session/session.go b/session/session.go
index d727de7f..e943cea3 100644
--- a/session/session.go
+++ b/session/session.go
@@ -6,7 +6,7 @@ import (
"time"
"github.com/datarhei/core/v16/log"
- "github.com/prep/average"
+ "github.com/datarhei/core/v16/math/average"
)
type session struct {
@@ -27,10 +27,10 @@ type session struct {
timeout time.Duration
callback func(*session)
- rxBitrate *average.SlidingWindow
+ rxBitrate *average.SMA
rxBytes uint64
- txBitrate *average.SlidingWindow
+ txBitrate *average.SMA
txBytes uint64
tickerStop context.CancelFunc
@@ -59,8 +59,8 @@ func (s *session) Init(id, reference string, closeCallback func(*session), inact
s.peer = ""
s.extra = map[string]interface{}{}
- s.rxBitrate, _ = average.New(averageWindow, averageGranularity)
- s.txBitrate, _ = average.New(averageWindow, averageGranularity)
+ s.rxBitrate, _ = average.NewSMA(averageWindow, averageGranularity)
+ s.txBitrate, _ = average.NewSMA(averageWindow, averageGranularity)
s.topRxBitrate = 0.0
s.topTxBitrate = 0.0
@@ -105,8 +105,8 @@ func (s *session) close() {
s.tickerStop = nil
}
- s.rxBitrate.Stop()
- s.txBitrate.Stop()
+ s.rxBitrate.Reset()
+ s.txBitrate.Reset()
go s.callback(s)
}
@@ -157,10 +157,10 @@ func (s *session) Ingress(size int64) bool {
s.stale.Stop()
s.stale.Reset(s.timeout)
- s.rxBitrate.Add(size * 8)
+ s.rxBitrate.Add(float64(size) * 8)
s.rxBytes += uint64(size)
- bitrate := s.rxBitrate.Average(averageWindow)
+ bitrate := s.rxBitrate.Average()
if bitrate > s.topRxBitrate {
s.topRxBitrate = bitrate
}
@@ -183,10 +183,10 @@ func (s *session) Egress(size int64) bool {
s.stale.Stop()
s.stale.Reset(s.timeout)
- s.txBitrate.Add(size * 8)
+ s.txBitrate.Add(float64(size) * 8)
s.txBytes += uint64(size)
- bitrate := s.txBitrate.Average(averageWindow)
+ bitrate := s.txBitrate.Average()
if bitrate > s.topTxBitrate {
s.topTxBitrate = bitrate
}
@@ -199,11 +199,11 @@ func (s *session) Egress(size int64) bool {
}
func (s *session) RxBitrate() float64 {
- return s.rxBitrate.Average(averageWindow)
+ return s.rxBitrate.Average()
}
func (s *session) TxBitrate() float64 {
- return s.txBitrate.Average(averageWindow)
+ return s.txBitrate.Average()
}
func (s *session) TopRxBitrate() float64 {
diff --git a/time/source.go b/time/source.go
new file mode 100644
index 00000000..83eff7dd
--- /dev/null
+++ b/time/source.go
@@ -0,0 +1,25 @@
+package time
+
+import "time"
+
+type Source interface {
+ Now() time.Time
+}
+
+type StdSource struct{}
+
+func (s *StdSource) Now() time.Time {
+ return time.Now()
+}
+
+type TestSource struct {
+ N time.Time
+}
+
+func (t *TestSource) Now() time.Time {
+ return t.N
+}
+
+func (t *TestSource) Set(sec int64, nsec int64) {
+ t.N = time.Unix(sec, nsec)
+}
diff --git a/update/update.go b/update/update.go
index 331f6da0..24a88e1f 100644
--- a/update/update.go
+++ b/update/update.go
@@ -1,7 +1,6 @@
package update
import (
- "bytes"
"context"
"fmt"
"io"
@@ -12,6 +11,7 @@ import (
"github.com/datarhei/core/v16/encoding/json"
"github.com/datarhei/core/v16/log"
+ "github.com/datarhei/core/v16/mem"
"github.com/datarhei/core/v16/monitor/metric"
"golang.org/x/mod/semver"
)
@@ -156,15 +156,16 @@ func (s *checker) check() error {
Timeout: 5 * time.Second,
}
- var data bytes.Buffer
- encoder := json.NewEncoder(&data)
+ data := mem.Get()
+ defer mem.Put(data)
+ encoder := json.NewEncoder(data)
if err := encoder.Encode(&request); err != nil {
return err
}
s.logger.Debug().WithField("request", data.String()).Log("")
- req, err := http.NewRequest(http.MethodPut, "https://service.datarhei.com/api/v1/app_version", &data)
+ req, err := http.NewRequest(http.MethodPut, "https://service.datarhei.com/api/v1/app_version", data.Reader())
if err != nil {
return err
}
diff --git a/vendor/github.com/99designs/gqlgen/.golangci.yml b/vendor/github.com/99designs/gqlgen/.golangci.yml
index 098727cb..79d6627e 100644
--- a/vendor/github.com/99designs/gqlgen/.golangci.yml
+++ b/vendor/github.com/99designs/gqlgen/.golangci.yml
@@ -106,6 +106,10 @@ issues:
- path: codegen/testserver/.*/resolver\.go
linters:
- gocritic
+ # The interfaces are autogenerated and don't conform to the paramTypeCombine rule
+ - path: _examples/federation/products/graph/entity.resolvers.go
+ linters:
+ - gocritic
# Disable revive.use-any for backwards compatibility
- path: graphql/map.go
text: "use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
@@ -113,3 +117,11 @@ issues:
text: "use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
- path: codegen/testserver/singlefile/resolver.go
text: "use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
+ - path: codegen/testserver/generated_test.go
+ linters:
+ - staticcheck
+ text: SA1019
+ - path: plugin/modelgen/models_test.go
+ linters:
+ - staticcheck
+ text: SA1019
diff --git a/vendor/github.com/99designs/gqlgen/CHANGELOG.md b/vendor/github.com/99designs/gqlgen/CHANGELOG.md
index acea9f11..bbcc1722 100644
--- a/vendor/github.com/99designs/gqlgen/CHANGELOG.md
+++ b/vendor/github.com/99designs/gqlgen/CHANGELOG.md
@@ -5,10 +5,2085 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.45...HEAD)
+## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.50...HEAD)
+
+## [v0.17.50](https://github.com/99designs/gqlgen/compare/v0.17.49...v0.17.50) - 2024-09-13
+- a6d5d843 release v0.17.50
+
+- f154d99d Fix Nancy to use Go 1.22
+
+- 6b9e40e8 make rewrite default for resolver layout single-file (#3243)
+
+
1855758d chore(deps): bump dset in /integration in the npm_and_yarn group (#3268)
+
+Bumps the npm_and_yarn group in /integration with 1 update: [dset](https://github.com/lukeed/dset).
+
+
+Updates `dset` from 3.1.3 to 3.1.4
+- [Release notes](https://github.com/lukeed/dset/releases)
+- [Commits](https://github.com/lukeed/dset/compare/v3.1.3...v3.1.4)
+
+---
+updated-dependencies:
+- dependency-name: dset
+ dependency-type: indirect
+ dependency-group: npm_and_yarn
+...
+
+
+
+fda0539e Bump some more module versions (#3262)
+
+* Bump some more module versions
+
+
+* Update aurora
+
+
+* Avoid upgrade to go 1.23
+
+
+* downgrade goquery to support pre-Go 1.23 for now
+
+
+* Downgrade moq to support pre-Go 1.23 as well
+
+
+---------
+
+
+
+- 59f0d04c Bump golang.org/x/net 0.29 (#3261)
+
+cf42b253 chore(deps): bump golang.org/x/text from 0.17.0 to 0.18.0 (#3259)
+
+Bumps [golang.org/x/text](https://github.com/golang/text) from 0.17.0 to 0.18.0.
+- [Release notes](https://github.com/golang/text/releases)
+- [Commits](https://github.com/golang/text/compare/v0.17.0...v0.18.0)
+
+---
+updated-dependencies:
+- dependency-name: golang.org/x/text
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+b728c12f chore(deps): bump golang.org/x/text from 0.17.0 to 0.18.0 in /_examples (#3256)
+
+Bumps [golang.org/x/text](https://github.com/golang/text) from 0.17.0 to 0.18.0.
+- [Release notes](https://github.com/golang/text/releases)
+- [Commits](https://github.com/golang/text/compare/v0.17.0...v0.18.0)
+
+---
+updated-dependencies:
+- dependency-name: golang.org/x/text
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+cba40a38 chore(deps-dev): bump vite from 5.4.2 to 5.4.3 in /integration (#3257)
+
+Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.4.2 to 5.4.3.
+- [Release notes](https://github.com/vitejs/vite/releases)
+- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md)
+- [Commits](https://github.com/vitejs/vite/commits/v5.4.3/packages/vite)
+
+---
+updated-dependencies:
+- dependency-name: vite
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+f7bee06f chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3258)
+
+- [Release notes](https://github.com/apollographql/apollo-client/releases)
+- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md)
+- [Commits](https://github.com/apollographql/apollo-client/compare/v3.11.5...v3.11.8)
+
+---
+updated-dependencies:
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+81ac627d chore(deps): bump robherley/go-test-action from 0.4.1 to 0.5.0 (#3255)
+
+Bumps [robherley/go-test-action](https://github.com/robherley/go-test-action) from 0.4.1 to 0.5.0.
+- [Release notes](https://github.com/robherley/go-test-action/releases)
+- [Commits](https://github.com/robherley/go-test-action/compare/v0.4.1...v0.5.0)
+
+---
+updated-dependencies:
+- dependency-name: robherley/go-test-action
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+86ac6b36 internal/code: `Unalias` element of pointer (#3250) (closes #3247)
+
+This reverts commit 4c4be0aeaaad758e703724fe4a6575768017ac53.
+
+* code: `Unalias` element of pointer
+
+* chore: added comment
+
+
+
+- 4c4be0ae codegen: Unalias before lookup type (#3247)
+
+ab1781b1 codegen: Go 1.23 alias support (#3246)
+
+* code: added `Unalias` for Go 1.22
+
+* codegen: Go 1.23 alias support
+
+
+
+814f7c71 chore(deps): bump actions/upload-artifact from 4.3.6 to 4.4.0 (#3235)
+
+Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.6 to 4.4.0.
+- [Release notes](https://github.com/actions/upload-artifact/releases)
+- [Commits](https://github.com/actions/upload-artifact/compare/v4.3.6...v4.4.0)
+
+---
+updated-dependencies:
+- dependency-name: actions/upload-artifact
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+1cbbc120 chore(deps): bump github.com/rs/cors from 1.11.0 to 1.11.1 in /_examples (#3236)
+
+Bumps [github.com/rs/cors](https://github.com/rs/cors) from 1.11.0 to 1.11.1.
+- [Commits](https://github.com/rs/cors/compare/v1.11.0...v1.11.1)
+
+---
+updated-dependencies:
+- dependency-name: github.com/rs/cors
+ dependency-type: direct:production
+ update-type: version-update:semver-patch
+...
+
+
+
+2da2ac36 chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3237)
+
+- [Release notes](https://github.com/apollographql/apollo-client/releases)
+- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md)
+- [Commits](https://github.com/apollographql/apollo-client/compare/v3.11.4...v3.11.5)
+
+---
+updated-dependencies:
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+0b9bd5ee refactor: don't extract [@goField](https://github.com/goField) twice (#3234)
+
+We already extract the values in config.Init(). Remove the duplicate logic in the modelgen plugin.
+
+We leave the reference to GoFieldHook even though it's a noop since it's public. This makes this a non-breaking change. We will remove this during the next breaking release.
+
+
+
+18378f90 feat: allow argument directives to be called even if the argument is null (#3233) (closes #3188)
+
+The existing implementation assumes that if an input argument is null, you don't want to call the directive. This is a very constraining assumption — directives may want to not just mutate an argument but to actually outright set it.
+
+This is a breaking change as argument directives now need to handle null input values. Added a new config switch:
+
+call_argument_directives_with_nulls: bool
+
+to control this new behavior.
+
+* Run go generate ./...
+
+
+
+- 3e76e7ee only close websocket once (#3231)
+
+256794aa chore(deps-dev): bump vite from 5.4.0 to 5.4.2 in /integration (#3229)
+
+Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.4.0 to 5.4.2.
+- [Release notes](https://github.com/vitejs/vite/releases)
+- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md)
+- [Commits](https://github.com/vitejs/vite/commits/v5.4.2/packages/vite)
+
+---
+updated-dependencies:
+- dependency-name: vite
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+6acc182c Go 1.23 support (#3226)
+
+* Added support for go 1.23
+
+* Added handling for *types.Alias
+
+* Updated golang ci lint to 1.60.2
+
+* Fixed lint issues and ignore SA1019 on generated test files
+
+* Update coverage.yml
+
+* Update fmt-and-generate.yml
+
+* Update integration.yml
+
+* Update lint.yml
+
+* Update test.yml
+
+---------
+
+
+
+f6a82204 chore(deps): bump golang.org/x/tools from 0.23.0 to 0.24.0 (#3219)
+
+* chore(deps): bump golang.org/x/tools from 0.23.0 to 0.24.0
+
+Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.23.0 to 0.24.0.
+- [Release notes](https://github.com/golang/tools/releases)
+- [Commits](https://github.com/golang/tools/compare/v0.23.0...v0.24.0)
+
+---
+updated-dependencies:
+- dependency-name: golang.org/x/tools
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+* _examples fixup
+
+
+---------
+
+
+
+1849e124 chore(deps): bump golang.org/x/text from 0.16.0 to 0.17.0 (#3218)
+
+Bumps [golang.org/x/text](https://github.com/golang/text) from 0.16.0 to 0.17.0.
+- [Release notes](https://github.com/golang/text/releases)
+- [Commits](https://github.com/golang/text/compare/v0.16.0...v0.17.0)
+
+---
+updated-dependencies:
+- dependency-name: golang.org/x/text
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+2f7772c9 [proposal] Add [@concurrent](https://github.com/concurrent) directive for types (#3203)
+
+* Issue 3202
+
+* Issue 3202
+
+* Issue 3202
+
+* Make optional concurrent for fields of objects
+
+* Make optional concurrent for fields of objects
+
+
+
+3556475a Fix marshaling interfaces and union types (#3211)
+
+* Fixed marshaling interfaces and union
+
+* Fixed marshaling interfaces and union
+
+
+
+23abdc56 chore(deps): bump github.com/urfave/cli/v2 from 2.27.3 to 2.27.4 (#3217)
+
+Bumps [github.com/urfave/cli/v2](https://github.com/urfave/cli) from 2.27.3 to 2.27.4.
+- [Release notes](https://github.com/urfave/cli/releases)
+- [Changelog](https://github.com/urfave/cli/blob/main/docs/CHANGELOG.md)
+- [Commits](https://github.com/urfave/cli/compare/v2.27.3...v2.27.4)
+
+---
+updated-dependencies:
+- dependency-name: github.com/urfave/cli/v2
+ dependency-type: direct:production
+ update-type: version-update:semver-patch
+...
+
+
+
+- bbc354c6 Add local toolchain for matrix
+
+3fe8329d chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3215)
+
+- [Release notes](https://github.com/apollographql/apollo-client/releases)
+- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md)
+- [Commits](https://github.com/apollographql/apollo-client/compare/v3.11.2...v3.11.4)
+
+---
+updated-dependencies:
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+edca7992 chore(deps-dev): bump vite from 5.3.5 to 5.4.0 in /integration (#3216)
+
+Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.3.5 to 5.4.0.
+- [Release notes](https://github.com/vitejs/vite/releases)
+- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md)
+
+---
+updated-dependencies:
+- dependency-name: vite
+ dependency-type: direct:development
+ update-type: version-update:semver-minor
+...
+
+
+
+f0b7ee3f chore(deps): bump actions/upload-artifact from 4.3.5 to 4.3.6 (#3220)
+
+Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.5 to 4.3.6.
+- [Release notes](https://github.com/actions/upload-artifact/releases)
+- [Commits](https://github.com/actions/upload-artifact/compare/v4.3.5...v4.3.6)
+
+---
+updated-dependencies:
+- dependency-name: actions/upload-artifact
+ dependency-type: direct:production
+ update-type: version-update:semver-patch
+...
+
+
+
+719b7af3 chore(deps): bump golang.org/x/text from 0.16.0 to 0.17.0 in /_examples (#3221)
+
+Bumps [golang.org/x/text](https://github.com/golang/text) from 0.16.0 to 0.17.0.
+- [Release notes](https://github.com/golang/text/releases)
+- [Commits](https://github.com/golang/text/compare/v0.16.0...v0.17.0)
+
+---
+updated-dependencies:
+- dependency-name: golang.org/x/text
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+d14fd791 chore(deps): bump actions/upload-artifact from 4.3.4 to 4.3.5 (#3208)
+
+Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.4 to 4.3.5.
+- [Release notes](https://github.com/actions/upload-artifact/releases)
+- [Commits](https://github.com/actions/upload-artifact/compare/v4.3.4...v4.3.5)
+
+---
+updated-dependencies:
+- dependency-name: actions/upload-artifact
+ dependency-type: direct:production
+ update-type: version-update:semver-patch
+...
+
+
+
+564e2dc5 chore(deps): bump golangci/golangci-lint-action from 6.0.1 to 6.1.0 (#3207)
+
+Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.0.1 to 6.1.0.
+- [Release notes](https://github.com/golangci/golangci-lint-action/releases)
+- [Commits](https://github.com/golangci/golangci-lint-action/compare/v6.0.1...v6.1.0)
+
+---
+updated-dependencies:
+- dependency-name: golangci/golangci-lint-action
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+d3d147e6 chore(deps): bump golang.org/x/tools from 0.22.0 to 0.23.0 (#3172)
+
+* chore(deps): bump golang.org/x/tools from 0.22.0 to 0.23.0
+
+Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.22.0 to 0.23.0.
+- [Release notes](https://github.com/golang/tools/releases)
+- [Commits](https://github.com/golang/tools/compare/v0.22.0...v0.23.0)
+
+---
+updated-dependencies:
+- dependency-name: golang.org/x/tools
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+
+---------
+
+
+
+2d7e00b5 chore(deps-dev): bump typescript from 5.5.3 to 5.5.4 in /integration (#3196)
+
+Bumps [typescript](https://github.com/Microsoft/TypeScript) from 5.5.3 to 5.5.4.
+- [Release notes](https://github.com/Microsoft/TypeScript/releases)
+- [Changelog](https://github.com/microsoft/TypeScript/blob/main/azure-pipelines.release.yml)
+- [Commits](https://github.com/Microsoft/TypeScript/compare/v5.5.3...v5.5.4)
+
+---
+updated-dependencies:
+- dependency-name: typescript
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+5f86c55a chore(deps-dev): bump [@graphql](https://github.com/graphql)-codegen/client-preset in /integration (#3197)
+
+- [Release notes](https://github.com/dotansimha/graphql-code-generator/releases)
+- [Changelog](https://github.com/dotansimha/graphql-code-generator/blob/master/packages/presets/client/CHANGELOG.md)
+
+---
+updated-dependencies:
+- dependency-name: "@graphql-codegen/client-preset"
+  dependency-type: direct:development
+  update-type: version-update:semver-patch
+...
+
+
+
+552bb4b9 chore(deps-dev): bump vite from 5.3.4 to 5.3.5 in /integration (#3199)
+
+Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.3.4 to 5.3.5.
+- [Release notes](https://github.com/vitejs/vite/releases)
+- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md)
+- [Commits](https://github.com/vitejs/vite/commits/v5.3.5/packages/vite)
+
+---
+updated-dependencies:
+- dependency-name: vite
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+45a29fe0 chore(deps): bump github.com/urfave/cli/v2 from 2.27.2 to 2.27.3 (#3200)
+
+Bumps [github.com/urfave/cli/v2](https://github.com/urfave/cli) from 2.27.2 to 2.27.3.
+- [Release notes](https://github.com/urfave/cli/releases)
+- [Changelog](https://github.com/urfave/cli/blob/main/docs/CHANGELOG.md)
+- [Commits](https://github.com/urfave/cli/compare/v2.27.2...v2.27.3)
+
+---
+updated-dependencies:
+- dependency-name: github.com/urfave/cli/v2
+ dependency-type: direct:production
+ update-type: version-update:semver-patch
+...
+
+
+
+3c2443e4 chore(deps): bump golang.org/x/sync from 0.7.0 to 0.8.0 in /_examples (#3206)
+
+Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.7.0 to 0.8.0.
+- [Commits](https://github.com/golang/sync/compare/v0.7.0...v0.8.0)
+
+---
+updated-dependencies:
+- dependency-name: golang.org/x/sync
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+52f65d0f chore(deps-dev): bump vitest from 2.0.4 to 2.0.5 in /integration (#3209)
+
+Bumps [vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 2.0.4 to 2.0.5.
+- [Release notes](https://github.com/vitest-dev/vitest/releases)
+- [Commits](https://github.com/vitest-dev/vitest/commits/v2.0.5/packages/vitest)
+
+---
+updated-dependencies:
+- dependency-name: vitest
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+1beac8b7 chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3210)
+
+- [Release notes](https://github.com/apollographql/apollo-client/releases)
+- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md)
+- [Commits](https://github.com/apollographql/apollo-client/compare/v3.10.8...v3.11.2)
+
+---
+updated-dependencies:
+- dependency-name: "@apollo/client"
+  dependency-type: direct:development
+  update-type: version-update:semver-minor
+...
+
+
+
+- 9b031e4d chore: fix typos in comments, tests and unexported vars (#3193)
+
+- 892c4842 refactor: decrease indentation in api.ReplacePlugin (#3194)
+
+d1682f7c chore(deps-dev): bump vite from 5.3.3 to 5.3.4 in /integration (#3190)
+
+Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.3.3 to 5.3.4.
+- [Release notes](https://github.com/vitejs/vite/releases)
+- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md)
+- [Commits](https://github.com/vitejs/vite/commits/v5.3.4/packages/vite)
+
+---
+updated-dependencies:
+- dependency-name: vite
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+cfc9863a chore(deps-dev): bump vitest from 2.0.2 to 2.0.4 in /integration (#3189)
+
+Bumps [vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 2.0.2 to 2.0.4.
+- [Release notes](https://github.com/vitest-dev/vitest/releases)
+- [Commits](https://github.com/vitest-dev/vitest/commits/v2.0.4/packages/vitest)
+
+---
+updated-dependencies:
+- dependency-name: vitest
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+1cc0a17b Revert "feat: allow argument directives to be called even if the argument is …" (#3191)
+
+This reverts commit 0fb31a3ed2a63552eddcf7c2a6c40aa0d59bd4cc.
+
+
+
+0fb31a3e feat: allow argument directives to be called even if the argument is null (#3188)
+
+The existing implementation assumes that if an input argument is null, you don't want to call the directive. This is a very constraining assumption — directives may want to not just mutate an argument but to actually outright set it.
+
+This is a breaking change as argument directives now need to handle null input values. Added a new config switch:
+
+call_argument_directives_with_nulls: bool
+
+to control this new behavior.
+
+
+
+cd82be01 refactor: significantly clean up the federation.gotpl template (#3187) (closes #2991)
+
+* fix: fix Federation example
+
+Some configurations weren't working due to a missing resolver.
+
+* chore: Introduce mechanism for running all example Federation subgraphs
+
+This enables engineers to more easily run the debugger on the Federation example. Updated README to show how to use it.
+
+* refactor: significantly clean up the federation.gotpl template
+
+There were a number of inline structs and inline functions that made it extremely hard to reason about what the code is doing. Split these out into smaller functions with less closures and mutation.
+
+
+
+a63f94bb chore(deps-dev): bump vitest from 1.6.0 to 2.0.2 in /integration (#3185)
+
+Bumps [vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 1.6.0 to 2.0.2.
+- [Release notes](https://github.com/vitest-dev/vitest/releases)
+- [Commits](https://github.com/vitest-dev/vitest/commits/v2.0.2/packages/vitest)
+
+---
+updated-dependencies:
+- dependency-name: vitest
+ dependency-type: direct:development
+ update-type: version-update:semver-major
+...
+
+
+
+de315d3d chore: Refactor federation.go to make it easier to read (#3183) (closes #2991)
+
+* chore: Refactor federation.go
+
+- Cut functions into smaller functions
+- Remove mutation in several locations
+
+
+* Refactor InjectSourcesLate
+
+Easier to reason about and read this way.
+
+* Re-run go generate ./...
+
+* regenerate
+
+
+---------
+
+
+
+4d8d93cd Make cache generic to avoid casting (#3179)
+
+* Make cache generic to avoid casting
+
+
+* Update handler/handler.go
+
+---------
+
+
+
+f2cf11e5 chore(deps-dev): bump [@graphql](https://github.com/graphql)-codegen/client-preset in /integration (#3174)
+
+- [Release notes](https://github.com/dotansimha/graphql-code-generator/releases)
+- [Changelog](https://github.com/dotansimha/graphql-code-generator/blob/master/packages/presets/client/CHANGELOG.md)
+
+---
+updated-dependencies:
+- dependency-name: "@graphql-codegen/client-preset"
+  dependency-type: direct:development
+  update-type: version-update:semver-patch
+...
+
+
+
+fc150db0 chore(deps-dev): bump typescript from 5.5.2 to 5.5.3 in /integration (#3175)
+
+Bumps [typescript](https://github.com/Microsoft/TypeScript) from 5.5.2 to 5.5.3.
+- [Release notes](https://github.com/Microsoft/TypeScript/releases)
+- [Changelog](https://github.com/microsoft/TypeScript/blob/main/azure-pipelines.release.yml)
+- [Commits](https://github.com/Microsoft/TypeScript/compare/v5.5.2...v5.5.3)
+
+---
+updated-dependencies:
+- dependency-name: typescript
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+60c9f671 chore(deps): bump actions/upload-artifact from 4.3.3 to 4.3.4 (#3176)
+
+Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.3 to 4.3.4.
+- [Release notes](https://github.com/actions/upload-artifact/releases)
+- [Commits](https://github.com/actions/upload-artifact/compare/v4.3.3...v4.3.4)
+
+---
+updated-dependencies:
+- dependency-name: actions/upload-artifact
+ dependency-type: direct:production
+ update-type: version-update:semver-patch
+...
+
+
+
+59bdde19 chore(deps-dev): bump vite from 5.3.2 to 5.3.3 in /integration (#3173)
+
+Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.3.2 to 5.3.3.
+- [Release notes](https://github.com/vitejs/vite/releases)
+- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md)
+- [Commits](https://github.com/vitejs/vite/commits/v5.3.3/packages/vite)
+
+---
+updated-dependencies:
+- dependency-name: vite
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+0ca3b19e chore(deps): bump github.com/rs/cors (#3171)
+
+Bumps the go_modules group with 1 update in the /_examples/websocket-initfunc/server directory: [github.com/rs/cors](https://github.com/rs/cors).
+
+
+Updates `github.com/rs/cors` from 1.9.0 to 1.11.0
+- [Commits](https://github.com/rs/cors/compare/v1.9.0...v1.11.0)
+
+---
+updated-dependencies:
+- dependency-name: github.com/rs/cors
+ dependency-type: direct:production
+ dependency-group: go_modules
+...
+
+
+
+d0e68928 Nulls are now unmarshalled as zero values for primitive types (#3162)
+
+* Nulls are now unmarshalled as zero values for primitive types
+
+* Address uint and run gofumpt
+
+
+---------
+
+
+
+dce2e353 chore(deps): bump test-summary/action from 2.3 to 2.4 (#3163)
+
+Bumps [test-summary/action](https://github.com/test-summary/action) from 2.3 to 2.4.
+- [Release notes](https://github.com/test-summary/action/releases)
+- [Commits](https://github.com/test-summary/action/compare/v2.3...v2.4)
+
+---
+updated-dependencies:
+- dependency-name: test-summary/action
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+2afa0c22 chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3164)
+
+- [Release notes](https://github.com/apollographql/apollo-client/releases)
+- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md)
+- [Commits](https://github.com/apollographql/apollo-client/compare/v3.10.6...v3.10.8)
+
+---
+updated-dependencies:
+- dependency-name: "@apollo/client"
+  dependency-type: direct:development
+  update-type: version-update:semver-patch
+...
+
+
+
+2aeb1518 chore(deps-dev): bump [@graphql](https://github.com/graphql)-codegen/client-preset in /integration (#3165)
+
+- [Release notes](https://github.com/dotansimha/graphql-code-generator/releases)
+- [Changelog](https://github.com/dotansimha/graphql-code-generator/blob/master/packages/presets/client/CHANGELOG.md)
+
+---
+updated-dependencies:
+- dependency-name: "@graphql-codegen/client-preset"
+  dependency-type: direct:development
+  update-type: version-update:semver-patch
+...
+
+
+
+28b2f494 chore(deps-dev): bump vite from 5.3.1 to 5.3.2 in /integration (#3166)
+
+Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.3.1 to 5.3.2.
+- [Release notes](https://github.com/vitejs/vite/releases)
+- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md)
+- [Commits](https://github.com/vitejs/vite/commits/v5.3.2/packages/vite)
+
+---
+updated-dependencies:
+- dependency-name: vite
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+f82d604a chore(deps-dev): bump [@graphql](https://github.com/graphql)-codegen/schema-ast in /integration (#3167)
+
+- [Release notes](https://github.com/dotansimha/graphql-code-generator/releases)
+- [Changelog](https://github.com/dotansimha/graphql-code-generator/blob/master/packages/plugins/other/schema-ast/CHANGELOG.md)
+
+---
+updated-dependencies:
+- dependency-name: "@graphql-codegen/schema-ast"
+  dependency-type: direct:development
+  update-type: version-update:semver-minor
+...
+
+
+
+dd37ea00 chore(deps-dev): bump typescript from 5.4.5 to 5.5.2 in /integration (#3157)
+
+Bumps [typescript](https://github.com/Microsoft/TypeScript) from 5.4.5 to 5.5.2.
+- [Release notes](https://github.com/Microsoft/TypeScript/releases)
+- [Changelog](https://github.com/microsoft/TypeScript/blob/main/azure-pipelines.release.yml)
+- [Commits](https://github.com/Microsoft/TypeScript/compare/v5.4.5...v5.5.2)
+
+---
+updated-dependencies:
+- dependency-name: typescript
+ dependency-type: direct:development
+ update-type: version-update:semver-minor
+...
+
+
+
+7b9c3223 chore(deps-dev): bump graphql from 16.8.2 to 16.9.0 in /integration (#3158)
+
+Bumps [graphql](https://github.com/graphql/graphql-js) from 16.8.2 to 16.9.0.
+- [Release notes](https://github.com/graphql/graphql-js/releases)
+- [Commits](https://github.com/graphql/graphql-js/compare/v16.8.2...v16.9.0)
+
+---
+updated-dependencies:
+- dependency-name: graphql
+ dependency-type: direct:development
+ update-type: version-update:semver-minor
+...
+
+
+
+b822c2c0 chore(deps): bump mikepenz/action-junit-report from 4.3.0 to 4.3.1 (#3159)
+
+Bumps [mikepenz/action-junit-report](https://github.com/mikepenz/action-junit-report) from 4.3.0 to 4.3.1.
+- [Release notes](https://github.com/mikepenz/action-junit-report/releases)
+- [Commits](https://github.com/mikepenz/action-junit-report/compare/v4.3.0...v4.3.1)
+
+---
+updated-dependencies:
+- dependency-name: mikepenz/action-junit-report
+ dependency-type: direct:production
+ update-type: version-update:semver-patch
+...
+
+
+
+c1525831 chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3156)
+
+- [Release notes](https://github.com/apollographql/apollo-client/releases)
+- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md)
+- [Commits](https://github.com/apollographql/apollo-client/compare/v3.10.5...v3.10.6)
+
+---
+updated-dependencies:
+- dependency-name: "@apollo/client"
+  dependency-type: direct:development
+  update-type: version-update:semver-patch
+...
+
+
+
+- feab5f51 fix bug: POST Insufficient rigorous judgment leads to invalid SSE (#3153)
+
+- 7c8bc50d Add failing test as example (#3151)
+
+d00ace38 Add prettier test results (#3148)
+
+* Add prettier test results
+
+
+
+641377d7 chore(deps-dev): bump ws in /integration in the npm_and_yarn group (#3147)
+
+Bumps the npm_and_yarn group in /integration with 1 update: [ws](https://github.com/websockets/ws).
+
+
+Updates `ws` from 8.16.0 to 8.17.1
+- [Release notes](https://github.com/websockets/ws/releases)
+- [Commits](https://github.com/websockets/ws/compare/8.16.0...8.17.1)
+
+---
+updated-dependencies:
+- dependency-name: ws
+ dependency-type: indirect
+ dependency-group: npm_and_yarn
+...
+
+
+
+- e724bde5 docs: missing 'repeatable' in [@goExtraField](https://github.com/goExtraField) directive (#3150)
+
+- 85459a32 Fix typo in config field names (#3149)
+
+- 1422ff25 feat: Change plugin signatures (#2011)
+
+04b13fdb chore(deps-dev): bump vite from 5.2.13 to 5.3.1 in /integration (#3144)
+
+Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.2.13 to 5.3.1.
+- [Release notes](https://github.com/vitejs/vite/releases)
+- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md)
+- [Commits](https://github.com/vitejs/vite/commits/v5.3.1/packages/vite)
+
+---
+updated-dependencies:
+- dependency-name: vite
+ dependency-type: direct:development
+ update-type: version-update:semver-minor
+...
+
+
+
+a1ccf971 chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3143)
+
+- [Release notes](https://github.com/apollographql/apollo-client/releases)
+- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md)
+- [Commits](https://github.com/apollographql/apollo-client/compare/v3.10.4...v3.10.5)
+
+---
+updated-dependencies:
+- dependency-name: "@apollo/client"
+  dependency-type: direct:development
+  update-type: version-update:semver-patch
+...
+
+
+
+8a59a2c4 chore(deps-dev): bump graphql from 16.8.1 to 16.8.2 in /integration (#3142)
+
+Bumps [graphql](https://github.com/graphql/graphql-js) from 16.8.1 to 16.8.2.
+- [Release notes](https://github.com/graphql/graphql-js/releases)
+- [Commits](https://github.com/graphql/graphql-js/compare/v16.8.1...v16.8.2)
+
+---
+updated-dependencies:
+- dependency-name: graphql
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+80098c67 chore(deps-dev): bump [@graphql](https://github.com/graphql)-codegen/client-preset in /integration (#3141)
+
+- [Release notes](https://github.com/dotansimha/graphql-code-generator/releases)
+- [Changelog](https://github.com/dotansimha/graphql-code-generator/blob/master/packages/presets/client/CHANGELOG.md)
+
+---
+updated-dependencies:
+- dependency-name: "@graphql-codegen/client-preset"
+  dependency-type: direct:development
+  update-type: version-update:semver-minor
+...
+
+
+
+fc90169b chore(deps): bump google.golang.org/protobuf from 1.34.1 to 1.34.2 (#3140)
+
+Bumps google.golang.org/protobuf from 1.34.1 to 1.34.2.
+
+---
+updated-dependencies:
+- dependency-name: google.golang.org/protobuf
+ dependency-type: direct:production
+ update-type: version-update:semver-patch
+...
+
+
+
+- fb67b709 v0.17.49 postrelease bump
+
+
+
+
+
+
+## [v0.17.49](https://github.com/99designs/gqlgen/compare/v0.17.48...v0.17.49) - 2024-06-13
+- d093c6e5 release v0.17.49
+
+- a4f997f8 refactor: add missed file.Close() and use t.TempDir() (#3137)
+
+f813598b #3118 Add token limit option to fix CVE-2023-49559 (#3136)
+
+* Use ParseQueryWithLimit and add parserTokenLimit to executor
+
+* add parser token limit test
+
+* remove failing test
+
+* move default token limit to const
+
+---------
+
+
+
+ee1e18c7 chore(deps-dev): bump braces in /integration in the npm_and_yarn group (#3134)
+
+Bumps the npm_and_yarn group in /integration with 1 update: [braces](https://github.com/micromatch/braces).
+
+
+Updates `braces` from 3.0.2 to 3.0.3
+- [Changelog](https://github.com/micromatch/braces/blob/master/CHANGELOG.md)
+- [Commits](https://github.com/micromatch/braces/compare/3.0.2...3.0.3)
+
+---
+updated-dependencies:
+- dependency-name: braces
+ dependency-type: indirect
+ dependency-group: npm_and_yarn
+...
+
+
+
+d6226db6 chore(deps): bump github.com/vektah/gqlparser/v2 from 2.5.12 to 2.5.14 in the go_modules group (#3133)
+
+* chore(deps): bump github.com/vektah/gqlparser/v2 in the go_modules group
+
+Bumps the go_modules group with 1 update: [github.com/vektah/gqlparser/v2](https://github.com/vektah/gqlparser).
+
+
+Updates `github.com/vektah/gqlparser/v2` from 2.5.12 to 2.5.14
+- [Release notes](https://github.com/vektah/gqlparser/releases)
+- [Commits](https://github.com/vektah/gqlparser/compare/v2.5.12...v2.5.14)
+
+---
+updated-dependencies:
+- dependency-name: github.com/vektah/gqlparser/v2
+ dependency-type: direct:production
+ dependency-group: go_modules
+...
+
+
+* Update to v2.5.16
+
+
+---------
+
+
+
+6daceaf3 Linter update + add revive rules (#3127)
+
+* Linter update + add revive rules
+
+
+* More revive lints
+
+
+---------
+
+
+
+e6860c35 chore(deps): bump golang.org/x/tools from 0.21.0 to 0.22.0 (#3125)
+
+Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.21.0 to 0.22.0.
+- [Release notes](https://github.com/golang/tools/releases)
+- [Commits](https://github.com/golang/tools/compare/v0.21.0...v0.22.0)
+
+---
+updated-dependencies:
+- dependency-name: golang.org/x/tools
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+3bad9617 chore(deps): bump golang.org/x/text from 0.15.0 to 0.16.0 (#3124)
+
+* chore(deps): bump golang.org/x/text from 0.15.0 to 0.16.0
+
+Bumps [golang.org/x/text](https://github.com/golang/text) from 0.15.0 to 0.16.0.
+- [Release notes](https://github.com/golang/text/releases)
+- [Commits](https://github.com/golang/text/compare/v0.15.0...v0.16.0)
+
+---
+updated-dependencies:
+- dependency-name: golang.org/x/text
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+* Update examples go mod
+
+
+---------
+
+
+
+4492b3c0 chore(deps-dev): bump vite from 5.2.12 to 5.2.13 in /integration (#3126)
+
+Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.2.12 to 5.2.13.
+- [Release notes](https://github.com/vitejs/vite/releases)
+- [Changelog](https://github.com/vitejs/vite/blob/v5.2.13/packages/vite/CHANGELOG.md)
+- [Commits](https://github.com/vitejs/vite/commits/v5.2.13/packages/vite)
+
+---
+updated-dependencies:
+- dependency-name: vite
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+8ec8d795 chore(deps): bump golang.org/x/text from 0.15.0 to 0.16.0 in /_examples (#3123)
+
+Bumps [golang.org/x/text](https://github.com/golang/text) from 0.15.0 to 0.16.0.
+- [Release notes](https://github.com/golang/text/releases)
+- [Commits](https://github.com/golang/text/compare/v0.15.0...v0.16.0)
+
+---
+updated-dependencies:
+- dependency-name: golang.org/x/text
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+- d9ba3405 v0.17.48 postrelease bump
+
+
+
+
+
+
+## [v0.17.48](https://github.com/99designs/gqlgen/compare/v0.17.47...v0.17.48) - 2024-06-06
+- 621350a1 release v0.17.48
+
+fbf73ee1 chore(deps-dev): bump vite from 5.2.11 to 5.2.12 in /integration (#3117)
+
+Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.2.11 to 5.2.12.
+- [Release notes](https://github.com/vitejs/vite/releases)
+- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md)
+- [Commits](https://github.com/vitejs/vite/commits/v5.2.12/packages/vite)
+
+---
+updated-dependencies:
+- dependency-name: vite
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+e07134ab add option to omit panic handlers during development (#3114)
+
+see docs for motivation
+
+
+
+- 1a7c6090 refactor: fix gocritic lint issues (#3113)
+
+- 4114515f refactor: use errors.New instead of fmt.Errorf (#3112)
+
+- 93f6366d Omit gqlgen version in config files used for tests (#3111)
+
+dae915d2 Correct dataloader example (#3110)
+
+Dataloader requires the value and error slice to be of equal length, in order to correctly return the values.
+
+Link: https://github.com/vikstrous/dataloadgen/blob/7de6ebe3d882737607ce2ba646e8d6ec652b32e3/dataloadgen_test.go#L19-L20
+
+
+
+bd9219dd Go template function to split string into array of strings. (#3108)
+
+* added new template function to split string
+
+* StrSplit func to upper
+
+---------
+
+
+
+- 6c83b9ea Remove duplicated return_pointers_in_unmarshalinput explanation (#3109)
+
+- d2a6bd5f refactor: fix testifylint.go-require lint issues (#3107)
+
+b18d0287 testifylint v1.3.0 fixes (#3103)
+
+* Resolve Merge conflict
+
+
+* Autofixes
+
+
+* Lots more fixes and formatting
+
+
+* Add one more
+
+
+* Apply suggestions from code review
+
+
+---------
+
+
+
+- bbb0c959 chore: fix tests, pin golangci-lint version (#3105)
+
+- 57e88b27 Forgot the examples portion (#3101)
+
+- ff77f8b2 Some minor test lint (#3102)
+
+90f2271e refactor: use t.Log instead of fmt.Print (#3099)
+
+* refactor: use t.Log instead of fmt.Printf
+
+* Add back failure context as to what errors happened and where
+
+
+---------
+
+
+
+- d7447c69 refactor: rename local variables to match Go codestyle (#3100)
+
+- 834d832c refactor: avoid panic in tests (#3098)
+
+- 71845858 Ignore gorilla/websocket 1.5.1 in dependabot (#3097)
+
+- 4ecfec90 Fix go install gqlgen binary (#3095)
+
+- 866075cd refactor: simplify with strconv.FormatBool (#3094)
+
+- ab19907d refactor: UnmarshalID implementation (#3093)
+
+- a9965fbd refactor: use 'any' instead of 'interface{}' for consistency (#3090)
+
+d5c9f896 Embed extra fields config (#3088)
+
+---------
+
+
+
+0b9e6f9c chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3085)
+
+- [Release notes](https://github.com/apollographql/apollo-client/releases)
+- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md)
+- [Commits](https://github.com/apollographql/apollo-client/compare/v3.10.3...v3.10.4)
+
+---
+updated-dependencies:
+- dependency-name: "@apollo/client"
+  dependency-type: direct:development
+  update-type: version-update:semver-patch
+...
+
+
+
+33aad657 chore(deps-dev): bump [@graphql](https://github.com/graphql)-codegen/client-preset in /integration (#3084)
+
+- [Release notes](https://github.com/dotansimha/graphql-code-generator/releases)
+- [Changelog](https://github.com/dotansimha/graphql-code-generator/blob/master/packages/presets/client/CHANGELOG.md)
+
+---
+updated-dependencies:
+- dependency-name: "@graphql-codegen/client-preset"
+  dependency-type: direct:development
+  update-type: version-update:semver-patch
+...
+
+
+
+- 58d6978e v0.17.47 postrelease bump
+
+
+
+
+
+
+## [v0.17.47](https://github.com/99designs/gqlgen/compare/v0.17.46...v0.17.47) - 2024-05-18
+- a9f2b500 release v0.17.47
+
+- 611cbcec Update gqlparser (#3080)
+
+3a5827d4 Fix #2856: resolver receive previous implementation on render (#2886)
+
+* pass previous impl to resolver
+
+* pass previous only and not default
+
+
+
+- e0125301 bugfix for [@goField](https://github.com/goField) + [@goExtraField](https://github.com/goExtraField) combination (#3078)
+
+e61a7200 Federation: Update docs to use IntrospectAndCompose (#3077)
+
+`serviceList` now gets a deprecation warning to use IntrospectAndCompose
+instead. We update our docs to avoid referring to deprecated services
+
+
+
+de31828a Ability to inline extraFields configuration. New [@goExtraField](https://github.com/goExtraField) directive. (#3076)
+
+---------
+
+
+
+- 8b4df636 Go mod tidy (#3075)
+
+ae9787cb chore(deps): bump github.com/sosodev/duration from 1.3.0 to 1.3.1 (#3070)
+
+* chore(deps): bump github.com/sosodev/duration from 1.3.0 to 1.3.1
+
+Bumps [github.com/sosodev/duration](https://github.com/sosodev/duration) from 1.3.0 to 1.3.1.
+- [Release notes](https://github.com/sosodev/duration/releases)
+- [Commits](https://github.com/sosodev/duration/compare/v1.3.0...v1.3.1)
+
+---
+updated-dependencies:
+- dependency-name: github.com/sosodev/duration
+ dependency-type: direct:production
+ update-type: version-update:semver-patch
+...
+
+
+* go mod tidy examples
+
+
+* Pin gorilla to skip 1.5.1
+
+
+---------
+
+
+
+32014fdb chore(deps): bump golang.org/x/tools from 0.20.0 to 0.21.0 (#3072)
+
+Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.20.0 to 0.21.0.
+- [Release notes](https://github.com/golang/tools/releases)
+- [Commits](https://github.com/golang/tools/compare/v0.20.0...v0.21.0)
+
+---
+updated-dependencies:
+- dependency-name: golang.org/x/tools
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+1b5ed7c0 chore(deps-dev): bump urql from 4.0.7 to 4.1.0 in /integration (#3074)
+
+Bumps [urql](https://github.com/urql-graphql/urql/tree/HEAD/packages/react-urql) from 4.0.7 to 4.1.0.
+- [Release notes](https://github.com/urql-graphql/urql/releases)
+- [Changelog](https://github.com/urql-graphql/urql/blob/main/packages/react-urql/CHANGELOG.md)
+
+---
+updated-dependencies:
+- dependency-name: urql
+ dependency-type: direct:development
+ update-type: version-update:semver-minor
+...
+
+
+
+77ea79a8 chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3073)
+
+- [Release notes](https://github.com/apollographql/apollo-client/releases)
+- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md)
+- [Commits](https://github.com/apollographql/apollo-client/compare/v3.10.2...v3.10.3)
+
+---
+updated-dependencies:
+- dependency-name: "@apollo/client"
+  dependency-type: direct:development
+  update-type: version-update:semver-patch
+...
+
+
+
+358c7a2b chore(deps): bump google.golang.org/protobuf from 1.34.0 to 1.34.1 (#3071)
+
+Bumps google.golang.org/protobuf from 1.34.0 to 1.34.1.
+
+---
+updated-dependencies:
+- dependency-name: google.golang.org/protobuf
+ dependency-type: direct:production
+ update-type: version-update:semver-patch
+...
+
+
+
+5c951f4e chore(deps): bump golangci/golangci-lint-action from 5.3.0 to 6.0.1 (#3069)
+
+Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 5.3.0 to 6.0.1.
+- [Release notes](https://github.com/golangci/golangci-lint-action/releases)
+- [Commits](https://github.com/golangci/golangci-lint-action/compare/v5.3.0...v6.0.1)
+
+---
+updated-dependencies:
+- dependency-name: golangci/golangci-lint-action
+ dependency-type: direct:production
+ update-type: version-update:semver-major
+...
+
+
+
+- 42cae907 chore: remove deprecated errcheck.ignore lint option (#3062)
+
+- 1a59d58b Fix typo in error message (#3065)
+
+- 39d3d8d0 refactor: simplify test asserts (#3061)
+
+- 7421bdfb refactor: compile regex only once (#3063)
+
+- a4bf3a7e chore: simplify generating examples in release script (#3064)
+
+- 45f6eb56 v0.17.46 postrelease bump
+
+
+
+
+
+
+## [v0.17.46](https://github.com/99designs/gqlgen/compare/v0.17.45...v0.17.46) - 2024-05-07
+- 90af8bf5 release v0.17.46
+
+- bf49e56a fix: failed to build _examples/websocket-initfunc/server/server.go (#3055) (#3058)
+
+1ee0fa80 chore(deps-dev): bump vite from 5.2.10 to 5.2.11 in /integration (#3047)
+
+Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.2.10 to 5.2.11.
+- [Release notes](https://github.com/vitejs/vite/releases)
+- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md)
+- [Commits](https://github.com/vitejs/vite/commits/v5.2.11/packages/vite)
+
+---
+updated-dependencies:
+- dependency-name: vite
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+ddd9a6ba chore(deps): bump golang.org/x/text from 0.14.0 to 0.15.0 (#3052)
+
+Bumps [golang.org/x/text](https://github.com/golang/text) from 0.14.0 to 0.15.0.
+- [Release notes](https://github.com/golang/text/releases)
+- [Commits](https://github.com/golang/text/compare/v0.14.0...v0.15.0)
+
+---
+updated-dependencies:
+- dependency-name: golang.org/x/text
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+36b66607 chore(deps): bump github.com/PuerkitoBio/goquery from 1.9.1 to 1.9.2 (#3051)
+
+* chore(deps): bump github.com/PuerkitoBio/goquery from 1.9.1 to 1.9.2
+
+Bumps [github.com/PuerkitoBio/goquery](https://github.com/PuerkitoBio/goquery) from 1.9.1 to 1.9.2.
+- [Release notes](https://github.com/PuerkitoBio/goquery/releases)
+- [Commits](https://github.com/PuerkitoBio/goquery/compare/v1.9.1...v1.9.2)
+
+---
+updated-dependencies:
+- dependency-name: github.com/PuerkitoBio/goquery
+ dependency-type: direct:production
+ update-type: version-update:semver-patch
+...
+
+
+* go mod tidy
+
+
+---------
+
+
+
+ad91bf6c chore(deps-dev): bump vitest from 1.5.2 to 1.6.0 in /integration (#3048)
+
+Bumps [vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 1.5.2 to 1.6.0.
+- [Release notes](https://github.com/vitest-dev/vitest/releases)
+- [Commits](https://github.com/vitest-dev/vitest/commits/v1.6.0/packages/vitest)
+
+---
+updated-dependencies:
+- dependency-name: vitest
+ dependency-type: direct:development
+ update-type: version-update:semver-minor
+...
+
+
+
+a5cb576c chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3049)
+
+- [Release notes](https://github.com/apollographql/apollo-client/releases)
+- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md)
+- [Commits](https://github.com/apollographql/apollo-client/compare/v3.10.1...v3.10.2)
+
+---
+updated-dependencies:
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+6b423e51 chore(deps): bump google.golang.org/protobuf from 1.33.0 to 1.34.0 (#3050)
+
+Bumps google.golang.org/protobuf from 1.33.0 to 1.34.0.
+
+---
+updated-dependencies:
+- dependency-name: google.golang.org/protobuf
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+c34e246b chore(deps): bump golang.org/x/text from 0.14.0 to 0.15.0 in /_examples (#3053)
+
+Bumps [golang.org/x/text](https://github.com/golang/text) from 0.14.0 to 0.15.0.
+- [Release notes](https://github.com/golang/text/releases)
+- [Commits](https://github.com/golang/text/compare/v0.14.0...v0.15.0)
+
+---
+updated-dependencies:
+- dependency-name: golang.org/x/text
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+a3991df0 chore(deps): bump golangci/golangci-lint-action from 5.0.0 to 5.3.0 (#3054)
+
+Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 5.0.0 to 5.3.0.
+- [Release notes](https://github.com/golangci/golangci-lint-action/releases)
+- [Commits](https://github.com/golangci/golangci-lint-action/compare/v5.0.0...v5.3.0)
+
+---
+updated-dependencies:
+- dependency-name: golangci/golangci-lint-action
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+- 769632a1 chore: simplify go generate in examples (#3033)
+
+- f24ae887 enum values binding v2 (#3014)
+
+b3a10547 Add initial cache tests for MapCache and NoCache (#3040)
+
+* Add initial cache tests for MapCache and NoCache
+
+* Add edge case testing to MapCache and NoCache
+
+* Reformat, regenerate
+
+
+---------
+
+
+
+- 16854647 chore: lint _examples directory (#3042)
+
+- 2bb32fe7 chore: remove deprecated build tag (#3041)
+
+- 4b559b33 Fix codegen config tests: add file closing (#3037)
+
+- 293991e9 docs: fix links to the docs latest version (#3038)
+
+- 79dc5e03 refactor: change test asserts to be more idiomatic (#3036)
+
+- a1989525 chore: remove unnecessary empty lines (#3035)
+
+- 6998f19f chore: `run.skip-dirs` is deprecated in golangci-lint v1.57 (#3034)
+
+835c2d11 Improve federation resolver selection (#3029)
+
+* Improve federation resolver selection
+
+Just checking for existence of keys in the representations isn't enough. If the values are null, we should skip the resolver.
+
+* Run go generate ./...
+
+* Add test cases
+
+* Fix linter
+
+
+
+9e8e7edd refactor: simplify tests for `api.Generate` (#3031)
+
+* refactor: simplify tests for Generate
+
+* Add deleted files to git ignore
+
+
+---------
+
+
+
+- 28405ac1 Fix test asserts: reverse expected and actual params (#3027)
+
+75326bc7 Bump github.com/sosodev/duration from 1.2.0 to 1.3.0 (#3024)
+
+* Bump github.com/sosodev/duration from 1.2.0 to 1.3.0
+
+Bumps [github.com/sosodev/duration](https://github.com/sosodev/duration) from 1.2.0 to 1.3.0.
+- [Release notes](https://github.com/sosodev/duration/releases)
+- [Commits](https://github.com/sosodev/duration/compare/v1.2.0...v1.3.0)
+
+---
+updated-dependencies:
+- dependency-name: github.com/sosodev/duration
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+* go mod tidy
+
+
+---------
+
+
+
+bf4406a1 Bump vitest from 1.5.0 to 1.5.2 in /integration (#3021)
+
+Bumps [vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 1.5.0 to 1.5.2.
+- [Release notes](https://github.com/vitest-dev/vitest/releases)
+- [Commits](https://github.com/vitest-dev/vitest/commits/v1.5.2/packages/vitest)
+
+---
+updated-dependencies:
+- dependency-name: vitest
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+1a8ebe9b Bump [@apollo](https://github.com/apollo)/client from 3.9.11 to 3.10.1 in /integration (#3022)
+
+- [Release notes](https://github.com/apollographql/apollo-client/releases)
+- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md)
+- [Commits](https://github.com/apollographql/apollo-client/compare/v3.9.11...v3.10.1)
+
+---
+updated-dependencies:
+ dependency-type: direct:development
+ update-type: version-update:semver-minor
+...
+
+
+
+bacaab8e Bump github.com/urfave/cli/v2 from 2.27.1 to 2.27.2 (#3023)
+
+Bumps [github.com/urfave/cli/v2](https://github.com/urfave/cli) from 2.27.1 to 2.27.2.
+- [Release notes](https://github.com/urfave/cli/releases)
+- [Changelog](https://github.com/urfave/cli/blob/main/docs/CHANGELOG.md)
+- [Commits](https://github.com/urfave/cli/compare/v2.27.1...v2.27.2)
+
+---
+updated-dependencies:
+- dependency-name: github.com/urfave/cli/v2
+ dependency-type: direct:production
+ update-type: version-update:semver-patch
+...
+
+
+
+3f515543 Bump github.com/rs/cors from 1.10.1 to 1.11.0 in /_examples (#3025)
+
+Bumps [github.com/rs/cors](https://github.com/rs/cors) from 1.10.1 to 1.11.0.
+- [Commits](https://github.com/rs/cors/compare/v1.10.1...v1.11.0)
+
+---
+updated-dependencies:
+- dependency-name: github.com/rs/cors
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+
+ced2189d Bump golangci/golangci-lint-action from 4.0.0 to 5.0.0 (#3026)
+
+Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 4.0.0 to 5.0.0.
+- [Release notes](https://github.com/golangci/golangci-lint-action/releases)
+- [Commits](https://github.com/golangci/golangci-lint-action/compare/v4.0.0...v5.0.0)
+
+---
+updated-dependencies:
+- dependency-name: golangci/golangci-lint-action
+ dependency-type: direct:production
+ update-type: version-update:semver-major
+...
+
+
+
+- ada00f78 chore: remove unused lint.txt (#3017)
+
+8bd35429 chore: fix some typos in comments (#3020)
+
+* chore: fix some typos in comments
+
+
+* Apply suggestions from code review
+
+---------
+
+
+
+e1ef86e7 Bump vite from 5.2.8 to 5.2.10 in /integration (#3015)
+
+Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.2.8 to 5.2.10.
+- [Release notes](https://github.com/vitejs/vite/releases)
+- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md)
+- [Commits](https://github.com/vitejs/vite/commits/v5.2.10/packages/vite)
+
+---
+updated-dependencies:
+- dependency-name: vite
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+ecc3f647 Bump [@apollo](https://github.com/apollo)/client from 3.9.10 to 3.9.11 in /integration (#3011)
+
+- [Release notes](https://github.com/apollographql/apollo-client/releases)
+- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md)
+- [Commits](https://github.com/apollographql/apollo-client/compare/v3.9.10...v3.9.11)
+
+---
+updated-dependencies:
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+c92b511c Bump typescript from 5.4.4 to 5.4.5 in /integration (#3010)
+
+Bumps [typescript](https://github.com/Microsoft/TypeScript) from 5.4.4 to 5.4.5.
+- [Release notes](https://github.com/Microsoft/TypeScript/releases)
+- [Changelog](https://github.com/microsoft/TypeScript/blob/main/azure-pipelines.release.yml)
+- [Commits](https://github.com/Microsoft/TypeScript/compare/v5.4.4...v5.4.5)
+
+---
+updated-dependencies:
+- dependency-name: typescript
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+cc2d95a2 Bump vitest from 1.4.0 to 1.5.0 in /integration (#3012)
+
+Bumps [vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 1.4.0 to 1.5.0.
+- [Release notes](https://github.com/vitest-dev/vitest/releases)
+- [Commits](https://github.com/vitest-dev/vitest/commits/v1.5.0/packages/vitest)
+
+---
+updated-dependencies:
+- dependency-name: vitest
+ dependency-type: direct:development
+ update-type: version-update:semver-minor
+...
+
+
+
+c17a4b6f fix: codegen will _ the fieldset parameter if its not needed (#3006)
+
+* fix: codegen will _ the fieldset parameter if its not needed
+
+* update generated examples
+
+
+
+- 0b0f6592 chore: update Automatic Persisted Queries Link (#3005)
+
+79aa0ceb Mark ctx as unused when no arguments for FieldContextFunc (#2999)
+
+* Mark ctx as unused when no arguments for FieldContextFunc
+
+* Regenerate
+
+
+---------
+
+
+
+f3b34683 Bump urql from 4.0.6 to 4.0.7 in /integration (#2995) (closes #2998)
+
+* Bump urql from 4.0.6 to 4.0.7 in /integration
+
+Bumps [urql](https://github.com/urql-graphql/urql/tree/HEAD/packages/react-urql) from 4.0.6 to 4.0.7.
+- [Release notes](https://github.com/urql-graphql/urql/releases)
+- [Changelog](https://github.com/urql-graphql/urql/blob/main/packages/react-urql/CHANGELOG.md)
+
+---
+updated-dependencies:
+- dependency-name: urql
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+client.
+
+---------
+
+
+
+8ab31646 Bump graphql-ws from 5.15.0 to 5.16.0 in /integration (#2986)
+
+Bumps [graphql-ws](https://github.com/enisdenjo/graphql-ws) from 5.15.0 to 5.16.0.
+- [Release notes](https://github.com/enisdenjo/graphql-ws/releases)
+- [Changelog](https://github.com/enisdenjo/graphql-ws/blob/master/CHANGELOG.md)
+- [Commits](https://github.com/enisdenjo/graphql-ws/compare/v5.15.0...v5.16.0)
+
+---
+updated-dependencies:
+- dependency-name: graphql-ws
+ dependency-type: direct:development
+ update-type: version-update:semver-minor
+...
+
+
+
+45fafedc Bump golang.org/x/tools from 0.19.0 to 0.20.0 (#2996)
+
+* Bump golang.org/x/tools from 0.19.0 to 0.20.0
+
+Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.19.0 to 0.20.0.
+- [Release notes](https://github.com/golang/tools/releases)
+- [Commits](https://github.com/golang/tools/compare/v0.19.0...v0.20.0)
+
+---
+updated-dependencies:
+- dependency-name: golang.org/x/tools
+ dependency-type: direct:production
+ update-type: version-update:semver-minor
+...
+
+
+* Update examples to match root go.mod
+
+
+---------
+
+
+
+4c45be21 Bump vite from 5.2.7 to 5.2.8 in /integration (#2992)
+
+Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.2.7 to 5.2.8.
+- [Release notes](https://github.com/vitejs/vite/releases)
+- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md)
+- [Commits](https://github.com/vitejs/vite/commits/v5.2.8/packages/vite)
+
+---
+updated-dependencies:
+- dependency-name: vite
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+- 6e5a7758 Update `tools.go` url (#2987)
+
+6771a804 Bump [@apollo](https://github.com/apollo)/client from 3.9.9 to 3.9.10 in /integration (#2994)
+
+- [Release notes](https://github.com/apollographql/apollo-client/releases)
+- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md)
+- [Commits](https://github.com/apollographql/apollo-client/compare/v3.9.9...v3.9.10)
+
+---
+updated-dependencies:
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+2ce5fcd1 Bump typescript from 5.4.3 to 5.4.4 in /integration (#2993)
+
+Bumps [typescript](https://github.com/Microsoft/TypeScript) from 5.4.3 to 5.4.4.
+- [Release notes](https://github.com/Microsoft/TypeScript/releases)
+- [Changelog](https://github.com/microsoft/TypeScript/blob/main/azure-pipelines.release.yml)
+- [Commits](https://github.com/Microsoft/TypeScript/compare/v5.4.3...v5.4.4)
+
+---
+updated-dependencies:
+- dependency-name: typescript
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+- 99d7d887 fix: stop loading package dependencies (#2988)
+
+- d0a1aec2 enum values binding (#2982)
+
+6352b800 Bump vite from 5.2.6 to 5.2.7 in /integration (#2984)
+
+Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.2.6 to 5.2.7.
+- [Release notes](https://github.com/vitejs/vite/releases)
+- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md)
+- [Commits](https://github.com/vitejs/vite/commits/v5.2.7/packages/vite)
+
+---
+updated-dependencies:
+- dependency-name: vite
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+2286b0e8 Bump graphql-sse from 2.5.2 to 2.5.3 in /integration (#2985)
+
+Bumps [graphql-sse](https://github.com/enisdenjo/graphql-sse) from 2.5.2 to 2.5.3.
+- [Release notes](https://github.com/enisdenjo/graphql-sse/releases)
+- [Changelog](https://github.com/enisdenjo/graphql-sse/blob/master/CHANGELOG.md)
+- [Commits](https://github.com/enisdenjo/graphql-sse/compare/v2.5.2...v2.5.3)
+
+---
+updated-dependencies:
+- dependency-name: graphql-sse
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+8ab2c27a Bump [@graphql](https://github.com/graphql)-codegen/client-preset from 4.2.4 to 4.2.5 in /integration (#2983)
+
+- [Release notes](https://github.com/dotansimha/graphql-code-generator/releases)
+- [Changelog](https://github.com/dotansimha/graphql-code-generator/blob/master/packages/presets/client/CHANGELOG.md)
+
+---
+updated-dependencies:
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+- 780bf27a Add UintID type binding (#2980)
+
+d192a591 Bump [@apollo](https://github.com/apollo)/client from 3.9.7 to 3.9.9 in /integration (#2977)
+
+- [Release notes](https://github.com/apollographql/apollo-client/releases)
+- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md)
+- [Commits](https://github.com/apollographql/apollo-client/compare/v3.9.7...v3.9.9)
+
+---
+updated-dependencies:
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+62289425 Bump vite from 5.1.6 to 5.2.6 in /integration (#2978)
+
+Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.1.6 to 5.2.6.
+- [Release notes](https://github.com/vitejs/vite/releases)
+- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md)
+- [Commits](https://github.com/vitejs/vite/commits/v5.2.6/packages/vite)
+
+---
+updated-dependencies:
+- dependency-name: vite
+ dependency-type: direct:development
+ update-type: version-update:semver-minor
+...
+
+
+
+105ec44b Bump typescript from 5.4.2 to 5.4.3 in /integration (#2979)
+
+Bumps [typescript](https://github.com/Microsoft/TypeScript) from 5.4.2 to 5.4.3.
+- [Release notes](https://github.com/Microsoft/TypeScript/releases)
+- [Changelog](https://github.com/microsoft/TypeScript/blob/main/azure-pipelines.release.yml)
+- [Commits](https://github.com/Microsoft/TypeScript/compare/v5.4.2...v5.4.3)
+
+---
+updated-dependencies:
+- dependency-name: typescript
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+- 0afd63a5 chore: remove repetitive words (#2976)
+
+ee526b05 Bump vite from 5.1.5 to 5.1.6 in /integration (#2971)
+
+Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.1.5 to 5.1.6.
+- [Release notes](https://github.com/vitejs/vite/releases)
+- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md)
+- [Commits](https://github.com/vitejs/vite/commits/v5.1.6/packages/vite)
+
+---
+updated-dependencies:
+- dependency-name: vite
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+00bf8ef3 Bump vitest from 1.3.1 to 1.4.0 in /integration (#2972)
+
+Bumps [vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 1.3.1 to 1.4.0.
+- [Release notes](https://github.com/vitest-dev/vitest/releases)
+- [Commits](https://github.com/vitest-dev/vitest/commits/v1.4.0/packages/vitest)
+
+---
+updated-dependencies:
+- dependency-name: vitest
+ dependency-type: direct:development
+ update-type: version-update:semver-minor
+...
+
+
+
+bdbdddf5 Bump [@apollo](https://github.com/apollo)/client from 3.9.6 to 3.9.7 in /integration (#2970)
+
+- [Release notes](https://github.com/apollographql/apollo-client/releases)
+- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md)
+- [Commits](https://github.com/apollographql/apollo-client/compare/v3.9.6...v3.9.7)
+
+---
+updated-dependencies:
+ dependency-type: direct:development
+ update-type: version-update:semver-patch
+...
+
+
+
+- fa221f64 Update Changelog
+
+- f897668b v0.17.45 postrelease bump
+
+
+
+
+
## [v0.17.45](https://github.com/99designs/gqlgen/compare/v0.17.44...v0.17.45) - 2024-03-11
- b6d1a8b9 release v0.17.45
@@ -3573,7 +5648,7 @@ when generating next the context was captured there.
Which means later when the returned function from DispatchOperation is
called. The responseContext which accumulates the errors is the
-tempResponseContext which we no longer have access to read the errors
+tempResponseContext which we no longer have access to to read the errors
out of it.
Instead add a context to next() so that it can be passed through and
diff --git a/vendor/github.com/99designs/gqlgen/api/generate.go b/vendor/github.com/99designs/gqlgen/api/generate.go
index 9e7b4188..7b83be50 100644
--- a/vendor/github.com/99designs/gqlgen/api/generate.go
+++ b/vendor/github.com/99designs/gqlgen/api/generate.go
@@ -45,7 +45,11 @@ func Generate(cfg *config.Config, option ...Option) error {
}
}
}
- plugins = append([]plugin.Plugin{federation.New(cfg.Federation.Version)}, plugins...)
+ federationPlugin, err := federation.New(cfg.Federation.Version, cfg)
+ if err != nil {
+ return fmt.Errorf("failed to construct the Federation plugin: %w", err)
+ }
+ plugins = append([]plugin.Plugin{federationPlugin}, plugins...)
}
for _, o := range option {
@@ -58,6 +62,13 @@ func Generate(cfg *config.Config, option ...Option) error {
cfg.Sources = append(cfg.Sources, s)
}
}
+ if inj, ok := p.(plugin.EarlySourcesInjector); ok {
+ s, err := inj.InjectSourcesEarly()
+ if err != nil {
+ return fmt.Errorf("%s: %w", p.Name(), err)
+ }
+ cfg.Sources = append(cfg.Sources, s...)
+ }
}
if err := cfg.LoadSchema(); err != nil {
@@ -70,6 +81,13 @@ func Generate(cfg *config.Config, option ...Option) error {
cfg.Sources = append(cfg.Sources, s)
}
}
+ if inj, ok := p.(plugin.LateSourcesInjector); ok {
+ s, err := inj.InjectSourcesLate(cfg.Schema)
+ if err != nil {
+ return fmt.Errorf("%s: %w", p.Name(), err)
+ }
+ cfg.Sources = append(cfg.Sources, s...)
+ }
}
// LoadSchema again now we have everything
diff --git a/vendor/github.com/99designs/gqlgen/api/option.go b/vendor/github.com/99designs/gqlgen/api/option.go
index d376193d..344aa819 100644
--- a/vendor/github.com/99designs/gqlgen/api/option.go
+++ b/vendor/github.com/99designs/gqlgen/api/option.go
@@ -29,19 +29,20 @@ func PrependPlugin(p plugin.Plugin) Option {
// ReplacePlugin replaces any existing plugin with a matching plugin name
func ReplacePlugin(p plugin.Plugin) Option {
return func(cfg *config.Config, plugins *[]plugin.Plugin) {
- if plugins != nil {
- found := false
- ps := *plugins
- for i, o := range ps {
- if p.Name() == o.Name() {
- ps[i] = p
- found = true
- }
- }
- if !found {
- ps = append(ps, p)
+ if plugins == nil {
+ return
+ }
+ found := false
+ ps := *plugins
+ for i, o := range ps {
+ if p.Name() == o.Name() {
+ ps[i] = p
+ found = true
}
- *plugins = ps
}
+ if !found {
+ ps = append(ps, p)
+ }
+ *plugins = ps
}
}
diff --git a/vendor/github.com/99designs/gqlgen/codegen/args.go b/vendor/github.com/99designs/gqlgen/codegen/args.go
index 983a3a02..736680b3 100644
--- a/vendor/github.com/99designs/gqlgen/codegen/args.go
+++ b/vendor/github.com/99designs/gqlgen/codegen/args.go
@@ -18,19 +18,20 @@ type ArgSet struct {
type FieldArgument struct {
*ast.ArgumentDefinition
- TypeReference *config.TypeReference
- VarName string // The name of the var in go
- Object *Object // A link back to the parent object
- Default any // The default value
- Directives []*Directive
- Value any // value set in Data
+ TypeReference *config.TypeReference
+ VarName string // The name of the var in go
+ Object *Object // A link back to the parent object
+ Default any // The default value
+ Directives []*Directive
+ Value any // value set in Data
+ CallArgumentDirectivesWithNull bool
}
-// ImplDirectives get not Builtin and location ARGUMENT_DEFINITION directive
+// ImplDirectives get not SkipRuntime and location ARGUMENT_DEFINITION directive
func (f *FieldArgument) ImplDirectives() []*Directive {
d := make([]*Directive, 0)
for i := range f.Directives {
- if !f.Directives[i].Builtin && f.Directives[i].IsLocation(ast.LocationArgumentDefinition) {
+ if !f.Directives[i].SkipRuntime && f.Directives[i].IsLocation(ast.LocationArgumentDefinition) {
d = append(d, f.Directives[i])
}
}
@@ -57,11 +58,12 @@ func (b *builder) buildArg(obj *Object, arg *ast.ArgumentDefinition) (*FieldArgu
return nil, err
}
newArg := FieldArgument{
- ArgumentDefinition: arg,
- TypeReference: tr,
- Object: obj,
- VarName: templates.ToGoPrivate(arg.Name),
- Directives: argDirs,
+ ArgumentDefinition: arg,
+ TypeReference: tr,
+ Object: obj,
+ VarName: templates.ToGoPrivate(arg.Name),
+ Directives: argDirs,
+ CallArgumentDirectivesWithNull: b.Config.CallArgumentDirectivesWithNull,
}
if arg.DefaultValue != nil {
diff --git a/vendor/github.com/99designs/gqlgen/codegen/args.gotpl b/vendor/github.com/99designs/gqlgen/codegen/args.gotpl
index 7b541ae1..2f3afdf0 100644
--- a/vendor/github.com/99designs/gqlgen/codegen/args.gotpl
+++ b/vendor/github.com/99designs/gqlgen/codegen/args.gotpl
@@ -2,35 +2,67 @@
func (ec *executionContext) {{ $name }}(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) {
var err error
args := map[string]interface{}{}
+
{{- range $i, $arg := . }}
- var arg{{$i}} {{ $arg.TypeReference.GO | ref}}
- if tmp, ok := rawArgs[{{$arg.Name|quote}}]; ok {
- ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField({{$arg.Name|quote}}))
+ arg{{$i}}, err := ec.{{ $name }}{{$arg.Name | go}}(ctx, rawArgs)
+ if err != nil {
+ return nil, err
+ }
+ args[{{$arg.Name|quote}}] = arg{{$i}}
+ {{- end }}
+ return args, nil
+}
+
+ {{- range $i, $arg := . }}
+ func (ec *executionContext) {{ $name }}{{$arg.Name | go}}(
+ ctx context.Context,
+ rawArgs map[string]interface{},
+ ) ({{ $arg.TypeReference.GO | ref}}, error) {
+ {{- if not .CallArgumentDirectivesWithNull}}
+ // We won't call the directive if the argument is null.
+ // Set call_argument_directives_with_null to true to call directives
+ // even if the argument is null.
+ _, ok := rawArgs[{{$arg.Name|quote}}]
+ if !ok {
+ var zeroVal {{ $arg.TypeReference.GO | ref}}
+ return zeroVal, nil
+ }
+ {{end}}
+ ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField({{$arg.Name|quote}}))
{{- if $arg.ImplDirectives }}
- directive0 := func(ctx context.Context) (interface{}, error) { return ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, tmp) }
+ directive0 := func(ctx context.Context) (interface{}, error) {
+ tmp, ok := rawArgs[{{$arg.Name|quote}}]
+ if !ok {
+ var zeroVal {{ $arg.TypeReference.GO | ref}}
+ return zeroVal, nil
+ }
+ return ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, tmp)
+ }
{{ template "implDirectives" $arg }}
- tmp, err = directive{{$arg.ImplDirectives|len}}(ctx)
+ tmp, err := directive{{$arg.ImplDirectives|len}}(ctx)
if err != nil {
- return nil, graphql.ErrorOnPath(ctx, err)
+ var zeroVal {{ $arg.TypeReference.GO | ref}}
+ return zeroVal, graphql.ErrorOnPath(ctx, err)
}
if data, ok := tmp.({{ $arg.TypeReference.GO | ref }}) ; ok {
- arg{{$i}} = data
+ return data, nil
{{- if $arg.TypeReference.IsNilable }}
} else if tmp == nil {
- arg{{$i}} = nil
+ var zeroVal {{ $arg.TypeReference.GO | ref}}
+ return zeroVal, nil
{{- end }}
} else {
- return nil, graphql.ErrorOnPath(ctx, fmt.Errorf(`unexpected type %T from directive, should be {{ $arg.TypeReference.GO }}`, tmp))
+ var zeroVal {{ $arg.TypeReference.GO | ref}}
+ return zeroVal, graphql.ErrorOnPath(ctx, fmt.Errorf(`unexpected type %T from directive, should be {{ $arg.TypeReference.GO }}`, tmp))
}
{{- else }}
- arg{{$i}}, err = ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, tmp)
- if err != nil {
- return nil, err
+ if tmp, ok := rawArgs[{{$arg.Name|quote}}]; ok {
+ return ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, tmp)
}
+
+ var zeroVal {{ $arg.TypeReference.GO | ref}}
+ return zeroVal, nil
{{- end }}
}
- args[{{$arg.Name|quote}}] = arg{{$i}}
- {{- end }}
- return args, nil
-}
+ {{end}}
{{ end }}
diff --git a/vendor/github.com/99designs/gqlgen/codegen/config/binder.go b/vendor/github.com/99designs/gqlgen/codegen/config/binder.go
index 91b6e500..fc7b2edc 100644
--- a/vendor/github.com/99designs/gqlgen/codegen/config/binder.go
+++ b/vendor/github.com/99designs/gqlgen/codegen/config/binder.go
@@ -36,7 +36,7 @@ func (c *Config) NewBinder() *Binder {
}
func (b *Binder) TypePosition(typ types.Type) token.Position {
- named, isNamed := typ.(*types.Named)
+ named, isNamed := code.Unalias(typ).(*types.Named)
if !isNamed {
return token.Position{
Filename: "unknown",
@@ -77,10 +77,11 @@ func (b *Binder) FindType(pkgName, typeName string) (types.Type, error) {
return nil, err
}
- if fun, isFunc := obj.(*types.Func); isFunc {
- return fun.Type().(*types.Signature).Params().At(0).Type(), nil
+ t := code.Unalias(obj.Type())
+ if _, isFunc := obj.(*types.Func); isFunc {
+ return code.Unalias(t.(*types.Signature).Params().At(0).Type()), nil
}
- return obj.Type(), nil
+ return t, nil
}
func (b *Binder) InstantiateType(orig types.Type, targs []types.Type) (types.Type, error) {
@@ -120,7 +121,7 @@ func (b *Binder) DefaultUserObject(name string) (types.Type, error) {
return nil, err
}
- return obj.Type(), nil
+ return code.Unalias(obj.Type()), nil
}
func (b *Binder) FindObject(pkgName, typeName string) (types.Object, error) {
@@ -193,19 +194,19 @@ func (b *Binder) PointerTo(ref *TypeReference) *TypeReference {
// TypeReference is used by args and field types. The Definition can refer to both input and output types.
type TypeReference struct {
- Definition *ast.Definition
- GQL *ast.Type
- GO types.Type // Type of the field being bound. Could be a pointer or a value type of Target.
- Target types.Type // The actual type that we know how to bind to. May require pointer juggling when traversing to fields.
- CastType types.Type // Before calling marshalling functions cast from/to this base type
- Marshaler *types.Func // When using external marshalling functions this will point to the Marshal function
- Unmarshaler *types.Func // When using external marshalling functions this will point to the Unmarshal function
- IsMarshaler bool // Does the type implement graphql.Marshaler and graphql.Unmarshaler
- IsOmittable bool // Is the type wrapped with Omittable
- IsContext bool // Is the Marshaler/Unmarshaller the context version; applies to either the method or interface variety.
- PointersInUmarshalInput bool // Inverse values and pointers in return.
- IsRoot bool // Is the type a root level definition such as Query, Mutation or Subscription
- EnumValues []EnumValueReference
+ Definition *ast.Definition
+ GQL *ast.Type
+ GO types.Type // Type of the field being bound. Could be a pointer or a value type of Target.
+ Target types.Type // The actual type that we know how to bind to. May require pointer juggling when traversing to fields.
+ CastType types.Type // Before calling marshalling functions cast from/to this base type
+ Marshaler *types.Func // When using external marshalling functions this will point to the Marshal function
+ Unmarshaler *types.Func // When using external marshalling functions this will point to the Unmarshal function
+ IsMarshaler bool // Does the type implement graphql.Marshaler and graphql.Unmarshaler
+ IsOmittable bool // Is the type wrapped with Omittable
+ IsContext bool // Is the Marshaler/Unmarshaller the context version; applies to either the method or interface variety.
+ PointersInUnmarshalInput bool // Inverse values and pointers in return.
+ IsRoot bool // Is the type a root level definition such as Query, Mutation or Subscription
+ EnumValues []EnumValueReference
}
func (ref *TypeReference) Elem() *TypeReference {
@@ -264,13 +265,13 @@ func (ref *TypeReference) IsPtrToIntf() bool {
}
func (ref *TypeReference) IsNamed() bool {
- _, isSlice := ref.GO.(*types.Named)
- return isSlice
+ _, ok := ref.GO.(*types.Named)
+ return ok
}
func (ref *TypeReference) IsStruct() bool {
- _, isStruct := ref.GO.Underlying().(*types.Struct)
- return isStruct
+ _, ok := ref.GO.Underlying().(*types.Struct)
+ return ok
}
func (ref *TypeReference) IsScalar() bool {
@@ -362,6 +363,9 @@ func unwrapOmittable(t types.Type) (types.Type, bool) {
}
func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret *TypeReference, err error) {
+ if bindTarget != nil {
+ bindTarget = code.Unalias(bindTarget)
+ }
if innerType, ok := unwrapOmittable(bindTarget); ok {
if schemaType.NonNull {
return nil, fmt.Errorf("%s is wrapped with Omittable but non-null", schemaType.Name())
@@ -433,28 +437,28 @@ func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret
if err != nil {
return nil, err
}
-
+ t := code.Unalias(obj.Type())
if values := b.enumValues(def); len(values) > 0 {
err = b.enumReference(ref, obj, values)
if err != nil {
return nil, err
}
} else if fun, isFunc := obj.(*types.Func); isFunc {
- ref.GO = fun.Type().(*types.Signature).Params().At(0).Type()
- ref.IsContext = fun.Type().(*types.Signature).Results().At(0).Type().String() == "github.com/99designs/gqlgen/graphql.ContextMarshaler"
+ ref.GO = code.Unalias(t.(*types.Signature).Params().At(0).Type())
+ ref.IsContext = code.Unalias(t.(*types.Signature).Results().At(0).Type()).String() == "github.com/99designs/gqlgen/graphql.ContextMarshaler"
ref.Marshaler = fun
ref.Unmarshaler = types.NewFunc(0, fun.Pkg(), "Unmarshal"+typeName, nil)
- } else if hasMethod(obj.Type(), "MarshalGQLContext") && hasMethod(obj.Type(), "UnmarshalGQLContext") {
- ref.GO = obj.Type()
+ } else if hasMethod(t, "MarshalGQLContext") && hasMethod(t, "UnmarshalGQLContext") {
+ ref.GO = t
ref.IsContext = true
ref.IsMarshaler = true
- } else if hasMethod(obj.Type(), "MarshalGQL") && hasMethod(obj.Type(), "UnmarshalGQL") {
- ref.GO = obj.Type()
+ } else if hasMethod(t, "MarshalGQL") && hasMethod(t, "UnmarshalGQL") {
+ ref.GO = t
ref.IsMarshaler = true
- } else if underlying := basicUnderlying(obj.Type()); def.IsLeafType() && underlying != nil && underlying.Kind() == types.String {
+ } else if underlying := basicUnderlying(t); def.IsLeafType() && underlying != nil && underlying.Kind() == types.String {
// TODO delete before v1. Backwards compatibility case for named types wrapping strings (see #595)
- ref.GO = obj.Type()
+ ref.GO = t
ref.CastType = underlying
underlyingRef, err := b.TypeReference(&ast.Type{NamedType: "String"}, nil)
@@ -465,7 +469,7 @@ func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret
ref.Marshaler = underlyingRef.Marshaler
ref.Unmarshaler = underlyingRef.Unmarshaler
} else {
- ref.GO = obj.Type()
+ ref.GO = t
}
ref.Target = ref.GO
@@ -478,7 +482,7 @@ func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret
ref.GO = bindTarget
}
- ref.PointersInUmarshalInput = b.cfg.ReturnPointersInUmarshalInput
+ ref.PointersInUnmarshalInput = b.cfg.ReturnPointersInUnmarshalInput
return ref, nil
}
@@ -516,6 +520,10 @@ func (b *Binder) CopyModifiersFromAst(t *ast.Type, base types.Type) types.Type {
}
func IsNilable(t types.Type) bool {
+ // Note that we use types.Unalias rather than code.Unalias here
+ // because we want to always check the underlying type.
+ // code.Unalias only unwraps aliases in Go 1.23
+ t = types.Unalias(t)
if namedType, isNamed := t.(*types.Named); isNamed {
return IsNilable(namedType.Underlying())
}
@@ -587,10 +595,11 @@ func (b *Binder) enumReference(ref *TypeReference, obj types.Object, values map[
return fmt.Errorf("not all enum values are binded for %v", ref.Definition.Name)
}
- if fn, ok := obj.Type().(*types.Signature); ok {
- ref.GO = fn.Params().At(0).Type()
+ t := code.Unalias(obj.Type())
+ if fn, ok := t.(*types.Signature); ok {
+ ref.GO = code.Unalias(fn.Params().At(0).Type())
} else {
- ref.GO = obj.Type()
+ ref.GO = t
}
str, err := b.TypeReference(&ast.Type{NamedType: "String"}, nil)
@@ -618,9 +627,10 @@ func (b *Binder) enumReference(ref *TypeReference, obj types.Object, values map[
return err
}
- if !types.AssignableTo(valueObj.Type(), ref.GO) {
+ valueTyp := code.Unalias(valueObj.Type())
+ if !types.AssignableTo(valueTyp, ref.GO) {
return fmt.Errorf("wrong type: %v, for enum value: %v, expected type: %v, of enum: %v",
- valueObj.Type(), value.Name, ref.GO, ref.Definition.Name)
+ valueTyp, value.Name, ref.GO, ref.Definition.Name)
}
switch valueObj.(type) {
diff --git a/vendor/github.com/99designs/gqlgen/codegen/config/config.go b/vendor/github.com/99designs/gqlgen/codegen/config/config.go
index 3228756c..761e3524 100644
--- a/vendor/github.com/99designs/gqlgen/codegen/config/config.go
+++ b/vendor/github.com/99designs/gqlgen/codegen/config/config.go
@@ -42,16 +42,22 @@ type Config struct {
OmitRootModels bool `yaml:"omit_root_models,omitempty"`
OmitResolverFields bool `yaml:"omit_resolver_fields,omitempty"`
OmitPanicHandler bool `yaml:"omit_panic_handler,omitempty"`
- StructFieldsAlwaysPointers bool `yaml:"struct_fields_always_pointers,omitempty"`
- ReturnPointersInUmarshalInput bool `yaml:"return_pointers_in_unmarshalinput,omitempty"`
- ResolversAlwaysReturnPointers bool `yaml:"resolvers_always_return_pointers,omitempty"`
- NullableInputOmittable bool `yaml:"nullable_input_omittable,omitempty"`
- EnableModelJsonOmitemptyTag *bool `yaml:"enable_model_json_omitempty_tag,omitempty"`
- SkipValidation bool `yaml:"skip_validation,omitempty"`
- SkipModTidy bool `yaml:"skip_mod_tidy,omitempty"`
- Sources []*ast.Source `yaml:"-"`
- Packages *code.Packages `yaml:"-"`
- Schema *ast.Schema `yaml:"-"`
+ // If this is set to true, argument directives that
+ // decorate a field with a null value will still be called.
+ //
+ // This enables argument directives to not just mutate
+ // argument values but to set them even if they're null.
+ CallArgumentDirectivesWithNull bool `yaml:"call_argument_directives_with_null,omitempty"`
+ StructFieldsAlwaysPointers bool `yaml:"struct_fields_always_pointers,omitempty"`
+ ReturnPointersInUnmarshalInput bool `yaml:"return_pointers_in_unmarshalinput,omitempty"`
+ ResolversAlwaysReturnPointers bool `yaml:"resolvers_always_return_pointers,omitempty"`
+ NullableInputOmittable bool `yaml:"nullable_input_omittable,omitempty"`
+ EnableModelJsonOmitemptyTag *bool `yaml:"enable_model_json_omitempty_tag,omitempty"`
+ SkipValidation bool `yaml:"skip_validation,omitempty"`
+ SkipModTidy bool `yaml:"skip_mod_tidy,omitempty"`
+ Sources []*ast.Source `yaml:"-"`
+ Packages *code.Packages `yaml:"-"`
+ Schema *ast.Schema `yaml:"-"`
// Deprecated: use Federation instead. Will be removed next release
Federated bool `yaml:"federated,omitempty"`
@@ -62,15 +68,15 @@ var cfgFilenames = []string{".gqlgen.yml", "gqlgen.yml", "gqlgen.yaml"}
// DefaultConfig creates a copy of the default config
func DefaultConfig() *Config {
return &Config{
- SchemaFilename: StringList{"schema.graphql"},
- Model: PackageConfig{Filename: "models_gen.go"},
- Exec: ExecConfig{Filename: "generated.go"},
- Directives: map[string]DirectiveConfig{},
- Models: TypeMap{},
- StructFieldsAlwaysPointers: true,
- ReturnPointersInUmarshalInput: false,
- ResolversAlwaysReturnPointers: true,
- NullableInputOmittable: false,
+ SchemaFilename: StringList{"schema.graphql"},
+ Model: PackageConfig{Filename: "models_gen.go"},
+ Exec: ExecConfig{Filename: "generated.go"},
+ Directives: map[string]DirectiveConfig{},
+ Models: TypeMap{},
+ StructFieldsAlwaysPointers: true,
+ ReturnPointersInUnmarshalInput: false,
+ ResolversAlwaysReturnPointers: true,
+ NullableInputOmittable: false,
}
}
@@ -320,24 +326,33 @@ func (c *Config) injectTypesFromSchema() error {
}
}
- if schemaType.Kind == ast.Object || schemaType.Kind == ast.InputObject {
+ if schemaType.Kind == ast.Object ||
+ schemaType.Kind == ast.InputObject ||
+ schemaType.Kind == ast.Interface {
for _, field := range schemaType.Fields {
if fd := field.Directives.ForName("goField"); fd != nil {
forceResolver := c.Models[schemaType.Name].Fields[field.Name].Resolver
- fieldName := c.Models[schemaType.Name].Fields[field.Name].FieldName
-
if ra := fd.Arguments.ForName("forceResolver"); ra != nil {
if fr, err := ra.Value.Value(nil); err == nil {
forceResolver = fr.(bool)
}
}
+ fieldName := c.Models[schemaType.Name].Fields[field.Name].FieldName
if na := fd.Arguments.ForName("name"); na != nil {
if fr, err := na.Value.Value(nil); err == nil {
fieldName = fr.(string)
}
}
+ omittable := c.Models[schemaType.Name].Fields[field.Name].Omittable
+ if arg := fd.Arguments.ForName("omittable"); arg != nil {
+ if k, err := arg.Value.Value(nil); err == nil {
+ val := k.(bool)
+ omittable = &val
+ }
+ }
+
if c.Models[schemaType.Name].Fields == nil {
c.Models[schemaType.Name] = TypeMapEntry{
Model: c.Models[schemaType.Name].Model,
@@ -349,6 +364,7 @@ func (c *Config) injectTypesFromSchema() error {
c.Models[schemaType.Name].Fields[field.Name] = TypeMapField{
FieldName: fieldName,
Resolver: forceResolver,
+ Omittable: omittable,
}
}
}
@@ -449,6 +465,7 @@ type TypeMapEntry struct {
type TypeMapField struct {
Resolver bool `yaml:"resolver"`
FieldName string `yaml:"fieldName"`
+ Omittable *bool `yaml:"omittable"`
GeneratedMethod string `yaml:"-"`
}
@@ -659,6 +676,16 @@ func (tm TypeMap) ForceGenerate(name string, forceGenerate bool) {
type DirectiveConfig struct {
SkipRuntime bool `yaml:"skip_runtime"`
+
+ // If the directive implementation is statically defined, don't provide a hook for it
+ // in the generated server. This is useful for directives that are implemented
+ // by plugins or the runtime itself.
+ //
+ // The function implementation should be provided here as a string.
+ //
+ // The function should have the following signature:
+ // func(ctx context.Context, obj any, next graphql.Resolver[, directive arguments if any]) (res any, err error)
+ Implementation *string
}
func inStrSlice(haystack []string, needle string) bool {
diff --git a/vendor/github.com/99designs/gqlgen/codegen/config/initialisms.go b/vendor/github.com/99designs/gqlgen/codegen/config/initialisms.go
index 25e7331f..432c56e0 100644
--- a/vendor/github.com/99designs/gqlgen/codegen/config/initialisms.go
+++ b/vendor/github.com/99designs/gqlgen/codegen/config/initialisms.go
@@ -14,7 +14,7 @@ type GoInitialismsConfig struct {
Initialisms []string `yaml:"initialisms"`
}
-// setInitialisms adjustes GetInitialisms based on its settings.
+// setInitialisms adjusts GetInitialisms based on its settings.
func (i GoInitialismsConfig) setInitialisms() {
toUse := i.determineGoInitialisms()
templates.GetInitialisms = func() map[string]bool {
@@ -22,7 +22,7 @@ func (i GoInitialismsConfig) setInitialisms() {
}
}
-// determineGoInitialisms returns the Go initialims to be used, based on its settings.
+// determineGoInitialisms returns the Go initialisms to be used, based on its settings.
func (i GoInitialismsConfig) determineGoInitialisms() (initialismsToUse map[string]bool) {
if i.ReplaceDefaults {
initialismsToUse = make(map[string]bool, len(i.Initialisms))
diff --git a/vendor/github.com/99designs/gqlgen/codegen/data.go b/vendor/github.com/99designs/gqlgen/codegen/data.go
index 7110de2f..c5c3fcc6 100644
--- a/vendor/github.com/99designs/gqlgen/codegen/data.go
+++ b/vendor/github.com/99designs/gqlgen/codegen/data.go
@@ -64,6 +64,30 @@ type builder struct {
Directives map[string]*Directive
}
+// Get only the directives which should have a user provided definition on server instantiation
+func (d *Data) UserDirectives() DirectiveList {
+ res := DirectiveList{}
+ directives := d.Directives()
+ for k, directive := range directives {
+ if directive.Implementation == nil {
+ res[k] = directive
+ }
+ }
+ return res
+}
+
+// Get only the directives which should have a statically provided definition
+func (d *Data) BuiltInDirectives() DirectiveList {
+ res := DirectiveList{}
+ directives := d.Directives()
+ for k, directive := range directives {
+ if directive.Implementation != nil {
+ res[k] = directive
+ }
+ }
+ return res
+}
+
// Get only the directives which are defined in the config's sources.
func (d *Data) Directives() DirectiveList {
res := DirectiveList{}
@@ -97,7 +121,7 @@ func BuildData(cfg *config.Config, plugins ...any) (*Data, error) {
dataDirectives := make(map[string]*Directive)
for name, d := range b.Directives {
- if !d.Builtin {
+ if !d.SkipRuntime {
dataDirectives[name] = d
}
}
diff --git a/vendor/github.com/99designs/gqlgen/codegen/directive.go b/vendor/github.com/99designs/gqlgen/codegen/directive.go
index 30a79c35..077bd9f7 100644
--- a/vendor/github.com/99designs/gqlgen/codegen/directive.go
+++ b/vendor/github.com/99designs/gqlgen/codegen/directive.go
@@ -7,6 +7,7 @@ import (
"github.com/vektah/gqlparser/v2/ast"
+ "github.com/99designs/gqlgen/codegen/config"
"github.com/99designs/gqlgen/codegen/templates"
)
@@ -19,9 +20,10 @@ func (dl DirectiveList) LocationDirectives(location string) DirectiveList {
type Directive struct {
*ast.DirectiveDefinition
- Name string
- Args []*FieldArgument
- Builtin bool
+ Name string
+ Args []*FieldArgument
+
+ config.DirectiveConfig
}
// IsLocation check location directive
@@ -82,7 +84,7 @@ func (b *builder) buildDirectives() (map[string]*Directive, error) {
DirectiveDefinition: dir,
Name: name,
Args: args,
- Builtin: b.Config.Directives[name].SkipRuntime,
+ DirectiveConfig: b.Config.Directives[name],
}
}
@@ -122,7 +124,7 @@ func (b *builder) getDirectives(list ast.DirectiveList) ([]*Directive, error) {
Name: d.Name,
Args: args,
DirectiveDefinition: list[i].Definition,
- Builtin: b.Config.Directives[d.Name].SkipRuntime,
+ DirectiveConfig: b.Config.Directives[d.Name],
}
}
@@ -162,8 +164,12 @@ func (d *Directive) ResolveArgs(obj string, next int) string {
return strings.Join(args, ", ")
}
+func (d *Directive) CallName() string {
+ return ucFirst(d.Name)
+}
+
func (d *Directive) Declaration() string {
- res := ucFirst(d.Name) + " func(ctx context.Context, obj interface{}, next graphql.Resolver"
+ res := d.CallName() + " func(ctx context.Context, obj interface{}, next graphql.Resolver"
for _, arg := range d.Args {
res += fmt.Sprintf(", %s %s", templates.ToGoPrivate(arg.Name), templates.CurrentImports.LookupType(arg.TypeReference.GO))
@@ -172,3 +178,23 @@ func (d *Directive) Declaration() string {
res += ") (res interface{}, err error)"
return res
}
+
+func (d *Directive) IsBuiltIn() bool {
+ return d.Implementation != nil
+}
+
+func (d *Directive) CallPath() string {
+ if d.IsBuiltIn() {
+ return "builtInDirective" + d.CallName()
+ }
+
+ return "ec.directives." + d.CallName()
+}
+
+func (d *Directive) FunctionImpl() string {
+ if d.Implementation == nil {
+ return ""
+ }
+
+ return d.CallPath() + " = " + *d.Implementation
+}
diff --git a/vendor/github.com/99designs/gqlgen/codegen/directives.gotpl b/vendor/github.com/99designs/gqlgen/codegen/directives.gotpl
index 23bcf0f8..c3fa8abe 100644
--- a/vendor/github.com/99designs/gqlgen/codegen/directives.gotpl
+++ b/vendor/github.com/99designs/gqlgen/codegen/directives.gotpl
@@ -1,23 +1,29 @@
{{ define "implDirectives" }}{{ $in := .DirectiveObjName }}
+ {{ $zeroVal := .TypeReference.GO | ref}}
{{- range $i, $directive := .ImplDirectives -}}
directive{{add $i 1}} := func(ctx context.Context) (interface{}, error) {
{{- range $arg := $directive.Args }}
{{- if notNil "Value" $arg }}
{{ $arg.VarName }}, err := ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, {{ $arg.Value | dump }})
if err != nil{
- return nil, err
+ var zeroVal {{$zeroVal}}
+ return zeroVal, err
}
{{- else if notNil "Default" $arg }}
{{ $arg.VarName }}, err := ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, {{ $arg.Default | dump }})
if err != nil{
- return nil, err
+ var zeroVal {{$zeroVal}}
+ return zeroVal, err
}
{{- end }}
{{- end }}
- if ec.directives.{{$directive.Name|ucFirst}} == nil {
- return nil, errors.New("directive {{$directive.Name}} is not implemented")
- }
- return ec.directives.{{$directive.Name|ucFirst}}({{$directive.ResolveArgs $in $i }})
+ {{- if not $directive.IsBuiltIn}}
+ if {{$directive.CallPath}} == nil {
+ var zeroVal {{$zeroVal}}
+ return zeroVal, errors.New("directive {{$directive.Name}} is not implemented")
+ }
+ {{- end}}
+ return {{$directive.CallPath}}({{$directive.ResolveArgs $in $i }})
}
{{ end -}}
{{ end }}
@@ -37,10 +43,7 @@
{{- end }}
n := next
next = func(ctx context.Context) (interface{}, error) {
- if ec.directives.{{$directive.Name|ucFirst}} == nil {
- return nil, errors.New("directive {{$directive.Name}} is not implemented")
- }
- return ec.directives.{{$directive.Name|ucFirst}}({{$directive.CallArgs}})
+ {{- template "callDirective" $directive -}}
}
{{- end }}
}
@@ -57,6 +60,15 @@
return graphql.Null
{{end}}
+{{define "callDirective"}}
+ {{- if not .IsBuiltIn}}
+ if {{.CallPath}} == nil {
+ return nil, errors.New("directive {{.Name}} is not implemented")
+ }
+ {{- end}}
+ return {{.CallPath}}({{.CallArgs}})
+{{end}}
+
{{ if .Directives.LocationDirectives "QUERY" }}
func (ec *executionContext) _queryMiddleware(ctx context.Context, obj *ast.OperationDefinition, next func(ctx context.Context) (interface{}, error)) graphql.Marshaler {
{{ template "queryDirectives" .Directives.LocationDirectives "QUERY" }}
@@ -87,10 +99,7 @@ func (ec *executionContext) _subscriptionMiddleware(ctx context.Context, obj *as
{{- end }}
n := next
next = func(ctx context.Context) (interface{}, error) {
- if ec.directives.{{$directive.Name|ucFirst}} == nil {
- return nil, errors.New("directive {{$directive.Name}} is not implemented")
- }
- return ec.directives.{{$directive.Name|ucFirst}}({{$directive.CallArgs}})
+ {{- template "callDirective" $directive -}}
}
{{- end }}
}
@@ -130,10 +139,7 @@ func (ec *executionContext) _subscriptionMiddleware(ctx context.Context, obj *as
{{- end }}
n := next
next = func(ctx context.Context) (interface{}, error) {
- if ec.directives.{{$directive.Name|ucFirst}} == nil {
- return nil, errors.New("directive {{$directive.Name}} is not implemented")
- }
- return ec.directives.{{$directive.Name|ucFirst}}({{$directive.CallArgs}})
+ {{- template "callDirective" $directive -}}
}
{{- end }}
}
diff --git a/vendor/github.com/99designs/gqlgen/codegen/field.go b/vendor/github.com/99designs/gqlgen/codegen/field.go
index 509f48cd..7f4a5ad1 100644
--- a/vendor/github.com/99designs/gqlgen/codegen/field.go
+++ b/vendor/github.com/99designs/gqlgen/codegen/field.go
@@ -16,6 +16,7 @@ import (
"github.com/99designs/gqlgen/codegen/config"
"github.com/99designs/gqlgen/codegen/templates"
+ "github.com/99designs/gqlgen/internal/code"
)
type Field struct {
@@ -144,7 +145,7 @@ func (b *builder) bindField(obj *Object, f *Field) (errret error) {
f.GoFieldName = b.Config.Models[obj.Name].Fields[f.Name].FieldName
}
- target, err := b.findBindTarget(obj.Type.(*types.Named), f.GoFieldName)
+ target, err := b.findBindTarget(obj.Type, f.GoFieldName)
if err != nil {
return err
}
@@ -229,7 +230,7 @@ func (b *builder) bindField(obj *Object, f *Field) (errret error) {
}
// findBindTarget attempts to match the name to a field or method on a Type
-// with the following priorites:
+// with the following priorities:
// 1. Any Fields with a struct tag (see config.StructTag). Errors if more than one match is found
// 2. Any method or field with a matching name. Errors if more than one match is found
// 3. Same logic again for embedded fields
@@ -380,7 +381,7 @@ func (b *builder) findBindStructEmbedsTarget(strukt *types.Struct, name string)
continue
}
- fieldType := field.Type()
+ fieldType := code.Unalias(field.Type())
if ptr, ok := fieldType.(*types.Pointer); ok {
fieldType = ptr.Elem()
}
@@ -442,7 +443,7 @@ func (f *Field) ImplDirectives() []*Directive {
loc = ast.LocationInputFieldDefinition
}
for i := range f.Directives {
- if !f.Directives[i].Builtin &&
+ if !f.Directives[i].SkipRuntime &&
(f.Directives[i].IsLocation(loc, ast.LocationObject) || f.Directives[i].IsLocation(loc, ast.LocationInputObject)) {
d = append(d, f.Directives[i])
}
diff --git a/vendor/github.com/99designs/gqlgen/codegen/generated!.gotpl b/vendor/github.com/99designs/gqlgen/codegen/generated!.gotpl
index b343e626..1e5bfbfc 100644
--- a/vendor/github.com/99designs/gqlgen/codegen/generated!.gotpl
+++ b/vendor/github.com/99designs/gqlgen/codegen/generated!.gotpl
@@ -1,3 +1,4 @@
+{{/* Context object: codegen.Data */}}
{{ reserveImport "context" }}
{{ reserveImport "fmt" }}
{{ reserveImport "io" }}
@@ -46,7 +47,7 @@
}
type DirectiveRoot struct {
- {{ range $directive := .Directives }}
+ {{ range $directive := .UserDirectives }}
{{- $directive.Declaration }}
{{ end }}
}
@@ -93,6 +94,12 @@
{{- end }}
{{- end }}
+{{ range $directive := .BuiltInDirectives }}
+ var (
+ {{- $directive.FunctionImpl }}
+ )
+{{ end }}
+
{{ if eq .Config.Exec.Layout "single-file" }}
type executableSchema struct {
schema *ast.Schema
diff --git a/vendor/github.com/99designs/gqlgen/codegen/input.gotpl b/vendor/github.com/99designs/gqlgen/codegen/input.gotpl
index 9240b56c..1b91d8db 100644
--- a/vendor/github.com/99designs/gqlgen/codegen/input.gotpl
+++ b/vendor/github.com/99designs/gqlgen/codegen/input.gotpl
@@ -1,10 +1,10 @@
{{- range $input := .Inputs }}
{{- if not .HasUnmarshal }}
{{- $it := "it" }}
- {{- if .PointersInUmarshalInput }}
+ {{- if .PointersInUnmarshalInput }}
{{- $it = "&it" }}
{{- end }}
- func (ec *executionContext) unmarshalInput{{ .Name }}(ctx context.Context, obj interface{}) ({{ if .PointersInUmarshalInput }}*{{ end }}{{.Type | ref}}, error) {
+ func (ec *executionContext) unmarshalInput{{ .Name }}(ctx context.Context, obj interface{}) ({{ if .PointersInUnmarshalInput }}*{{ end }}{{.Type | ref}}, error) {
{{- if $input.IsMap }}
it := make(map[string]interface{}, len(obj.(map[string]interface{})))
{{- else }}
diff --git a/vendor/github.com/99designs/gqlgen/codegen/object.go b/vendor/github.com/99designs/gqlgen/codegen/object.go
index eee43849..869d1b36 100644
--- a/vendor/github.com/99designs/gqlgen/codegen/object.go
+++ b/vendor/github.com/99designs/gqlgen/codegen/object.go
@@ -26,15 +26,15 @@ const (
type Object struct {
*ast.Definition
- Type types.Type
- ResolverInterface types.Type
- Root bool
- Fields []*Field
- Implements []*ast.Definition
- DisableConcurrency bool
- Stream bool
- Directives []*Directive
- PointersInUmarshalInput bool
+ Type types.Type
+ ResolverInterface types.Type
+ Root bool
+ Fields []*Field
+ Implements []*ast.Definition
+ DisableConcurrency bool
+ Stream bool
+ Directives []*Directive
+ PointersInUnmarshalInput bool
}
func (b *builder) buildObject(typ *ast.Definition) (*Object, error) {
@@ -44,12 +44,12 @@ func (b *builder) buildObject(typ *ast.Definition) (*Object, error) {
}
caser := cases.Title(language.English, cases.NoLower)
obj := &Object{
- Definition: typ,
- Root: b.Config.IsRoot(typ),
- DisableConcurrency: typ == b.Schema.Mutation,
- Stream: typ == b.Schema.Subscription,
- Directives: dirs,
- PointersInUmarshalInput: b.Config.ReturnPointersInUmarshalInput,
+ Definition: typ,
+ Root: b.Config.IsRoot(typ),
+ DisableConcurrency: typ == b.Schema.Mutation,
+ Stream: typ == b.Schema.Subscription,
+ Directives: dirs,
+ PointersInUnmarshalInput: b.Config.ReturnPointersInUnmarshalInput,
ResolverInterface: types.NewNamed(
types.NewTypeName(0, b.Config.Exec.Pkg(), caser.String(typ.Name)+"Resolver", nil),
nil,
diff --git a/vendor/github.com/99designs/gqlgen/codegen/root_.gotpl b/vendor/github.com/99designs/gqlgen/codegen/root_.gotpl
index 0b90ad53..d392ef53 100644
--- a/vendor/github.com/99designs/gqlgen/codegen/root_.gotpl
+++ b/vendor/github.com/99designs/gqlgen/codegen/root_.gotpl
@@ -1,3 +1,4 @@
+{{/* Context object: codegen.Data */}}
{{ reserveImport "context" }}
{{ reserveImport "fmt" }}
{{ reserveImport "io" }}
@@ -45,7 +46,7 @@ type ResolverRoot interface {
}
type DirectiveRoot struct {
-{{ range $directive := .Directives }}
+{{ range $directive := .UserDirectives }}
{{- $directive.Declaration }}
{{ end }}
}
@@ -67,6 +68,12 @@ type ComplexityRoot struct {
{{- end }}
}
+{{ range $directive := .BuiltInDirectives }}
+ var (
+ {{- $directive.FunctionImpl }}
+ )
+{{ end }}
+
type executableSchema struct {
schema *ast.Schema
resolvers ResolverRoot
diff --git a/vendor/github.com/99designs/gqlgen/codegen/templates/templates.go b/vendor/github.com/99designs/gqlgen/codegen/templates/templates.go
index 4de30761..9b6e4cfb 100644
--- a/vendor/github.com/99designs/gqlgen/codegen/templates/templates.go
+++ b/vendor/github.com/99designs/gqlgen/codegen/templates/templates.go
@@ -495,18 +495,18 @@ func wordWalker(str string, f func(*wordInfo)) {
if initialisms[upperWord] {
// If the uppercase word (string(runes[w:i]) is "ID" or "IP"
// AND
- // the word is the first two characters of the str
+ // the word is the first two characters of the current word
// AND
// that is not the end of the word
// AND
- // the length of the string is greater than 3
+ // the length of the remaining string is greater than 3
// AND
// the third rune is an uppercase one
// THEN
// do NOT count this as an initialism.
switch upperWord {
case "ID", "IP":
- if word == str[:2] && !eow && len(str) > 3 && unicode.IsUpper(runes[3]) {
+ if remainingRunes := runes[w:]; word == string(remainingRunes[:2]) && !eow && len(remainingRunes) > 3 && unicode.IsUpper(remainingRunes[3]) {
continue
}
}
@@ -694,7 +694,7 @@ var pkgReplacer = strings.NewReplacer(
func TypeIdentifier(t types.Type) string {
res := ""
for {
- switch it := t.(type) {
+ switch it := code.Unalias(t).(type) {
case *types.Pointer:
t.Underlying()
res += "ᚖ"
@@ -771,6 +771,8 @@ var CommonInitialisms = map[string]bool{
"XMPP": true,
"XSRF": true,
"XSS": true,
+ "AWS": true,
+ "GCP": true,
}
// GetInitialisms returns the initialisms to capitalize in Go names. If unchanged, default initialisms will be returned
diff --git a/vendor/github.com/99designs/gqlgen/codegen/type.gotpl b/vendor/github.com/99designs/gqlgen/codegen/type.gotpl
index ebebdf14..1898d444 100644
--- a/vendor/github.com/99designs/gqlgen/codegen/type.gotpl
+++ b/vendor/github.com/99designs/gqlgen/codegen/type.gotpl
@@ -76,9 +76,9 @@
return res, graphql.ErrorOnPath(ctx, err)
{{- else }}
res, err := ec.unmarshalInput{{ $type.GQL.Name }}(ctx, v)
- {{- if and $type.IsNilable (not $type.IsMap) (not $type.PointersInUmarshalInput) }}
+ {{- if and $type.IsNilable (not $type.IsMap) (not $type.PointersInUnmarshalInput) }}
return &res, graphql.ErrorOnPath(ctx, err)
- {{- else if and (not $type.IsNilable) $type.PointersInUmarshalInput }}
+ {{- else if and (not $type.IsNilable) $type.PointersInUnmarshalInput }}
return *res, graphql.ErrorOnPath(ctx, err)
{{- else }}
return res, graphql.ErrorOnPath(ctx, err)
diff --git a/vendor/github.com/99designs/gqlgen/graphql/bool.go b/vendor/github.com/99designs/gqlgen/graphql/bool.go
index b01f6eb1..d9797a38 100644
--- a/vendor/github.com/99designs/gqlgen/graphql/bool.go
+++ b/vendor/github.com/99designs/gqlgen/graphql/bool.go
@@ -20,6 +20,8 @@ func UnmarshalBoolean(v any) (bool, error) {
return v != 0, nil
case bool:
return v, nil
+ case nil:
+ return false, nil
default:
return false, fmt.Errorf("%T is not a bool", v)
}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/cache.go b/vendor/github.com/99designs/gqlgen/graphql/cache.go
index ef2dd5a5..8804cfe0 100644
--- a/vendor/github.com/99designs/gqlgen/graphql/cache.go
+++ b/vendor/github.com/99designs/gqlgen/graphql/cache.go
@@ -3,27 +3,29 @@ package graphql
import "context"
// Cache is a shared store for APQ and query AST caching
-type Cache interface {
+type Cache[T any] interface {
// Get looks up a key's value from the cache.
- Get(ctx context.Context, key string) (value any, ok bool)
+ Get(ctx context.Context, key string) (value T, ok bool)
// Add adds a value to the cache.
- Add(ctx context.Context, key string, value any)
+ Add(ctx context.Context, key string, value T)
}
// MapCache is the simplest implementation of a cache, because it can not evict it should only be used in tests
-type MapCache map[string]any
+type MapCache[T any] map[string]T
// Get looks up a key's value from the cache.
-func (m MapCache) Get(_ context.Context, key string) (value any, ok bool) {
+func (m MapCache[T]) Get(_ context.Context, key string) (value T, ok bool) {
v, ok := m[key]
return v, ok
}
// Add adds a value to the cache.
-func (m MapCache) Add(_ context.Context, key string, value any) { m[key] = value }
+func (m MapCache[T]) Add(_ context.Context, key string, value T) { m[key] = value }
-type NoCache struct{}
+type NoCache[T any, T2 *T] struct{}
-func (n NoCache) Get(_ context.Context, _ string) (value any, ok bool) { return nil, false }
-func (n NoCache) Add(_ context.Context, _ string, _ any) {}
+var _ Cache[*string] = (*NoCache[string, *string])(nil)
+
+func (n NoCache[T, T2]) Get(_ context.Context, _ string) (value T2, ok bool) { return nil, false }
+func (n NoCache[T, T2]) Add(_ context.Context, _ string, _ T2) {}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/executor/executor.go b/vendor/github.com/99designs/gqlgen/graphql/executor/executor.go
index 426ad09b..566b0476 100644
--- a/vendor/github.com/99designs/gqlgen/graphql/executor/executor.go
+++ b/vendor/github.com/99designs/gqlgen/graphql/executor/executor.go
@@ -22,7 +22,7 @@ type Executor struct {
errorPresenter graphql.ErrorPresenterFunc
recoverFunc graphql.RecoverFunc
- queryCache graphql.Cache
+ queryCache graphql.Cache[*ast.QueryDocument]
parserTokenLimit int
}
@@ -36,7 +36,7 @@ func New(es graphql.ExecutableSchema) *Executor {
es: es,
errorPresenter: graphql.DefaultErrorPresenter,
recoverFunc: graphql.DefaultRecover,
- queryCache: graphql.NoCache{},
+ queryCache: graphql.NoCache[ast.QueryDocument, *ast.QueryDocument]{},
ext: processExtensions(nil),
parserTokenLimit: parserTokenNoLimit,
}
@@ -84,7 +84,6 @@ func (e *Executor) CreateOperationContext(
var err error
rc.Variables, err = validator.VariableValues(e.es.Schema(), rc.Operation, params.Variables)
-
if err != nil {
gqlErr, ok := err.(*gqlerror.Error)
if ok {
@@ -162,7 +161,7 @@ func (e *Executor) PresentRecoveredError(ctx context.Context, err any) error {
return e.errorPresenter(ctx, e.recoverFunc(ctx, err))
}
-func (e *Executor) SetQueryCache(cache graphql.Cache) {
+func (e *Executor) SetQueryCache(cache graphql.Cache[*ast.QueryDocument]) {
e.queryCache = cache
}
@@ -195,7 +194,7 @@ func (e *Executor) parseQuery(
stats.Parsing.End = now
stats.Validation.Start = now
- return doc.(*ast.QueryDocument), nil
+ return doc, nil
}
doc, err := parser.ParseQueryWithTokenLimit(&ast.Source{Input: query}, e.parserTokenLimit)
diff --git a/vendor/github.com/99designs/gqlgen/graphql/float.go b/vendor/github.com/99designs/gqlgen/graphql/float.go
index 465f46af..b140d5bc 100644
--- a/vendor/github.com/99designs/gqlgen/graphql/float.go
+++ b/vendor/github.com/99designs/gqlgen/graphql/float.go
@@ -28,6 +28,8 @@ func UnmarshalFloat(v any) (float64, error) {
return v, nil
case json.Number:
return strconv.ParseFloat(string(v), 64)
+ case nil:
+ return 0, nil
default:
return 0, fmt.Errorf("%T is not an float", v)
}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/extension/apq.go b/vendor/github.com/99designs/gqlgen/graphql/handler/extension/apq.go
index 115aaa8a..a4cb32c9 100644
--- a/vendor/github.com/99designs/gqlgen/graphql/handler/extension/apq.go
+++ b/vendor/github.com/99designs/gqlgen/graphql/handler/extension/apq.go
@@ -23,7 +23,7 @@ const (
// hash in the next request.
// see https://github.com/apollographql/apollo-link-persisted-queries
type AutomaticPersistedQuery struct {
- Cache graphql.Cache
+ Cache graphql.Cache[string]
}
type ApqStats struct {
@@ -72,14 +72,14 @@ func (a AutomaticPersistedQuery) MutateOperationParameters(ctx context.Context,
fullQuery := false
if rawParams.Query == "" {
+ var ok bool
// client sent optimistic query hash without query string, get it from the cache
- query, ok := a.Cache.Get(ctx, extension.Sha256)
+ rawParams.Query, ok = a.Cache.Get(ctx, extension.Sha256)
if !ok {
err := gqlerror.Errorf(errPersistedQueryNotFound)
errcode.Set(err, errPersistedQueryNotFoundCode)
return err
}
- rawParams.Query = query.(string)
} else {
// client sent optimistic query hash with query string, verify and store it
if computeQueryHash(rawParams.Query) != extension.Sha256 {
diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/lru/lru.go b/vendor/github.com/99designs/gqlgen/graphql/handler/lru/lru.go
index 9dc480e9..946022bf 100644
--- a/vendor/github.com/99designs/gqlgen/graphql/handler/lru/lru.go
+++ b/vendor/github.com/99designs/gqlgen/graphql/handler/lru/lru.go
@@ -8,26 +8,26 @@ import (
"github.com/99designs/gqlgen/graphql"
)
-type LRU struct {
- lru *lru.Cache[string, any]
+type LRU[T any] struct {
+ lru *lru.Cache[string, T]
}
-var _ graphql.Cache = &LRU{}
+var _ graphql.Cache[any] = &LRU[any]{}
-func New(size int) *LRU {
- cache, err := lru.New[string, any](size)
+func New[T any](size int) *LRU[T] {
+ cache, err := lru.New[string, T](size)
if err != nil {
// An error is only returned for non-positive cache size
// and we already checked for that.
panic("unexpected error creating cache: " + err.Error())
}
- return &LRU{cache}
+ return &LRU[T]{cache}
}
-func (l LRU) Get(ctx context.Context, key string) (value any, ok bool) {
+func (l LRU[T]) Get(ctx context.Context, key string) (value T, ok bool) {
return l.lru.Get(key)
}
-func (l LRU) Add(ctx context.Context, key string, value any) {
+func (l LRU[T]) Add(ctx context.Context, key string, value T) {
l.lru.Add(key, value)
}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/server.go b/vendor/github.com/99designs/gqlgen/graphql/handler/server.go
index 54376b13..644bad8d 100644
--- a/vendor/github.com/99designs/gqlgen/graphql/handler/server.go
+++ b/vendor/github.com/99designs/gqlgen/graphql/handler/server.go
@@ -8,6 +8,7 @@ import (
"net/http"
"time"
+ "github.com/vektah/gqlparser/v2/ast"
"github.com/vektah/gqlparser/v2/gqlerror"
"github.com/99designs/gqlgen/graphql"
@@ -41,11 +42,11 @@ func NewDefaultServer(es graphql.ExecutableSchema) *Server {
srv.AddTransport(transport.POST{})
srv.AddTransport(transport.MultipartForm{})
- srv.SetQueryCache(lru.New(1000))
+ srv.SetQueryCache(lru.New[*ast.QueryDocument](1000))
srv.Use(extension.Introspection{})
srv.Use(extension.AutomaticPersistedQuery{
- Cache: lru.New(100),
+ Cache: lru.New[string](100),
})
return srv
@@ -63,7 +64,7 @@ func (s *Server) SetRecoverFunc(f graphql.RecoverFunc) {
s.exec.SetRecoverFunc(f)
}
-func (s *Server) SetQueryCache(cache graphql.Cache) {
+func (s *Server) SetQueryCache(cache graphql.Cache[*ast.QueryDocument]) {
s.exec.SetQueryCache(cache)
}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_graphql.go b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_graphql.go
index bd511525..0bad1110 100644
--- a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_graphql.go
+++ b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_graphql.go
@@ -88,7 +88,6 @@ func cleanupBody(body string) (out string, err error) {
// is where query starts. If it is, query is url encoded.
if strings.HasPrefix(body, "%7B") {
body, err = url.QueryUnescape(body)
-
if err != nil {
return body, err
}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket.go b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket.go
index 651ccee4..32e31c7c 100644
--- a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket.go
+++ b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket.go
@@ -198,7 +198,7 @@ func (c *wsConnection) init() bool {
var ctx context.Context
ctx, initAckPayload, err = c.InitFunc(c.ctx, c.initPayload)
if err != nil {
- c.sendConnectionError(err.Error())
+ c.sendConnectionError("%s", err.Error())
c.close(websocket.CloseNormalClosure, "terminated")
return false
}
@@ -239,7 +239,6 @@ func (c *wsConnection) run() {
ctx, cancel := context.WithCancel(c.ctx)
defer func() {
cancel()
- c.close(websocket.CloseAbnormalClosure, "unexpected closure")
}()
// If we're running in graphql-ws mode, create a timer that will trigger a
@@ -369,7 +368,7 @@ func (c *wsConnection) closeOnCancel(ctx context.Context) {
<-ctx.Done()
if r := closeReasonForContext(ctx); r != "" {
- c.sendConnectionError(r)
+ c.sendConnectionError("%s", r)
}
c.close(websocket.CloseNormalClosure, "terminated")
}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/int.go b/vendor/github.com/99designs/gqlgen/graphql/int.go
index 2a5604e9..41cad3f1 100644
--- a/vendor/github.com/99designs/gqlgen/graphql/int.go
+++ b/vendor/github.com/99designs/gqlgen/graphql/int.go
@@ -23,6 +23,8 @@ func UnmarshalInt(v any) (int, error) {
return int(v), nil
case json.Number:
return strconv.Atoi(string(v))
+ case nil:
+ return 0, nil
default:
return 0, fmt.Errorf("%T is not an int", v)
}
@@ -44,6 +46,8 @@ func UnmarshalInt64(v any) (int64, error) {
return v, nil
case json.Number:
return strconv.ParseInt(string(v), 10, 64)
+ case nil:
+ return 0, nil
default:
return 0, fmt.Errorf("%T is not an int", v)
}
@@ -73,6 +77,8 @@ func UnmarshalInt32(v any) (int32, error) {
return 0, err
}
return int32(iv), nil
+ case nil:
+ return 0, nil
default:
return 0, fmt.Errorf("%T is not an int", v)
}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/string.go b/vendor/github.com/99designs/gqlgen/graphql/string.go
index 61da5810..6622734e 100644
--- a/vendor/github.com/99designs/gqlgen/graphql/string.go
+++ b/vendor/github.com/99designs/gqlgen/graphql/string.go
@@ -62,7 +62,7 @@ func UnmarshalString(v any) (string, error) {
case bool:
return strconv.FormatBool(v), nil
case nil:
- return "null", nil
+ return "", nil
default:
return "", fmt.Errorf("%T is not a string", v)
}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/uint.go b/vendor/github.com/99designs/gqlgen/graphql/uint.go
index ffccaf64..cd5d2355 100644
--- a/vendor/github.com/99designs/gqlgen/graphql/uint.go
+++ b/vendor/github.com/99designs/gqlgen/graphql/uint.go
@@ -34,6 +34,8 @@ func UnmarshalUint(v any) (uint, error) {
case json.Number:
u64, err := strconv.ParseUint(string(v), 10, 64)
return uint(u64), err
+ case nil:
+ return 0, nil
default:
return 0, fmt.Errorf("%T is not an uint", v)
}
@@ -63,6 +65,8 @@ func UnmarshalUint64(v any) (uint64, error) {
return uint64(v), nil
case json.Number:
return strconv.ParseUint(string(v), 10, 64)
+ case nil:
+ return 0, nil
default:
return 0, fmt.Errorf("%T is not an uint", v)
}
@@ -100,6 +104,8 @@ func UnmarshalUint32(v any) (uint32, error) {
return 0, err
}
return uint32(iv), nil
+ case nil:
+ return 0, nil
default:
return 0, fmt.Errorf("%T is not an uint", v)
}
diff --git a/vendor/github.com/99designs/gqlgen/graphql/version.go b/vendor/github.com/99designs/gqlgen/graphql/version.go
index 82266736..2031242f 100644
--- a/vendor/github.com/99designs/gqlgen/graphql/version.go
+++ b/vendor/github.com/99designs/gqlgen/graphql/version.go
@@ -1,3 +1,3 @@
package graphql
-const Version = "v0.17.49"
+const Version = "v0.17.55"
diff --git a/vendor/github.com/99designs/gqlgen/init-templates/gqlgen.yml.gotmpl b/vendor/github.com/99designs/gqlgen/init-templates/gqlgen.yml.gotmpl
index 6e97f8bf..648ec2b4 100644
--- a/vendor/github.com/99designs/gqlgen/init-templates/gqlgen.yml.gotmpl
+++ b/vendor/github.com/99designs/gqlgen/init-templates/gqlgen.yml.gotmpl
@@ -11,6 +11,9 @@ exec:
# federation:
# filename: graph/federation.go
# package: graph
+# version: 2
+# options
+# computed_requires: true
# Where should any generated models go?
model:
@@ -63,6 +66,13 @@ resolver:
# Optional: set to skip running `go mod tidy` when generating server code
# skip_mod_tidy: true
+# Optional: if this is set to true, argument directives that
+# decorate a field with a null value will still be called.
+#
+# This enables argumment directives to not just mutate
+# argument values but to set them even if they're null.
+call_argument_directives_with_null: true
+
# gqlgen will search for any type names in the schema in these go packages
# if they match it will use them, otherwise it will generate them.
autobind:
diff --git a/vendor/github.com/99designs/gqlgen/internal/code/alias.go b/vendor/github.com/99designs/gqlgen/internal/code/alias.go
new file mode 100644
index 00000000..f7685801
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/internal/code/alias.go
@@ -0,0 +1,13 @@
+//go:build !go1.23
+
+package code
+
+import (
+ "go/types"
+)
+
+// Unalias unwraps an alias type
+// TODO: Drop this function when we drop support for go1.22
+func Unalias(t types.Type) types.Type {
+ return t // No-op
+}
diff --git a/vendor/github.com/99designs/gqlgen/internal/code/alias_1.23.go b/vendor/github.com/99designs/gqlgen/internal/code/alias_1.23.go
new file mode 100644
index 00000000..fa0b216c
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/internal/code/alias_1.23.go
@@ -0,0 +1,19 @@
+//go:build go1.23
+
+package code
+
+import (
+ "go/types"
+)
+
+// Unalias unwraps an alias type
+func Unalias(t types.Type) types.Type {
+ if p, ok := t.(*types.Pointer); ok {
+ // If the type come from auto-binding,
+ // it will be a pointer to an alias type.
+ // (e.g: `type Cursor = entgql.Cursor[int]`)
+ // *ent.Cursor is the type we got from auto-binding.
+ return types.NewPointer(Unalias(p.Elem()))
+ }
+ return types.Unalias(t)
+}
diff --git a/vendor/github.com/99designs/gqlgen/internal/code/compare.go b/vendor/github.com/99designs/gqlgen/internal/code/compare.go
index a3f15f18..05fad22d 100644
--- a/vendor/github.com/99designs/gqlgen/internal/code/compare.go
+++ b/vendor/github.com/99designs/gqlgen/internal/code/compare.go
@@ -8,6 +8,8 @@ import (
// CompatibleTypes isnt a strict comparison, it allows for pointer differences
func CompatibleTypes(expected, actual types.Type) error {
+ // Unwrap any aliases
+ expected, actual = Unalias(expected), Unalias(actual)
// Special case to deal with pointer mismatches
{
expectedPtr, expectedIsPtr := expected.(*types.Pointer)
diff --git a/vendor/github.com/99designs/gqlgen/plugin/federation/constants.go b/vendor/github.com/99designs/gqlgen/plugin/federation/constants.go
new file mode 100644
index 00000000..8571db1f
--- /dev/null
+++ b/vendor/github.com/99designs/gqlgen/plugin/federation/constants.go
@@ -0,0 +1,185 @@
+package federation
+
+import (
+ "github.com/99designs/gqlgen/codegen/config"
+ "github.com/vektah/gqlparser/v2/ast"
+)
+
+// The name of the field argument that is injected into the resolver to support @requires.
+const fieldArgRequires = "_federationRequires"
+
+// The name of the scalar type used in the injected field argument to support @requires.
+const mapTypeName = "_RequiresMap"
+
+// The @key directive that defines the key fields for an entity.
+const dirNameKey = "key"
+
+// The @requires directive that defines the required fields for an entity to be resolved.
+const dirNameRequires = "requires"
+
+// The @entityResolver directive allows users to specify entity resolvers as batch lookups
+const dirNameEntityResolver = "entityResolver"
+
+const dirNamePopulateFromRepresentations = "populateFromRepresentations"
+
+var populateFromRepresentationsImplementation = `func(ctx context.Context, obj any, next graphql.Resolver) (res any, err error) {
+ fc := graphql.GetFieldContext(ctx)
+
+ // We get the Federation representations argument from the _entities resolver
+ representations, ok := fc.Parent.Parent.Args["representations"].([]map[string]any)
+ if !ok {
+ return nil, errors.New("must be called from within _entities")
+ }
+
+ // Get the index of the current entity in the representations list. This is
+ // set by the execution context after the _entities resolver is called.
+ index := fc.Parent.Index
+ if index == nil {
+ return nil, errors.New("couldn't find input index for entity")
+ }
+
+ if len(representations) < *index {
+ return nil, errors.New("representation not found")
+ }
+
+ return representations[*index], nil
+}`
+
+const DirNameEntityReference = "entityReference"
+
+// The fields arguments must be provided to both key and requires directives.
+const DirArgFields = "fields"
+
+// Tells the code generator what type the directive is referencing
+const DirArgType = "type"
+
+// The file name for Federation directives
+const dirGraphQLQFile = "federation/directives.graphql"
+
+// The file name for Federation entities
+const entityGraphQLQFile = "federation/entity.graphql"
+
+const federationVersion1Schema = `
+ directive @key(fields: _FieldSet!) repeatable on OBJECT | INTERFACE
+ directive @requires(fields: _FieldSet!) on FIELD_DEFINITION
+ directive @provides(fields: _FieldSet!) on FIELD_DEFINITION
+ directive @extends on OBJECT | INTERFACE
+ directive @external on FIELD_DEFINITION
+ scalar _Any
+ scalar _FieldSet
+`
+
+const federationVersion2Schema = `
+ directive @authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM
+ directive @composeDirective(name: String!) repeatable on SCHEMA
+ directive @extends on OBJECT | INTERFACE
+ directive @external on OBJECT | FIELD_DEFINITION
+ directive @key(fields: FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE
+ directive @inaccessible on
+ | ARGUMENT_DEFINITION
+ | ENUM
+ | ENUM_VALUE
+ | FIELD_DEFINITION
+ | INPUT_FIELD_DEFINITION
+ | INPUT_OBJECT
+ | INTERFACE
+ | OBJECT
+ | SCALAR
+ | UNION
+ directive @interfaceObject on OBJECT
+ directive @link(import: [String!], url: String!) repeatable on SCHEMA
+ directive @override(from: String!, label: String) on FIELD_DEFINITION
+ directive @policy(policies: [[federation__Policy!]!]!) on
+ | FIELD_DEFINITION
+ | OBJECT
+ | INTERFACE
+ | SCALAR
+ | ENUM
+ directive @provides(fields: FieldSet!) on FIELD_DEFINITION
+ directive @requires(fields: FieldSet!) on FIELD_DEFINITION
+ directive @requiresScopes(scopes: [[federation__Scope!]!]!) on
+ | FIELD_DEFINITION
+ | OBJECT
+ | INTERFACE
+ | SCALAR
+ | ENUM
+ directive @shareable repeatable on FIELD_DEFINITION | OBJECT
+ directive @tag(name: String!) repeatable on
+ | ARGUMENT_DEFINITION
+ | ENUM
+ | ENUM_VALUE
+ | FIELD_DEFINITION
+ | INPUT_FIELD_DEFINITION
+ | INPUT_OBJECT
+ | INTERFACE
+ | OBJECT
+ | SCALAR
+ | UNION
+ scalar _Any
+ scalar FieldSet
+ scalar federation__Policy
+ scalar federation__Scope
+`
+
+var builtins = config.TypeMap{
+ "_Service": {
+ Model: config.StringList{
+ "github.com/99designs/gqlgen/plugin/federation/fedruntime.Service",
+ },
+ },
+ "_Entity": {
+ Model: config.StringList{
+ "github.com/99designs/gqlgen/plugin/federation/fedruntime.Entity",
+ },
+ },
+ "Entity": {
+ Model: config.StringList{
+ "github.com/99designs/gqlgen/plugin/federation/fedruntime.Entity",
+ },
+ },
+ "_Any": {
+ Model: config.StringList{"github.com/99designs/gqlgen/graphql.Map"},
+ },
+ "federation__Scope": {
+ Model: config.StringList{"github.com/99designs/gqlgen/graphql.String"},
+ },
+ "federation__Policy": {
+ Model: config.StringList{"github.com/99designs/gqlgen/graphql.String"},
+ },
+}
+
+var dirPopulateFromRepresentations = &ast.DirectiveDefinition{
+ Name: dirNamePopulateFromRepresentations,
+ IsRepeatable: false,
+ Description: `This is a runtime directive used to implement @requires. It's automatically placed
+on the generated _federationRequires argument, and the implementation of it extracts the
+correct value from the input representations list.`,
+ Locations: []ast.DirectiveLocation{ast.LocationArgumentDefinition},
+ Position: &ast.Position{Src: &ast.Source{
+ Name: dirGraphQLQFile,
+ }},
+}
+
+var dirEntityReference = &ast.DirectiveDefinition{
+ Name: DirNameEntityReference,
+ IsRepeatable: false,
+ Description: `This is a compile-time directive used to implement @requires.
+It tells the code generator how to generate the model for the scalar.`,
+ Locations: []ast.DirectiveLocation{ast.LocationScalar},
+ Arguments: ast.ArgumentDefinitionList{
+ {
+ Name: DirArgType,
+ Type: ast.NonNullNamedType("String", nil),
+ Description: `The name of the entity that the fields selection
+set should be validated against.`,
+ },
+ {
+ Name: DirArgFields,
+ Type: ast.NonNullNamedType("FieldSet", nil),
+ Description: "The selection that the scalar should generate into.",
+ },
+ },
+ Position: &ast.Position{Src: &ast.Source{
+ Name: dirGraphQLQFile,
+ }},
+}
diff --git a/vendor/github.com/99designs/gqlgen/plugin/federation/entity.go b/vendor/github.com/99designs/gqlgen/plugin/federation/entity.go
index 4e9e1afc..562d86c5 100644
--- a/vendor/github.com/99designs/gqlgen/plugin/federation/entity.go
+++ b/vendor/github.com/99designs/gqlgen/plugin/federation/entity.go
@@ -22,10 +22,12 @@ type Entity struct {
}
type EntityResolver struct {
- ResolverName string // The resolver name, such as FindUserByID
- KeyFields []*KeyField // The fields declared in @key.
- InputType types.Type // The Go generated input type for multi entity resolvers
- InputTypeName string
+ ResolverName string // The resolver name, such as FindUserByID
+ KeyFields []*KeyField // The fields declared in @key.
+ InputType types.Type // The Go generated input type for multi entity resolvers
+ InputTypeName string
+ ReturnType types.Type // The Go generated return type for the entity
+ ReturnTypeName string
}
func (e *EntityResolver) LookupInputType() string {
@@ -60,7 +62,7 @@ func (e *Entity) isFieldImplicitlyExternal(field *ast.FieldDefinition, federatio
if federationVersion != 2 {
return false
}
- // TODO: From the spec, it seems like if an entity is not resolvable then it should not only not have a resolver, but should not appear in the _Entitiy union.
+ // TODO: From the spec, it seems like if an entity is not resolvable then it should not only not have a resolver, but should not appear in the _Entity union.
// The current implementation is a less drastic departure from the previous behavior, but should probably be reviewed.
// See https://www.apollographql.com/docs/federation/subgraph-spec/
if e.isResolvable() {
@@ -76,7 +78,7 @@ func (e *Entity) isFieldImplicitlyExternal(field *ast.FieldDefinition, federatio
// Determine if the entity is resolvable.
func (e *Entity) isResolvable() bool {
- key := e.Def.Directives.ForName("key")
+ key := e.Def.Directives.ForName(dirNameKey)
if key == nil {
// If there is no key directive, the entity is resolvable.
return true
@@ -102,11 +104,11 @@ func (e *Entity) isKeyField(field *ast.FieldDefinition) bool {
// Get the key fields for this entity.
func (e *Entity) keyFields() []string {
- key := e.Def.Directives.ForName("key")
+ key := e.Def.Directives.ForName(dirNameKey)
if key == nil {
return []string{}
}
- fields := key.Arguments.ForName("fields")
+ fields := key.Arguments.ForName(DirArgFields)
if fields == nil {
return []string{}
}
diff --git a/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go b/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go
index fd33255b..f7a4d6be 100644
--- a/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go
+++ b/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go
@@ -2,6 +2,7 @@ package federation
import (
_ "embed"
+ "errors"
"fmt"
"sort"
"strings"
@@ -12,7 +13,6 @@ import (
"github.com/99designs/gqlgen/codegen/config"
"github.com/99designs/gqlgen/codegen/templates"
"github.com/99designs/gqlgen/internal/rewrite"
- "github.com/99designs/gqlgen/plugin"
"github.com/99designs/gqlgen/plugin/federation/fieldset"
)
@@ -22,56 +22,81 @@ var federationTemplate string
//go:embed requires.gotpl
var explicitRequiresTemplate string
-type federation struct {
+type Federation struct {
Entities []*Entity
- Version int
- PackageOptions map[string]bool
+ PackageOptions PackageOptions
+
+ version int
+
+ // true if @requires is used in the schema
+ usesRequires bool
+}
+
+type PackageOptions struct {
+ // ExplicitRequires will generate a function in the execution context
+ // to populate fields using the @required directive into the entity.
+ //
+ // You can only set one of ExplicitRequires or ComputedRequires to true.
+ ExplicitRequires bool
+ // ComputedRequires generates resolver functions to compute values for
+ // fields using the @required directive.
+ ComputedRequires bool
}
// New returns a federation plugin that injects
// federated directives and types into the schema
-func New(version int) plugin.Plugin {
+func New(version int, cfg *config.Config) (*Federation, error) {
if version == 0 {
version = 1
}
- return &federation{Version: version}
+ options, err := buildPackageOptions(cfg)
+ if err != nil {
+ return nil, fmt.Errorf("invalid federation package options: %w", err)
+ }
+ return &Federation{
+ version: version,
+ PackageOptions: options,
+ }, nil
+}
+
+func buildPackageOptions(cfg *config.Config) (PackageOptions, error) {
+ packageOptions := cfg.Federation.Options
+
+ explicitRequires := packageOptions["explicit_requires"]
+ computedRequires := packageOptions["computed_requires"]
+ if explicitRequires && computedRequires {
+ return PackageOptions{}, errors.New("only one of explicit_requires or computed_requires can be set to true")
+ }
+
+ if computedRequires {
+ if cfg.Federation.Version != 2 {
+ return PackageOptions{}, errors.New("when using federation.options.computed_requires you must be using Federation 2")
+ }
+
+ // We rely on injecting a null argument with a directives for fields with @requires, so we need to ensure
+ // our directive is always called.
+ if !cfg.CallArgumentDirectivesWithNull {
+ return PackageOptions{}, errors.New("when using federation.options.computed_requires, call_argument_directives_with_null must be set to true")
+ }
+ }
+
+ // We rely on injecting a null argument with a directives for fields with @requires, so we need to ensure
+ // our directive is always called.
+
+ return PackageOptions{
+ ExplicitRequires: explicitRequires,
+ ComputedRequires: computedRequires,
+ }, nil
}
// Name returns the plugin name
-func (f *federation) Name() string {
+func (f *Federation) Name() string {
return "federation"
}
// MutateConfig mutates the configuration
-func (f *federation) MutateConfig(cfg *config.Config) error {
- builtins := config.TypeMap{
- "_Service": {
- Model: config.StringList{
- "github.com/99designs/gqlgen/plugin/federation/fedruntime.Service",
- },
- },
- "_Entity": {
- Model: config.StringList{
- "github.com/99designs/gqlgen/plugin/federation/fedruntime.Entity",
- },
- },
- "Entity": {
- Model: config.StringList{
- "github.com/99designs/gqlgen/plugin/federation/fedruntime.Entity",
- },
- },
- "_Any": {
- Model: config.StringList{"github.com/99designs/gqlgen/graphql.Map"},
- },
- "federation__Scope": {
- Model: config.StringList{"github.com/99designs/gqlgen/graphql.String"},
- },
- "federation__Policy": {
- Model: config.StringList{"github.com/99designs/gqlgen/graphql.String"},
- },
- }
-
+func (f *Federation) MutateConfig(cfg *config.Config) error {
for typeName, entry := range builtins {
if cfg.Models.Exists(typeName) {
return fmt.Errorf("%v already exists which must be reserved when Federation is enabled", typeName)
@@ -79,13 +104,14 @@ func (f *federation) MutateConfig(cfg *config.Config) error {
cfg.Models[typeName] = entry
}
cfg.Directives["external"] = config.DirectiveConfig{SkipRuntime: true}
- cfg.Directives["requires"] = config.DirectiveConfig{SkipRuntime: true}
+ cfg.Directives[dirNameRequires] = config.DirectiveConfig{SkipRuntime: true}
cfg.Directives["provides"] = config.DirectiveConfig{SkipRuntime: true}
- cfg.Directives["key"] = config.DirectiveConfig{SkipRuntime: true}
+ cfg.Directives[dirNameKey] = config.DirectiveConfig{SkipRuntime: true}
cfg.Directives["extends"] = config.DirectiveConfig{SkipRuntime: true}
+ cfg.Directives[dirNameEntityResolver] = config.DirectiveConfig{SkipRuntime: true}
// Federation 2 specific directives
- if f.Version == 2 {
+ if f.version == 2 {
cfg.Directives["shareable"] = config.DirectiveConfig{SkipRuntime: true}
cfg.Directives["link"] = config.DirectiveConfig{SkipRuntime: true}
cfg.Directives["tag"] = config.DirectiveConfig{SkipRuntime: true}
@@ -98,95 +124,48 @@ func (f *federation) MutateConfig(cfg *config.Config) error {
cfg.Directives["composeDirective"] = config.DirectiveConfig{SkipRuntime: true}
}
+ if f.usesRequires && f.PackageOptions.ComputedRequires {
+ cfg.Schema.Directives[dirPopulateFromRepresentations.Name] = dirPopulateFromRepresentations
+ cfg.Directives[dirPopulateFromRepresentations.Name] = config.DirectiveConfig{Implementation: &populateFromRepresentationsImplementation}
+
+ cfg.Schema.Directives[dirEntityReference.Name] = dirEntityReference
+ cfg.Directives[dirEntityReference.Name] = config.DirectiveConfig{SkipRuntime: true}
+
+ f.addMapType(cfg)
+ f.mutateSchemaForRequires(cfg.Schema, cfg)
+ }
+
return nil
}
-func (f *federation) InjectSourceEarly() *ast.Source {
+func (f *Federation) InjectSourcesEarly() ([]*ast.Source, error) {
input := ``
// add version-specific changes on key directive, as well as adding the new directives for federation 2
- if f.Version == 1 {
- input += `
- directive @key(fields: _FieldSet!) repeatable on OBJECT | INTERFACE
- directive @requires(fields: _FieldSet!) on FIELD_DEFINITION
- directive @provides(fields: _FieldSet!) on FIELD_DEFINITION
- directive @extends on OBJECT | INTERFACE
- directive @external on FIELD_DEFINITION
- scalar _Any
- scalar _FieldSet
-`
- } else if f.Version == 2 {
- input += `
- directive @authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM
- directive @composeDirective(name: String!) repeatable on SCHEMA
- directive @extends on OBJECT | INTERFACE
- directive @external on OBJECT | FIELD_DEFINITION
- directive @key(fields: FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE
- directive @inaccessible on
- | ARGUMENT_DEFINITION
- | ENUM
- | ENUM_VALUE
- | FIELD_DEFINITION
- | INPUT_FIELD_DEFINITION
- | INPUT_OBJECT
- | INTERFACE
- | OBJECT
- | SCALAR
- | UNION
- directive @interfaceObject on OBJECT
- directive @link(import: [String!], url: String!) repeatable on SCHEMA
- directive @override(from: String!, label: String) on FIELD_DEFINITION
- directive @policy(policies: [[federation__Policy!]!]!) on
- | FIELD_DEFINITION
- | OBJECT
- | INTERFACE
- | SCALAR
- | ENUM
- directive @provides(fields: FieldSet!) on FIELD_DEFINITION
- directive @requires(fields: FieldSet!) on FIELD_DEFINITION
- directive @requiresScopes(scopes: [[federation__Scope!]!]!) on
- | FIELD_DEFINITION
- | OBJECT
- | INTERFACE
- | SCALAR
- | ENUM
- directive @shareable repeatable on FIELD_DEFINITION | OBJECT
- directive @tag(name: String!) repeatable on
- | ARGUMENT_DEFINITION
- | ENUM
- | ENUM_VALUE
- | FIELD_DEFINITION
- | INPUT_FIELD_DEFINITION
- | INPUT_OBJECT
- | INTERFACE
- | OBJECT
- | SCALAR
- | UNION
- scalar _Any
- scalar FieldSet
- scalar federation__Policy
- scalar federation__Scope
-`
+ if f.version == 1 {
+ input += federationVersion1Schema
+ } else if f.version == 2 {
+ input += federationVersion2Schema
}
- return &ast.Source{
- Name: "federation/directives.graphql",
+
+ return []*ast.Source{{
+ Name: dirGraphQLQFile,
Input: input,
BuiltIn: true,
- }
+ }}, nil
}
// InjectSourceLate creates a GraphQL Entity type with all
// the fields that had the @key directive
-func (f *federation) InjectSourceLate(schema *ast.Schema) *ast.Source {
- f.setEntities(schema)
+func (f *Federation) InjectSourcesLate(schema *ast.Schema) ([]*ast.Source, error) {
+ f.Entities = f.buildEntities(schema, f.version)
- var entities, resolvers, entityResolverInputDefinitions string
+ entities := make([]string, 0)
+ resolvers := make([]string, 0)
+ entityResolverInputDefinitions := make([]string, 0)
for _, e := range f.Entities {
if e.Def.Kind != ast.Interface {
- if entities != "" {
- entities += " | "
- }
- entities += e.Name
+ entities = append(entities, e.Name)
} else if len(schema.GetPossibleTypes(e.Def)) == 0 {
fmt.Println(
"skipping @key field on interface " + e.Def.Name + " as no types implement it",
@@ -194,48 +173,33 @@ func (f *federation) InjectSourceLate(schema *ast.Schema) *ast.Source {
}
for _, r := range e.Resolvers {
- if e.Multi {
- if entityResolverInputDefinitions != "" {
- entityResolverInputDefinitions += "\n\n"
- }
- entityResolverInputDefinitions += "input " + r.InputTypeName + " {\n"
- for _, keyField := range r.KeyFields {
- entityResolverInputDefinitions += fmt.Sprintf(
- "\t%s: %s\n",
- keyField.Field.ToGo(),
- keyField.Definition.Type.String(),
- )
- }
- entityResolverInputDefinitions += "}"
- resolvers += fmt.Sprintf("\t%s(reps: [%s]!): [%s]\n", r.ResolverName, r.InputTypeName, e.Name)
- } else {
- resolverArgs := ""
- for _, keyField := range r.KeyFields {
- resolverArgs += fmt.Sprintf("%s: %s,", keyField.Field.ToGoPrivate(), keyField.Definition.Type.String())
- }
- resolvers += fmt.Sprintf("\t%s(%s): %s!\n", r.ResolverName, resolverArgs, e.Name)
+ resolverSDL, entityResolverInputSDL := buildResolverSDL(r, e.Multi)
+ resolvers = append(resolvers, resolverSDL)
+ if entityResolverInputSDL != "" {
+ entityResolverInputDefinitions = append(entityResolverInputDefinitions, entityResolverInputSDL)
}
}
}
var blocks []string
- if entities != "" {
- entities = `# a union of all types that use the @key directive
-union _Entity = ` + entities
- blocks = append(blocks, entities)
+ if len(entities) > 0 {
+ entitiesSDL := `# a union of all types that use the @key directive
+union _Entity = ` + strings.Join(entities, " | ")
+ blocks = append(blocks, entitiesSDL)
}
// resolvers can be empty if a service defines only "empty
// extend" types. This should be rare.
- if resolvers != "" {
- if entityResolverInputDefinitions != "" {
- blocks = append(blocks, entityResolverInputDefinitions)
+ if len(resolvers) > 0 {
+ if len(entityResolverInputDefinitions) > 0 {
+ inputSDL := strings.Join(entityResolverInputDefinitions, "\n\n")
+ blocks = append(blocks, inputSDL)
}
- resolvers = `# fake type to build resolver interfaces for users to implement
+ resolversSDL := `# fake type to build resolver interfaces for users to implement
type Entity {
- ` + resolvers + `
+` + strings.Join(resolvers, "\n") + `
}`
- blocks = append(blocks, resolvers)
+ blocks = append(blocks, resolversSDL)
}
_serviceTypeDef := `type _Service {
@@ -259,14 +223,14 @@ type Entity {
}`
blocks = append(blocks, extendTypeQueryDef)
- return &ast.Source{
- Name: "federation/entity.graphql",
+ return []*ast.Source{{
+ Name: entityGraphQLQFile,
BuiltIn: true,
Input: "\n" + strings.Join(blocks, "\n\n") + "\n",
- }
+ }}, nil
}
-func (f *federation) GenerateCode(data *codegen.Data) error {
+func (f *Federation) GenerateCode(data *codegen.Data) error {
// requires imports
requiresImports := make(map[string]bool, 0)
requiresImports["context"] = true
@@ -275,7 +239,11 @@ func (f *federation) GenerateCode(data *codegen.Data) error {
requiresEntities := make(map[string]*Entity, 0)
// Save package options on f for template use
- f.PackageOptions = data.Config.Federation.Options
+ packageOptions, err := buildPackageOptions(data.Config)
+ if err != nil {
+ return fmt.Errorf("invalid federation package options: %w", err)
+ }
+ f.PackageOptions = packageOptions
if len(f.Entities) > 0 {
if data.Objects.ByName("Entity") != nil {
@@ -295,18 +263,7 @@ func (f *federation) GenerateCode(data *codegen.Data) error {
}
for _, r := range e.Resolvers {
- // fill in types for key fields
- //
- for _, keyField := range r.KeyFields {
- if len(keyField.Field) == 0 {
- fmt.Println(
- "skipping @key field " + keyField.Definition.Name + " in " + r.ResolverName + " in " + e.Def.Name,
- )
- continue
- }
- cgField := keyField.Field.TypeReference(obj, data.Objects)
- keyField.Type = cgField.TypeReference
- }
+ populateKeyFieldTypes(r, obj, data.Objects, e.Def.Name)
}
// fill in types for requires fields
@@ -348,69 +305,12 @@ func (f *federation) GenerateCode(data *codegen.Data) error {
}
}
- if data.Config.Federation.Options["explicit_requires"] && len(requiresEntities) > 0 {
- // check for existing requires functions
- type Populator struct {
- FuncName string
- Exists bool
- Comment string
- Implementation string
- Entity *Entity
- }
- populators := make([]Populator, 0)
-
- rewriter, err := rewrite.New(data.Config.Federation.Dir())
- if err != nil {
- return err
- }
-
- for name, entity := range requiresEntities {
- populator := Populator{
- FuncName: fmt.Sprintf("Populate%sRequires", name),
- Entity: entity,
- }
-
- populator.Comment = strings.TrimSpace(strings.TrimLeft(rewriter.GetMethodComment("executionContext", populator.FuncName), `\`))
- populator.Implementation = strings.TrimSpace(rewriter.GetMethodBody("executionContext", populator.FuncName))
-
- if populator.Implementation == "" {
- populator.Exists = false
- populator.Implementation = fmt.Sprintf("panic(fmt.Errorf(\"not implemented: %v\"))", populator.FuncName)
- }
- populators = append(populators, populator)
- }
-
- sort.Slice(populators, func(i, j int) bool {
- return populators[i].FuncName < populators[j].FuncName
- })
-
- requiresFile := data.Config.Federation.Dir() + "/federation.requires.go"
- existingImports := rewriter.ExistingImports(requiresFile)
- for _, imp := range existingImports {
- if imp.Alias == "" {
- // import exists in both places, remove
- delete(requiresImports, imp.ImportPath)
- }
- }
-
- for k := range requiresImports {
- existingImports = append(existingImports, rewrite.Import{ImportPath: k})
- }
-
- // render requires populators
- err = templates.Render(templates.Options{
- PackageName: data.Config.Federation.Package,
- Filename: requiresFile,
- Data: struct {
- federation
- ExistingImports []rewrite.Import
- Populators []Populator
- OriginalSource string
- }{*f, existingImports, populators, ""},
- GeneratedHeader: false,
- Packages: data.Config.Packages,
- Template: explicitRequiresTemplate,
- })
+ if f.PackageOptions.ExplicitRequires && len(requiresEntities) > 0 {
+ err := f.generateExplicitRequires(
+ data,
+ requiresEntities,
+ requiresImports,
+ )
if err != nil {
return err
}
@@ -420,7 +320,7 @@ func (f *federation) GenerateCode(data *codegen.Data) error {
PackageName: data.Config.Federation.Package,
Filename: data.Config.Federation.Filename,
Data: struct {
- federation
+ Federation
UsePointers bool
}{*f, data.Config.ResolversAlwaysReturnPointers},
GeneratedHeader: true,
@@ -429,137 +329,227 @@ func (f *federation) GenerateCode(data *codegen.Data) error {
})
}
-func (f *federation) setEntities(schema *ast.Schema) {
- for _, schemaType := range schema.Types {
- keys, ok := isFederatedEntity(schemaType)
- if !ok {
+// Fill in types for key fields
+func populateKeyFieldTypes(
+ resolver *EntityResolver,
+ obj *codegen.Object,
+ allObjects codegen.Objects,
+ name string,
+) {
+ for _, keyField := range resolver.KeyFields {
+ if len(keyField.Field) == 0 {
+ fmt.Println(
+ "skipping @key field " + keyField.Definition.Name + " in " + resolver.ResolverName + " in " + name,
+ )
continue
}
+ cgField := keyField.Field.TypeReference(obj, allObjects)
+ keyField.Type = cgField.TypeReference
+ }
+}
- if (schemaType.Kind == ast.Interface) && (len(schema.GetPossibleTypes(schemaType)) == 0) {
- fmt.Printf("@key directive found on unused \"interface %s\". Will be ignored.\n", schemaType.Name)
- continue
+func (f *Federation) buildEntities(schema *ast.Schema, version int) []*Entity {
+ entities := make([]*Entity, 0)
+ for _, schemaType := range schema.Types {
+ entity := f.buildEntity(schemaType, schema, version)
+ if entity != nil {
+ entities = append(entities, entity)
+ }
+ }
+
+ // make sure order remains stable across multiple builds
+ sort.Slice(entities, func(i, j int) bool {
+ return entities[i].Name < entities[j].Name
+ })
+
+ return entities
+}
+
+func (f *Federation) buildEntity(
+ schemaType *ast.Definition,
+ schema *ast.Schema,
+ version int,
+) *Entity {
+ keys, ok := isFederatedEntity(schemaType)
+ if !ok {
+ return nil
+ }
+
+ if (schemaType.Kind == ast.Interface) && (len(schema.GetPossibleTypes(schemaType)) == 0) {
+ fmt.Printf("@key directive found on unused \"interface %s\". Will be ignored.\n", schemaType.Name)
+ return nil
+ }
+
+ entity := &Entity{
+ Name: schemaType.Name,
+ Def: schemaType,
+ Resolvers: nil,
+ Requires: nil,
+ Multi: isMultiEntity(schemaType),
+ }
+
+ // If our schema has a field with a type defined in
+ // another service, then we need to define an "empty
+ // extend" of that type in this service, so this service
+ // knows what the type is like. But the graphql-server
+ // will never ask us to actually resolve this "empty
+ // extend", so we don't require a resolver function for
+ // it. (Well, it will never ask in practice; it's
+ // unclear whether the spec guarantees this. See
+ // https://github.com/apollographql/apollo-server/issues/3852
+ // ). Example:
+ // type MyType {
+ // myvar: TypeDefinedInOtherService
+ // }
+ // // Federation needs this type, but
+ // // it doesn't need a resolver for it!
+ // extend TypeDefinedInOtherService @key(fields: "id") {
+ // id: ID @external
+ // }
+ if entity.allFieldsAreExternal(version) {
+ return entity
+ }
+
+ entity.Resolvers = buildResolvers(schemaType, schema, keys, entity.Multi)
+ entity.Requires = buildRequires(schemaType)
+ if len(entity.Requires) > 0 {
+ f.usesRequires = true
+ }
+
+ return entity
+}
+
+func isMultiEntity(schemaType *ast.Definition) bool {
+ dir := schemaType.Directives.ForName(dirNameEntityResolver)
+ if dir == nil {
+ return false
+ }
+
+ if dirArg := dir.Arguments.ForName("multi"); dirArg != nil {
+ if dirVal, err := dirArg.Value.Value(nil); err == nil {
+ return dirVal.(bool)
}
+ }
+
+ return false
+}
- e := &Entity{
- Name: schemaType.Name,
- Def: schemaType,
- Resolvers: nil,
- Requires: nil,
+func buildResolvers(
+ schemaType *ast.Definition,
+ schema *ast.Schema,
+ keys []*ast.Directive,
+ multi bool,
+) []*EntityResolver {
+ resolvers := make([]*EntityResolver, 0)
+ for _, dir := range keys {
+ if len(dir.Arguments) > 2 {
+ panic("More than two arguments provided for @key declaration.")
+ }
+ keyFields, resolverFields := buildKeyFields(
+ schemaType,
+ schema,
+ dir,
+ )
+
+ resolverFieldsToGo := schemaType.Name + "By" + strings.Join(resolverFields, "And")
+ var resolverName string
+ if multi {
+ resolverFieldsToGo += "s" // Pluralize for better API readability
+ resolverName = fmt.Sprintf("findMany%s", resolverFieldsToGo)
+ } else {
+ resolverName = fmt.Sprintf("find%s", resolverFieldsToGo)
}
- // Let's process custom entity resolver settings.
- dir := schemaType.Directives.ForName("entityResolver")
- if dir != nil {
- if dirArg := dir.Arguments.ForName("multi"); dirArg != nil {
- if dirVal, err := dirArg.Value.Value(nil); err == nil {
- e.Multi = dirVal.(bool)
- }
+ resolvers = append(resolvers, &EntityResolver{
+ ResolverName: resolverName,
+ KeyFields: keyFields,
+ InputTypeName: resolverFieldsToGo + "Input",
+ ReturnTypeName: schemaType.Name,
+ })
+ }
+
+ return resolvers
+}
+
+func extractFields(
+ dir *ast.Directive,
+) (string, error) {
+ var arg *ast.Argument
+
+	// since directives can now have multiple arguments, we need to check for a possible duplicate, e.g. @key(fields="" fields="")
+ for _, a := range dir.Arguments {
+ if a.Name == DirArgFields {
+ if arg != nil {
+ return "", errors.New("more than one \"fields\" argument provided for declaration")
}
+ arg = a
}
+ }
- // If our schema has a field with a type defined in
- // another service, then we need to define an "empty
- // extend" of that type in this service, so this service
- // knows what the type is like. But the graphql-server
- // will never ask us to actually resolve this "empty
- // extend", so we don't require a resolver function for
- // it. (Well, it will never ask in practice; it's
- // unclear whether the spec guarantees this. See
- // https://github.com/apollographql/apollo-server/issues/3852
- // ). Example:
- // type MyType {
- // myvar: TypeDefinedInOtherService
- // }
- // // Federation needs this type, but
- // // it doesn't need a resolver for it!
- // extend TypeDefinedInOtherService @key(fields: "id") {
- // id: ID @external
- // }
- if !e.allFieldsAreExternal(f.Version) {
- for _, dir := range keys {
- if len(dir.Arguments) > 2 {
- panic("More than two arguments provided for @key declaration.")
- }
- var arg *ast.Argument
-
- // since keys are able to now have multiple arguments, we need to check both possible for a possible @key(fields="" fields="")
- for _, a := range dir.Arguments {
- if a.Name == "fields" {
- if arg != nil {
- panic("More than one `fields` provided for @key declaration.")
- }
- arg = a
- }
- }
+ return arg.Value.Raw, nil
+}
- keyFieldSet := fieldset.New(arg.Value.Raw, nil)
+func buildKeyFields(
+ schemaType *ast.Definition,
+ schema *ast.Schema,
+ dir *ast.Directive,
+) ([]*KeyField, []string) {
+ fieldsRaw, err := extractFields(dir)
+ if err != nil {
+ panic("More than one `fields` argument provided for declaration.")
+ }
- keyFields := make([]*KeyField, len(keyFieldSet))
- resolverFields := []string{}
- for i, field := range keyFieldSet {
- def := field.FieldDefinition(schemaType, schema)
+ keyFieldSet := fieldset.New(fieldsRaw, nil)
- if def == nil {
- panic(fmt.Sprintf("no field for %v", field))
- }
+ keyFields := make([]*KeyField, len(keyFieldSet))
+ resolverFields := []string{}
+ for i, field := range keyFieldSet {
+ def := field.FieldDefinition(schemaType, schema)
- keyFields[i] = &KeyField{Definition: def, Field: field}
- resolverFields = append(resolverFields, keyFields[i].Field.ToGo())
- }
+ if def == nil {
+ panic(fmt.Sprintf("no field for %v", field))
+ }
- resolverFieldsToGo := schemaType.Name + "By" + strings.Join(resolverFields, "And")
- var resolverName string
- if e.Multi {
- resolverFieldsToGo += "s" // Pluralize for better API readability
- resolverName = fmt.Sprintf("findMany%s", resolverFieldsToGo)
- } else {
- resolverName = fmt.Sprintf("find%s", resolverFieldsToGo)
- }
+ keyFields[i] = &KeyField{Definition: def, Field: field}
+ resolverFields = append(resolverFields, keyFields[i].Field.ToGo())
+ }
- e.Resolvers = append(e.Resolvers, &EntityResolver{
- ResolverName: resolverName,
- KeyFields: keyFields,
- InputTypeName: resolverFieldsToGo + "Input",
- })
- }
+ return keyFields, resolverFields
+}
- e.Requires = []*Requires{}
- for _, f := range schemaType.Fields {
- dir := f.Directives.ForName("requires")
- if dir == nil {
- continue
- }
- if len(dir.Arguments) != 1 || dir.Arguments[0].Name != "fields" {
- panic("Exactly one `fields` argument needed for @requires declaration.")
- }
- requiresFieldSet := fieldset.New(dir.Arguments[0].Value.Raw, nil)
- for _, field := range requiresFieldSet {
- e.Requires = append(e.Requires, &Requires{
- Name: field.ToGoPrivate(),
- Field: field,
- })
- }
- }
+func buildRequires(schemaType *ast.Definition) []*Requires {
+ requires := make([]*Requires, 0)
+ for _, f := range schemaType.Fields {
+ dir := f.Directives.ForName(dirNameRequires)
+ if dir == nil {
+ continue
+ }
+
+ fieldsRaw, err := extractFields(dir)
+ if err != nil {
+ panic("Exactly one `fields` argument needed for @requires declaration.")
+ }
+ requiresFieldSet := fieldset.New(fieldsRaw, nil)
+ for _, field := range requiresFieldSet {
+ requires = append(requires, &Requires{
+ Name: field.ToGoPrivate(),
+ Field: field,
+ })
}
- f.Entities = append(f.Entities, e)
}
- // make sure order remains stable across multiple builds
- sort.Slice(f.Entities, func(i, j int) bool {
- return f.Entities[i].Name < f.Entities[j].Name
- })
+ return requires
}
func isFederatedEntity(schemaType *ast.Definition) ([]*ast.Directive, bool) {
switch schemaType.Kind {
case ast.Object:
- keys := schemaType.Directives.ForNames("key")
+ keys := schemaType.Directives.ForNames(dirNameKey)
if len(keys) > 0 {
return keys, true
}
case ast.Interface:
- keys := schemaType.Directives.ForNames("key")
+ keys := schemaType.Directives.ForNames(dirNameKey)
if len(keys) > 0 {
return keys, true
}
@@ -577,3 +567,146 @@ func isFederatedEntity(schemaType *ast.Definition) ([]*ast.Directive, bool) {
}
return nil, false
}
+
+func (f *Federation) generateExplicitRequires(
+ data *codegen.Data,
+ requiresEntities map[string]*Entity,
+ requiresImports map[string]bool,
+) error {
+ // check for existing requires functions
+ type Populator struct {
+ FuncName string
+ Exists bool
+ Comment string
+ Implementation string
+ Entity *Entity
+ }
+ populators := make([]Populator, 0)
+
+ rewriter, err := rewrite.New(data.Config.Federation.Dir())
+ if err != nil {
+ return err
+ }
+
+ for name, entity := range requiresEntities {
+ populator := Populator{
+ FuncName: fmt.Sprintf("Populate%sRequires", name),
+ Entity: entity,
+ }
+
+ populator.Comment = strings.TrimSpace(strings.TrimLeft(rewriter.GetMethodComment("executionContext", populator.FuncName), `\`))
+ populator.Implementation = strings.TrimSpace(rewriter.GetMethodBody("executionContext", populator.FuncName))
+
+ if populator.Implementation == "" {
+ populator.Exists = false
+ populator.Implementation = fmt.Sprintf("panic(fmt.Errorf(\"not implemented: %v\"))", populator.FuncName)
+ }
+ populators = append(populators, populator)
+ }
+
+ sort.Slice(populators, func(i, j int) bool {
+ return populators[i].FuncName < populators[j].FuncName
+ })
+
+ requiresFile := data.Config.Federation.Dir() + "/federation.requires.go"
+ existingImports := rewriter.ExistingImports(requiresFile)
+ for _, imp := range existingImports {
+ if imp.Alias == "" {
+ // import exists in both places, remove
+ delete(requiresImports, imp.ImportPath)
+ }
+ }
+
+ for k := range requiresImports {
+ existingImports = append(existingImports, rewrite.Import{ImportPath: k})
+ }
+
+ // render requires populators
+ return templates.Render(templates.Options{
+ PackageName: data.Config.Federation.Package,
+ Filename: requiresFile,
+ Data: struct {
+ Federation
+ ExistingImports []rewrite.Import
+ Populators []Populator
+ OriginalSource string
+ }{*f, existingImports, populators, ""},
+ GeneratedHeader: false,
+ Packages: data.Config.Packages,
+ Template: explicitRequiresTemplate,
+ })
+}
+
+func buildResolverSDL(
+ resolver *EntityResolver,
+ multi bool,
+) (resolverSDL, entityResolverInputSDL string) {
+ if multi {
+ entityResolverInputSDL = buildEntityResolverInputDefinitionSDL(resolver)
+ resolverSDL := fmt.Sprintf("\t%s(reps: [%s]!): [%s]", resolver.ResolverName, resolver.InputTypeName, resolver.ReturnTypeName)
+ return resolverSDL, entityResolverInputSDL
+ }
+
+ resolverArgs := ""
+ for _, keyField := range resolver.KeyFields {
+ resolverArgs += fmt.Sprintf("%s: %s,", keyField.Field.ToGoPrivate(), keyField.Definition.Type.String())
+ }
+ resolverSDL = fmt.Sprintf("\t%s(%s): %s!", resolver.ResolverName, resolverArgs, resolver.ReturnTypeName)
+ return resolverSDL, ""
+}
+
+func buildEntityResolverInputDefinitionSDL(resolver *EntityResolver) string {
+ entityResolverInputDefinition := "input " + resolver.InputTypeName + " {\n"
+ for _, keyField := range resolver.KeyFields {
+ entityResolverInputDefinition += fmt.Sprintf(
+ "\t%s: %s\n",
+ keyField.Field.ToGo(),
+ keyField.Definition.Type.String(),
+ )
+ }
+ return entityResolverInputDefinition + "}"
+}
+
+func (f *Federation) addMapType(cfg *config.Config) {
+ cfg.Models[mapTypeName] = config.TypeMapEntry{
+ Model: config.StringList{"github.com/99designs/gqlgen/graphql.Map"},
+ }
+ cfg.Schema.Types[mapTypeName] = &ast.Definition{
+ Kind: ast.Scalar,
+ Name: mapTypeName,
+ Description: "Maps an arbitrary GraphQL value to a map[string]any Go type.",
+ }
+}
+
+func (f *Federation) mutateSchemaForRequires(
+ schema *ast.Schema,
+ cfg *config.Config,
+) {
+ for _, schemaType := range schema.Types {
+ for _, field := range schemaType.Fields {
+ if dir := field.Directives.ForName(dirNameRequires); dir != nil {
+ // ensure we always generate a resolver for any @requires field
+ model := cfg.Models[schemaType.Name]
+ fieldConfig := model.Fields[field.Name]
+ fieldConfig.Resolver = true
+ if model.Fields == nil {
+ model.Fields = make(map[string]config.TypeMapField)
+ }
+ model.Fields[field.Name] = fieldConfig
+ cfg.Models[schemaType.Name] = model
+
+ requiresArgument := &ast.ArgumentDefinition{
+ Name: fieldArgRequires,
+ Type: ast.NamedType(mapTypeName, nil),
+ Directives: ast.DirectiveList{
+ {
+ Name: dirNamePopulateFromRepresentations,
+ Definition: dirPopulateFromRepresentations,
+ },
+ },
+ }
+ field.Arguments = append(field.Arguments, requiresArgument)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/99designs/gqlgen/plugin/federation/federation.gotpl b/vendor/github.com/99designs/gqlgen/plugin/federation/federation.gotpl
index 119bab5b..fdb40a6a 100644
--- a/vendor/github.com/99designs/gqlgen/plugin/federation/federation.gotpl
+++ b/vendor/github.com/99designs/gqlgen/plugin/federation/federation.gotpl
@@ -36,15 +36,50 @@ func (ec *executionContext) __resolve__service(ctx context.Context) (fedruntime.
func (ec *executionContext) __resolve_entities(ctx context.Context, representations []map[string]interface{}) []fedruntime.Entity {
list := make([]fedruntime.Entity, len(representations))
- repsMap := map[string]struct {
- i []int
- r []map[string]interface{}
- }{}
+ repsMap := ec.buildRepresentationGroups(ctx, representations)
+
+ switch len(repsMap) {
+ case 0:
+ return list
+ case 1:
+ for typeName, reps := range repsMap {
+ ec.resolveEntityGroup(ctx, typeName, reps, list)
+ }
+ return list
+ default:
+ var g sync.WaitGroup
+ g.Add(len(repsMap))
+ for typeName, reps := range repsMap {
+ go func(typeName string, reps []EntityWithIndex) {
+ ec.resolveEntityGroup(ctx, typeName, reps, list)
+ g.Done()
+ }(typeName, reps)
+ }
+ g.Wait()
+ return list
+ }
+}
+
+type EntityWithIndex struct {
+ // The index in the original representation array
+ index int
+ entity EntityRepresentation
+}
+
+// EntityRepresentation is the JSON representation of an entity sent by the Router
+// used as the inputs for us to resolve.
+//
+// We make it a map because we know the top level JSON is always an object.
+type EntityRepresentation map[string]any
// We group entities by typename so that we can parallelize their resolution.
// This is particularly helpful when there are entity groups in multi mode.
- buildRepresentationGroups := func(reps []map[string]interface{}) {
- for i, rep := range reps {
+func (ec *executionContext) buildRepresentationGroups(
+ ctx context.Context,
+ representations []map[string]any,
+) map[string][]EntityWithIndex {
+ repsMap := make(map[string][]EntityWithIndex)
+ for i, rep := range representations {
typeName, ok := rep["__typename"].(string)
if !ok {
// If there is no __typename, we just skip the representation;
@@ -53,14 +88,48 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati
continue
}
- _r := repsMap[typeName]
- _r.i = append(_r.i, i)
- _r.r = append(_r.r, rep)
- repsMap[typeName] = _r
+ repsMap[typeName] = append(repsMap[typeName], EntityWithIndex{
+ index: i,
+ entity: rep,
+ })
+ }
+
+ return repsMap
+}
+
+func (ec *executionContext) resolveEntityGroup(
+ ctx context.Context,
+ typeName string,
+ reps []EntityWithIndex,
+ list []fedruntime.Entity,
+) {
+ if isMulti(typeName) {
+ err := ec.resolveManyEntities(ctx, typeName, reps, list)
+ if err != nil {
+ ec.Error(ctx, err)
+ }
+ } else {
+ // if there are multiple entities to resolve, parallelize (similar to
+ // graphql.FieldSet.Dispatch)
+ var e sync.WaitGroup
+ e.Add(len(reps))
+ for i, rep := range reps {
+ i, rep := i, rep
+ go func(i int, rep EntityWithIndex) {
+ entity, err := ec.resolveEntity(ctx, typeName, rep.entity)
+ if err != nil {
+ ec.Error(ctx, err)
+ } else {
+ list[rep.index] = entity
+ }
+ e.Done()
+ }(i, rep)
}
+ e.Wait()
}
+}
- isMulti := func(typeName string) bool {
+func isMulti(typeName string) bool {
switch typeName {
{{- range .Entities -}}
{{- if .Resolvers -}}
@@ -75,7 +144,11 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati
}
}
- resolveEntity := func(ctx context.Context, typeName string, rep map[string]interface{}, idx []int, i int) (err error) {
+func (ec *executionContext) resolveEntity(
+ ctx context.Context,
+ typeName string,
+ rep EntityRepresentation,
+) (e fedruntime.Entity, err error) {
// we need to do our own panic handling, because we may be called in a
// goroutine, where the usual panic handling can't catch us
defer func () {
@@ -90,45 +163,51 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati
case "{{.Def.Name}}":
resolverName, err := entityResolverNameFor{{.Def.Name}}(ctx, rep)
if err != nil {
- return fmt.Errorf(`finding resolver for Entity "{{.Def.Name}}": %w`, err)
+ return nil, fmt.Errorf(`finding resolver for Entity "{{.Def.Name}}": %w`, err)
}
switch resolverName {
{{ range $i, $resolver := .Resolvers }}
case "{{.ResolverName}}":
{{- range $j, $keyField := .KeyFields }}
- id{{$j}}, err := ec.{{.Type.UnmarshalFunc}}(ctx, rep["{{.Field.Join `"].(map[string]interface{})["`}}"])
+ id{{$j}}, err := ec.{{.Type.UnmarshalFunc}}(ctx, rep["{{.Field.Join `"].(map[string]interface{})["`}}"])
if err != nil {
- return fmt.Errorf(`unmarshalling param {{$j}} for {{$resolver.ResolverName}}(): %w`, err)
+ return nil, fmt.Errorf(`unmarshalling param {{$j}} for {{$resolver.ResolverName}}(): %w`, err)
}
{{- end}}
entity, err := ec.resolvers.Entity().{{.ResolverName | go}}(ctx, {{- range $j, $_ := .KeyFields -}} id{{$j}}, {{end}})
if err != nil {
- return fmt.Errorf(`resolving Entity "{{$entity.Def.Name}}": %w`, err)
+ return nil, fmt.Errorf(`resolving Entity "{{$entity.Def.Name}}": %w`, err)
}
- {{ if and (index $options "explicit_requires") $entity.Requires }}
+ {{- if $options.ComputedRequires }}
+ {{/* We don't do anything in this case, computed requires are handled by standard resolvers */}}
+ {{- else if and $options.ExplicitRequires $entity.Requires }}
err = ec.Populate{{$entity.Def.Name}}Requires(ctx, {{- if (not $usePointers) -}}&{{- end -}}entity, rep)
if err != nil {
- return fmt.Errorf(`populating requires for Entity "{{$entity.Def.Name}}": %w`, err)
+ return nil, fmt.Errorf(`populating requires for Entity "{{$entity.Def.Name}}": %w`, err)
}
{{- else }}
{{ range $entity.Requires }}
entity.{{.Field.JoinGo `.`}}, err = ec.{{.Type.UnmarshalFunc}}(ctx, rep["{{.Field.Join `"].(map[string]interface{})["`}}"])
if err != nil {
- return err
+ return nil, err
}
{{- end }}
{{- end }}
- list[idx[i]] = entity
- return nil
+ return entity, nil
{{- end }}
}
{{ end }}
{{- end }}
}
- return fmt.Errorf("%w: %s", ErrUnknownType, typeName)
+ return nil, fmt.Errorf("%w: %s", ErrUnknownType, typeName)
}
- resolveManyEntities := func(ctx context.Context, typeName string, reps []map[string]interface{}, idx []int) (err error) {
+func (ec *executionContext) resolveManyEntities(
+ ctx context.Context,
+ typeName string,
+ reps []EntityWithIndex,
+ list []fedruntime.Entity,
+) (err error) {
// we need to do our own panic handling, because we may be called in a
// goroutine, where the usual panic handling can't catch us
defer func () {
@@ -141,43 +220,43 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati
{{ range $_, $entity := .Entities }}
{{ if and .Resolvers .Multi -}}
case "{{.Def.Name}}":
- resolverName, err := entityResolverNameFor{{.Def.Name}}(ctx, reps[0])
+ resolverName, err := entityResolverNameFor{{.Def.Name}}(ctx, reps[0].entity)
if err != nil {
return fmt.Errorf(`finding resolver for Entity "{{.Def.Name}}": %w`, err)
}
switch resolverName {
{{ range $i, $resolver := .Resolvers }}
case "{{.ResolverName}}":
- _reps := make([]*{{.LookupInputType}}, len(reps))
+ typedReps := make([]*{{.LookupInputType}}, len(reps))
for i, rep := range reps {
{{ range $i, $keyField := .KeyFields -}}
- id{{$i}}, err := ec.{{.Type.UnmarshalFunc}}(ctx, rep["{{.Field.Join `"].(map[string]interface{})["`}}"])
+ id{{$i}}, err := ec.{{.Type.UnmarshalFunc}}(ctx, rep.entity["{{.Field.Join `"].(map[string]interface{})["`}}"])
if err != nil {
return errors.New(fmt.Sprintf("Field %s undefined in schema.", "{{.Definition.Name}}"))
}
{{end}}
- _reps[i] = &{{.LookupInputType}} {
+ typedReps[i] = &{{.LookupInputType}} {
{{ range $i, $keyField := .KeyFields -}}
{{$keyField.Field.ToGo}}: id{{$i}},
{{end}}
}
}
- entities, err := ec.resolvers.Entity().{{.ResolverName | go}}(ctx, _reps)
+ entities, err := ec.resolvers.Entity().{{.ResolverName | go}}(ctx, typedReps)
if err != nil {
return err
}
for i, entity := range entities {
{{- range $entity.Requires }}
- entity.{{.Field.JoinGo `.`}}, err = ec.{{.Type.UnmarshalFunc}}(ctx, reps[i]["{{.Field.Join `"].(map[string]interface{})["`}}"])
+ entity.{{.Field.JoinGo `.`}}, err = ec.{{.Type.UnmarshalFunc}}(ctx, reps[i].entity["{{.Field.Join `"].(map[string]interface{})["`}}"])
if err != nil {
return err
}
{{- end}}
- list[idx[i]] = entity
+ list[reps[i].index] = entity
}
return nil
{{ end }}
@@ -188,54 +267,6 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati
{{- end }}
default:
return errors.New("unknown type: "+typeName)
- }
- }
-
- resolveEntityGroup := func(typeName string, reps []map[string]interface{}, idx []int) {
- if isMulti(typeName) {
- err := resolveManyEntities(ctx, typeName, reps, idx)
- if err != nil {
- ec.Error(ctx, err)
- }
- } else {
- // if there are multiple entities to resolve, parallelize (similar to
- // graphql.FieldSet.Dispatch)
- var e sync.WaitGroup
- e.Add(len(reps))
- for i, rep := range reps {
- i, rep := i, rep
- go func(i int, rep map[string]interface{}) {
- err := resolveEntity(ctx, typeName, rep, idx, i)
- if err != nil {
- ec.Error(ctx, err)
- }
- e.Done()
- }(i, rep)
- }
- e.Wait()
- }
- }
- buildRepresentationGroups(representations)
-
- switch len(repsMap) {
- case 0:
- return list
- case 1:
- for typeName, reps := range repsMap {
- resolveEntityGroup(typeName, reps.r, reps.i)
- }
- return list
- default:
- var g sync.WaitGroup
- g.Add(len(repsMap))
- for typeName, reps := range repsMap {
- go func(typeName string, reps []map[string]interface{}, idx []int) {
- resolveEntityGroup(typeName, reps, idx)
- g.Done()
- }(typeName, reps.r, reps.i)
- }
- g.Wait()
- return list
}
}
@@ -244,13 +275,13 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati
{{ range $_, $entity := .Entities }}
{{- if .Resolvers }}
- func entityResolverNameFor{{$entity.Name}}(ctx context.Context, rep map[string]interface{}) (string, error) {
+ func entityResolverNameFor{{$entity.Name}}(ctx context.Context, rep EntityRepresentation) (string, error) {
{{- range .Resolvers }}
for {
var (
- m map[string]interface{}
+ m EntityRepresentation
val interface{}
- ok bool
+ ok bool
)
_ = val
// if all of the KeyFields values for this resolver are null,
diff --git a/vendor/github.com/99designs/gqlgen/plugin/federation/readme.md b/vendor/github.com/99designs/gqlgen/plugin/federation/readme.md
index d5dd0628..e8888c1c 100644
--- a/vendor/github.com/99designs/gqlgen/plugin/federation/readme.md
+++ b/vendor/github.com/99designs/gqlgen/plugin/federation/readme.md
@@ -18,7 +18,7 @@ TODO(miguel): add details.
# Entity resolvers - GetMany entities
-The federation plugin implements `GetMany` semantics in which entity resolvers get the entire list of representations that need to be resolved. This functionality is currently optin tho, and to enable it you need to specify the directive `@entityResolver` in the federated entity you want this feature for. E.g.
+The federation plugin implements `GetMany` semantics in which entity resolvers get the entire list of representations that need to be resolved. This functionality is currently opt-in though, and to enable it you need to specify the directive `@entityResolver` in the federated entity you want this feature for. E.g.
```
directive @entityResolver(multi: Boolean) on OBJECT
@@ -39,4 +39,4 @@ func (r *entityResolver) FindManyMultiHellosByName(ctx context.Context, reps []*
```
**Note:**
-If you are using `omit_slice_element_pointers: true` option in your config yaml, your `GetMany` resolver will still generate in the example above the same signature `FindManyMultiHellosByName(ctx context.Context, reps []*generated.ManyMultiHellosByNameInput) ([]*generated.MultiHello, error)`. But all other instances will continue to honor `omit_slice_element_pointers: true`
\ No newline at end of file
+If you are using `omit_slice_element_pointers: true` option in your config yaml, your `GetMany` resolver will still generate in the example above the same signature `FindManyMultiHellosByName(ctx context.Context, reps []*generated.ManyMultiHellosByNameInput) ([]*generated.MultiHello, error)`. But all other instances will continue to honor `omit_slice_element_pointers: true`
diff --git a/vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go b/vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go
index 5f6ce94e..660e3537 100644
--- a/vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go
+++ b/vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go
@@ -26,11 +26,6 @@ type (
// DefaultFieldMutateHook is the default hook for the Plugin which applies the GoFieldHook and GoTagFieldHook.
func DefaultFieldMutateHook(td *ast.Definition, fd *ast.FieldDefinition, f *Field) (*Field, error) {
- var err error
- f, err = GoFieldHook(td, fd, f)
- if err != nil {
- return f, err
- }
return GoTagFieldHook(td, fd, f)
}
@@ -337,117 +332,139 @@ func (m *Plugin) generateFields(cfg *config.Config, schemaType *ast.Definition)
binder := cfg.NewBinder()
fields := make([]*Field, 0)
- var omittableType types.Type
-
for _, field := range schemaType.Fields {
- var typ types.Type
- fieldDef := cfg.Schema.Types[field.Type.Name()]
-
- if cfg.Models.UserDefined(field.Type.Name()) {
- var err error
- typ, err = binder.FindTypeFromName(cfg.Models[field.Type.Name()].Model[0])
- if err != nil {
- return nil, err
- }
- } else {
- switch fieldDef.Kind {
- case ast.Scalar:
- // no user defined model, referencing a default scalar
- typ = types.NewNamed(
- types.NewTypeName(0, cfg.Model.Pkg(), "string", nil),
- nil,
- nil,
- )
-
- case ast.Interface, ast.Union:
- // no user defined model, referencing a generated interface type
- typ = types.NewNamed(
- types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil),
- types.NewInterfaceType([]*types.Func{}, []types.Type{}),
- nil,
- )
-
- case ast.Enum:
- // no user defined model, must reference a generated enum
- typ = types.NewNamed(
- types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil),
- nil,
- nil,
- )
-
- case ast.Object, ast.InputObject:
- // no user defined model, must reference a generated struct
- typ = types.NewNamed(
- types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil),
- types.NewStruct(nil, nil),
- nil,
- )
-
- default:
- panic(fmt.Errorf("unknown ast type %s", fieldDef.Kind))
- }
+ f, err := m.generateField(cfg, binder, schemaType, field)
+ if err != nil {
+ return nil, err
}
- name := templates.ToGo(field.Name)
- if nameOveride := cfg.Models[schemaType.Name].Fields[field.Name].FieldName; nameOveride != "" {
- name = nameOveride
+ if f == nil {
+ continue
}
- typ = binder.CopyModifiersFromAst(field.Type, typ)
+ fields = append(fields, f)
+ }
- if cfg.StructFieldsAlwaysPointers {
- if isStruct(typ) && (fieldDef.Kind == ast.Object || fieldDef.Kind == ast.InputObject) {
- typ = types.NewPointer(typ)
- }
+ fields = append(fields, getExtraFields(cfg, schemaType.Name)...)
+
+ return fields, nil
+}
+
+func (m *Plugin) generateField(
+ cfg *config.Config,
+ binder *config.Binder,
+ schemaType *ast.Definition,
+ field *ast.FieldDefinition,
+) (*Field, error) {
+ var omittableType types.Type
+ var typ types.Type
+ fieldDef := cfg.Schema.Types[field.Type.Name()]
+
+ if cfg.Models.UserDefined(field.Type.Name()) {
+ var err error
+ typ, err = binder.FindTypeFromName(cfg.Models[field.Type.Name()].Model[0])
+ if err != nil {
+ return nil, err
}
+ } else {
+ switch fieldDef.Kind {
+ case ast.Scalar:
+ // no user defined model, referencing a default scalar
+ typ = types.NewNamed(
+ types.NewTypeName(0, cfg.Model.Pkg(), "string", nil),
+ nil,
+ nil,
+ )
+
+ case ast.Interface, ast.Union:
+ // no user defined model, referencing a generated interface type
+ typ = types.NewNamed(
+ types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil),
+ types.NewInterfaceType([]*types.Func{}, []types.Type{}),
+ nil,
+ )
+
+ case ast.Enum:
+ // no user defined model, must reference a generated enum
+ typ = types.NewNamed(
+ types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil),
+ nil,
+ nil,
+ )
- f := &Field{
- Name: field.Name,
- GoName: name,
- Type: typ,
- Description: field.Description,
- Tag: getStructTagFromField(cfg, field),
- Omittable: cfg.NullableInputOmittable && schemaType.Kind == ast.InputObject && !field.Type.NonNull,
+ case ast.Object, ast.InputObject:
+ // no user defined model, must reference a generated struct
+ typ = types.NewNamed(
+ types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil),
+ types.NewStruct(nil, nil),
+ nil,
+ )
+
+ default:
+ panic(fmt.Errorf("unknown ast type %s", fieldDef.Kind))
}
+ }
- if m.FieldHook != nil {
- mf, err := m.FieldHook(schemaType, field, f)
- if err != nil {
- return nil, fmt.Errorf("generror: field %v.%v: %w", schemaType.Name, field.Name, err)
- }
- f = mf
+ name := templates.ToGo(field.Name)
+ if nameOverride := cfg.Models[schemaType.Name].Fields[field.Name].FieldName; nameOverride != "" {
+ name = nameOverride
+ }
+
+ typ = binder.CopyModifiersFromAst(field.Type, typ)
+
+ if cfg.StructFieldsAlwaysPointers {
+ if isStruct(typ) && (fieldDef.Kind == ast.Object || fieldDef.Kind == ast.InputObject) {
+ typ = types.NewPointer(typ)
}
+ }
- if f.IsResolver && cfg.OmitResolverFields {
- continue
+ f := &Field{
+ Name: field.Name,
+ GoName: name,
+ Type: typ,
+ Description: field.Description,
+ Tag: getStructTagFromField(cfg, field),
+ Omittable: cfg.NullableInputOmittable && schemaType.Kind == ast.InputObject && !field.Type.NonNull,
+ IsResolver: cfg.Models[schemaType.Name].Fields[field.Name].Resolver,
+ }
+
+ if omittable := cfg.Models[schemaType.Name].Fields[field.Name].Omittable; omittable != nil {
+ f.Omittable = *omittable
+ }
+
+ if m.FieldHook != nil {
+ mf, err := m.FieldHook(schemaType, field, f)
+ if err != nil {
+ return nil, fmt.Errorf("generror: field %v.%v: %w", schemaType.Name, field.Name, err)
}
+ f = mf
+ }
- if f.Omittable {
- if schemaType.Kind != ast.InputObject || field.Type.NonNull {
- return nil, fmt.Errorf("generror: field %v.%v: omittable is only applicable to nullable input fields", schemaType.Name, field.Name)
- }
+ if f.IsResolver && cfg.OmitResolverFields {
+ return nil, nil
+ }
- var err error
+ if f.Omittable {
+ if schemaType.Kind != ast.InputObject || field.Type.NonNull {
+ return nil, fmt.Errorf("generror: field %v.%v: omittable is only applicable to nullable input fields", schemaType.Name, field.Name)
+ }
- if omittableType == nil {
- omittableType, err = binder.FindTypeFromName("github.com/99designs/gqlgen/graphql.Omittable")
- if err != nil {
- return nil, err
- }
- }
+ var err error
- f.Type, err = binder.InstantiateType(omittableType, []types.Type{f.Type})
+ if omittableType == nil {
+ omittableType, err = binder.FindTypeFromName("github.com/99designs/gqlgen/graphql.Omittable")
if err != nil {
- return nil, fmt.Errorf("generror: field %v.%v: %w", schemaType.Name, field.Name, err)
+ return nil, err
}
}
- fields = append(fields, f)
+ f.Type, err = binder.InstantiateType(omittableType, []types.Type{f.Type})
+ if err != nil {
+ return nil, fmt.Errorf("generror: field %v.%v: %w", schemaType.Name, field.Name, err)
+ }
}
- fields = append(fields, getExtraFields(cfg, schemaType.Name)...)
-
- return fields, nil
+ return f, nil
}
func getExtraFields(cfg *config.Config, modelName string) []*Field {
@@ -636,29 +653,9 @@ func removeDuplicateTags(t string) string {
return returnTags
}
-// GoFieldHook applies the goField directive to the generated Field f.
+// GoFieldHook is a noop
+// TODO: This will be removed in the next breaking release
func GoFieldHook(td *ast.Definition, fd *ast.FieldDefinition, f *Field) (*Field, error) {
- args := make([]string, 0)
- _ = args
- for _, goField := range fd.Directives.ForNames("goField") {
- if arg := goField.Arguments.ForName("name"); arg != nil {
- if k, err := arg.Value.Value(nil); err == nil {
- f.GoName = k.(string)
- }
- }
-
- if arg := goField.Arguments.ForName("forceResolver"); arg != nil {
- if k, err := arg.Value.Value(nil); err == nil {
- f.IsResolver = k.(bool)
- }
- }
-
- if arg := goField.Arguments.ForName("omittable"); arg != nil {
- if k, err := arg.Value.Value(nil); err == nil {
- f.Omittable = k.(bool)
- }
- }
- }
return f, nil
}
diff --git a/vendor/github.com/99designs/gqlgen/plugin/plugin.go b/vendor/github.com/99designs/gqlgen/plugin/plugin.go
index a5e1ba84..6cc4f6da 100644
--- a/vendor/github.com/99designs/gqlgen/plugin/plugin.go
+++ b/vendor/github.com/99designs/gqlgen/plugin/plugin.go
@@ -22,11 +22,18 @@ type CodeGenerator interface {
}
// EarlySourceInjector is used to inject things that are required for user schema files to compile.
+// Deprecated: Use EarlySourcesInjector instead
type EarlySourceInjector interface {
InjectSourceEarly() *ast.Source
}
+// EarlySourcesInjector is used to inject things that are required for user schema files to compile.
+type EarlySourcesInjector interface {
+ InjectSourcesEarly() ([]*ast.Source, error)
+}
+
// LateSourceInjector is used to inject more sources, after we have loaded the users schema.
+// Deprecated: Use LateSourcesInjector instead
type LateSourceInjector interface {
InjectSourceLate(schema *ast.Schema) *ast.Source
}
@@ -35,3 +42,8 @@ type LateSourceInjector interface {
type ResolverImplementer interface {
Implement(prevImplementation string, field *codegen.Field) string
}
+
+// LateSourcesInjector is used to inject more sources, after we have loaded the users schema.
+type LateSourcesInjector interface {
+ InjectSourcesLate(schema *ast.Schema) ([]*ast.Source, error)
+}
diff --git a/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.go b/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.go
index 38138d52..c0e04089 100644
--- a/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.go
+++ b/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.go
@@ -53,26 +53,44 @@ func (m *Plugin) GenerateCode(data *codegen.Data) error {
func (m *Plugin) generateSingleFile(data *codegen.Data) error {
file := File{}
-
- if _, err := os.Stat(data.Config.Resolver.Filename); err == nil {
- // file already exists and we do not support updating resolvers with layout = single so just return
- return nil
+ rewriter, err := rewrite.New(data.Config.Resolver.Dir())
+ if err != nil {
+ return err
}
for _, o := range data.Objects {
if o.HasResolvers() {
+ caser := cases.Title(language.English, cases.NoLower)
+ rewriter.MarkStructCopied(templates.LcFirst(o.Name) + templates.UcFirst(data.Config.Resolver.Type))
+ rewriter.GetMethodBody(data.Config.Resolver.Type, caser.String(o.Name))
+
file.Objects = append(file.Objects, o)
}
+
for _, f := range o.Fields {
if !f.IsResolver {
continue
}
- resolver := Resolver{o, f, nil, "", `panic("not implemented")`, nil}
- file.Resolvers = append(file.Resolvers, &resolver)
+ structName := templates.LcFirst(o.Name) + templates.UcFirst(data.Config.Resolver.Type)
+ comment := strings.TrimSpace(strings.TrimLeft(rewriter.GetMethodComment(structName, f.GoFieldName), `\`))
+ implementation := strings.TrimSpace(rewriter.GetMethodBody(structName, f.GoFieldName))
+ if implementation != "" {
+ resolver := Resolver{o, f, rewriter.GetPrevDecl(structName, f.GoFieldName), comment, implementation, nil}
+ file.Resolvers = append(file.Resolvers, &resolver)
+ } else {
+ resolver := Resolver{o, f, nil, "", `panic("not implemented")`, nil}
+ file.Resolvers = append(file.Resolvers, &resolver)
+ }
}
}
+ if _, err := os.Stat(data.Config.Resolver.Filename); err == nil {
+ file.name = data.Config.Resolver.Filename
+ file.imports = rewriter.ExistingImports(file.name)
+ file.RemainingSource = rewriter.RemainingSource(file.name)
+ }
+
resolverBuild := &ResolverBuild{
File: &file,
PackageName: data.Config.Resolver.Package,
@@ -88,7 +106,7 @@ func (m *Plugin) generateSingleFile(data *codegen.Data) error {
return templates.Render(templates.Options{
PackageName: data.Config.Resolver.Package,
- FileNotice: `// THIS CODE IS A STARTING POINT ONLY. IT WILL NOT BE UPDATED WITH SCHEMA CHANGES.`,
+ FileNotice: `// THIS CODE WILL BE UPDATED WITH SCHEMA CHANGES. PREVIOUS IMPLEMENTATION FOR SCHEMA CHANGES WILL BE KEPT IN THE COMMENT SECTION. IMPLEMENTATION FOR UNCHANGED SCHEMA WILL BE KEPT.`,
Filename: data.Config.Resolver.Filename,
Data: resolverBuild,
Packages: data.Config.Packages,
diff --git a/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.gotpl b/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.gotpl
index c25bd1d5..ad6c1085 100644
--- a/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.gotpl
+++ b/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.gotpl
@@ -48,5 +48,7 @@
// - When renaming or deleting a resolver the old code will be put in here. You can safely delete
// it when you're done.
// - You have helper methods in this file. Move them out to keep these resolver files clean.
+ /*
{{ .RemainingSource }}
+ */
{{ end }}
diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
index f1262642..f95a504f 100644
--- a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
+++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
@@ -1,5 +1,33 @@
# Changelog
+## 3.3.0 (2024-08-27)
+
+### Added
+
+- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser)
+- #213: nil version equality checking (thanks @KnutZuidema)
+
+### Changed
+
+- #241: Simplify StrictNewVersion parsing (thanks @grosser)
+- Testing support up through Go 1.23
+- Minimum version set to 1.21 as this is what's tested now
+- Fuzz testing now supports caching
+
+## 3.2.1 (2023-04-10)
+
+### Changed
+
+- #198: Improved testing around pre-release names
+- #200: Improved code scanning with addition of CodeQL
+- #201: Testing now includes Go 1.20. Go 1.17 has been dropped
+- #202: Migrated Fuzz testing to Go built-in Fuzzing. CI runs daily
+- #203: Docs updated for security details
+
+### Fixed
+
+- #199: Fixed issue with range transformations
+
## 3.2.0 (2022-11-28)
### Added
diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile
index 0e7b5c71..9ca87a2c 100644
--- a/vendor/github.com/Masterminds/semver/v3/Makefile
+++ b/vendor/github.com/Masterminds/semver/v3/Makefile
@@ -19,6 +19,7 @@ test-cover:
.PHONY: fuzz
fuzz:
@echo "==> Running Fuzz Tests"
+ go env GOCACHE
go test -fuzz=FuzzNewVersion -fuzztime=15s .
go test -fuzz=FuzzStrictNewVersion -fuzztime=15s .
go test -fuzz=FuzzNewConstraint -fuzztime=15s .
@@ -27,4 +28,4 @@ $(GOLANGCI_LINT):
# Install golangci-lint. The configuration for it is in the .golangci.yml
# file in the root of the repository
echo ${GOPATH}
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1
+ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2
diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md
index eab8cac3..ed569360 100644
--- a/vendor/github.com/Masterminds/semver/v3/README.md
+++ b/vendor/github.com/Masterminds/semver/v3/README.md
@@ -13,12 +13,9 @@ Active](https://masterminds.github.io/stability/active.svg)](https://masterminds
[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3)
[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
-If you are looking for a command line tool for version comparisons please see
-[vert](https://github.com/Masterminds/vert) which uses this library.
-
## Package Versions
-Note, import `github.com/github.com/Masterminds/semver/v3` to use the latest version.
+Note, import `github.com/Masterminds/semver/v3` to use the latest version.
There are three major versions fo the `semver` package.
@@ -80,12 +77,12 @@ There are two methods for comparing versions. One uses comparison methods on
differences to notes between these two methods of comparison.
1. When two versions are compared using functions such as `Compare`, `LessThan`,
- and others it will follow the specification and always include prereleases
+ and others it will follow the specification and always include pre-releases
within the comparison. It will provide an answer that is valid with the
comparison section of the spec at https://semver.org/#spec-item-11
2. When constraint checking is used for checks or validation it will follow a
different set of rules that are common for ranges with tools like npm/js
- and Rust/Cargo. This includes considering prereleases to be invalid if the
+ and Rust/Cargo. This includes considering pre-releases to be invalid if the
ranges does not include one. If you want to have it include pre-releases a
simple solution is to include `-0` in your range.
3. Constraint ranges can have some complex rules including the shorthand use of
@@ -113,7 +110,7 @@ v, err := semver.NewVersion("1.3")
if err != nil {
// Handle version not being parsable.
}
-// Check if the version meets the constraints. The a variable will be true.
+// Check if the version meets the constraints. The variable a will be true.
a := c.Check(v)
```
@@ -137,20 +134,20 @@ The basic comparisons are:
### Working With Prerelease Versions
Pre-releases, for those not familiar with them, are used for software releases
-prior to stable or generally available releases. Examples of prereleases include
-development, alpha, beta, and release candidate releases. A prerelease may be
+prior to stable or generally available releases. Examples of pre-releases include
+development, alpha, beta, and release candidate releases. A pre-release may be
a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
-order of precedence, prereleases come before their associated releases. In this
+order of precedence, pre-releases come before their associated releases. In this
example `1.2.3-beta.1 < 1.2.3`.
-According to the Semantic Version specification prereleases may not be
+According to the Semantic Version specification, pre-releases may not be
API compliant with their release counterpart. It says,
> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
-SemVer comparisons using constraints without a prerelease comparator will skip
-prerelease versions. For example, `>=1.2.3` will skip prereleases when looking
-at a list of releases while `>=1.2.3-0` will evaluate and find prereleases.
+SemVer's comparisons using constraints without a pre-release comparator will skip
+pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking
+at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases.
The reason for the `0` as a pre-release version in the example comparison is
because pre-releases can only contain ASCII alphanumerics and hyphens (along with
@@ -171,6 +168,9 @@ These look like:
* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's
+parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`.
+
### Wildcards In Comparisons
The `x`, `X`, and `*` characters can be used as a wildcard character. This works
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
index 7c4bed33..ff499fb6 100644
--- a/vendor/github.com/Masterminds/semver/v3/version.go
+++ b/vendor/github.com/Masterminds/semver/v3/version.go
@@ -83,22 +83,23 @@ func StrictNewVersion(v string) (*Version, error) {
original: v,
}
- // check for prerelease or build metadata
- var extra []string
- if strings.ContainsAny(parts[2], "-+") {
- // Start with the build metadata first as it needs to be on the right
- extra = strings.SplitN(parts[2], "+", 2)
- if len(extra) > 1 {
- // build metadata found
- sv.metadata = extra[1]
- parts[2] = extra[0]
+ // Extract build metadata
+ if strings.Contains(parts[2], "+") {
+ extra := strings.SplitN(parts[2], "+", 2)
+ sv.metadata = extra[1]
+ parts[2] = extra[0]
+ if err := validateMetadata(sv.metadata); err != nil {
+ return nil, err
}
+ }
- extra = strings.SplitN(parts[2], "-", 2)
- if len(extra) > 1 {
- // prerelease found
- sv.pre = extra[1]
- parts[2] = extra[0]
+ // Extract build prerelease
+ if strings.Contains(parts[2], "-") {
+ extra := strings.SplitN(parts[2], "-", 2)
+ sv.pre = extra[1]
+ parts[2] = extra[0]
+ if err := validatePrerelease(sv.pre); err != nil {
+ return nil, err
}
}
@@ -114,7 +115,7 @@ func StrictNewVersion(v string) (*Version, error) {
}
}
- // Extract the major, minor, and patch elements onto the returned Version
+ // Extract major, minor, and patch
var err error
sv.major, err = strconv.ParseUint(parts[0], 10, 64)
if err != nil {
@@ -131,23 +132,6 @@ func StrictNewVersion(v string) (*Version, error) {
return nil, err
}
- // No prerelease or build metadata found so returning now as a fastpath.
- if sv.pre == "" && sv.metadata == "" {
- return sv, nil
- }
-
- if sv.pre != "" {
- if err = validatePrerelease(sv.pre); err != nil {
- return nil, err
- }
- }
-
- if sv.metadata != "" {
- if err = validateMetadata(sv.metadata); err != nil {
- return nil, err
- }
- }
-
return sv, nil
}
@@ -381,15 +365,31 @@ func (v *Version) LessThan(o *Version) bool {
return v.Compare(o) < 0
}
+// LessThanEqual tests if one version is less or equal than another one.
+func (v *Version) LessThanEqual(o *Version) bool {
+ return v.Compare(o) <= 0
+}
+
// GreaterThan tests if one version is greater than another one.
func (v *Version) GreaterThan(o *Version) bool {
return v.Compare(o) > 0
}
+// GreaterThanEqual tests if one version is greater or equal than another one.
+func (v *Version) GreaterThanEqual(o *Version) bool {
+ return v.Compare(o) >= 0
+}
+
// Equal tests if two versions are equal to each other.
// Note, versions can be equal with different metadata since metadata
// is not considered part of the comparable version.
func (v *Version) Equal(o *Version) bool {
+ if v == o {
+ return true
+ }
+ if v == nil || o == nil {
+ return false
+ }
return v.Compare(o) == 0
}
diff --git a/vendor/github.com/adhocore/gronx/README.md b/vendor/github.com/adhocore/gronx/README.md
index 0b3b78f5..00682ee7 100644
--- a/vendor/github.com/adhocore/gronx/README.md
+++ b/vendor/github.com/adhocore/gronx/README.md
@@ -47,6 +47,14 @@ gron.IsDue(expr) // true|false, nil
gron.IsDue(expr, time.Date(2021, time.April, 1, 1, 1, 0, 0, time.UTC)) // true|false, nil
```
+> Validity can be checked without instantiation:
+
+```go
+import "github.com/adhocore/gronx"
+
+gronx.IsValid("* * * * *") // true
+```
+
### Batch Due Check
If you have multiple cron expressions to check due on same reference time use `BatchDue()`:
diff --git a/vendor/github.com/adhocore/gronx/batch.go b/vendor/github.com/adhocore/gronx/batch.go
index 63d85ec2..37fddb94 100644
--- a/vendor/github.com/adhocore/gronx/batch.go
+++ b/vendor/github.com/adhocore/gronx/batch.go
@@ -7,9 +7,9 @@ import (
// Expr represents an item in array for batch check
type Expr struct {
+ Err error
Expr string
Due bool
- Err error
}
// BatchDue checks if multiple expressions are due for given time (or now).
diff --git a/vendor/github.com/adhocore/gronx/gronx.go b/vendor/github.com/adhocore/gronx/gronx.go
index 958d8fb1..82c0baa4 100644
--- a/vendor/github.com/adhocore/gronx/gronx.go
+++ b/vendor/github.com/adhocore/gronx/gronx.go
@@ -75,7 +75,9 @@ func New() *Gronx {
// IsDue checks if cron expression is due for given reference time (or now).
// It returns bool or error if any.
func (g *Gronx) IsDue(expr string, ref ...time.Time) (bool, error) {
- ref = append(ref, time.Now())
+ if len(ref) == 0 {
+ ref = append(ref, time.Now())
+ }
g.C.SetRef(ref[0])
segs, err := Segments(expr)
@@ -157,12 +159,16 @@ func (g *Gronx) SegmentsDue(segs []string) (bool, error) {
return true, nil
}
+// IsValid checks if cron expression is valid.
+// It returns bool.
+func (g *Gronx) IsValid(expr string) bool { return IsValid(expr) }
+
// checker for validity
var checker = &SegmentChecker{ref: time.Now()}
// IsValid checks if cron expression is valid.
// It returns bool.
-func (g *Gronx) IsValid(expr string) bool {
+func IsValid(expr string) bool {
segs, err := Segments(expr)
if err != nil {
return false
diff --git a/vendor/github.com/adhocore/gronx/validator.go b/vendor/github.com/adhocore/gronx/validator.go
index 62c3a363..d7441d4a 100644
--- a/vendor/github.com/adhocore/gronx/validator.go
+++ b/vendor/github.com/adhocore/gronx/validator.go
@@ -14,7 +14,7 @@ func inStep(val int, s string, bounds []int) (bool, error) {
if err != nil {
return false, err
}
- if step == 0 {
+ if step <= 0 {
return false, errors.New("step can't be 0")
}
diff --git a/vendor/github.com/agnivade/levenshtein/.travis.yml b/vendor/github.com/agnivade/levenshtein/.travis.yml
deleted file mode 100644
index 0873fa98..00000000
--- a/vendor/github.com/agnivade/levenshtein/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-language: go
-
-# See https://travis-ci.community/t/goos-js-goarch-wasm-go-run-fails-panic-newosproc-not-implemented/1651
-#addons:
-# chrome: stable
-
-before_install:
-- export GO111MODULE=on
-
-#install:
-#- go get github.com/agnivade/wasmbrowsertest
-#- mv $GOPATH/bin/wasmbrowsertest $GOPATH/bin/go_js_wasm_exec
-#- export PATH=$GOPATH/bin:$PATH
-
-go:
-- 1.13.x
-- 1.14.x
-- 1.15.x
-- tip
-
-script:
-#- GOOS=js GOARCH=wasm go test -v
-- go test -v
diff --git a/vendor/github.com/agnivade/levenshtein/Makefile b/vendor/github.com/agnivade/levenshtein/Makefile
index 5f6890d6..3bbda319 100644
--- a/vendor/github.com/agnivade/levenshtein/Makefile
+++ b/vendor/github.com/agnivade/levenshtein/Makefile
@@ -4,12 +4,10 @@ install:
go install
lint:
- gofmt -l -s -w . && go vet . && golint -set_exit_status=1 .
+ gofmt -l -s -w . && go vet .
-test: # The first 2 go gets are to support older Go versions
- go get github.com/arbovm/levenshtein
- go get github.com/dgryski/trifles/leven
- GO111MODULE=on go test -race -v -coverprofile=coverage.txt -covermode=atomic
+test:
+ go test -race -v -coverprofile=coverage.txt -covermode=atomic
bench:
go test -run=XXX -bench=. -benchmem -count=5
diff --git a/vendor/github.com/agnivade/levenshtein/README.md b/vendor/github.com/agnivade/levenshtein/README.md
index 13c52a21..34378aab 100644
--- a/vendor/github.com/agnivade/levenshtein/README.md
+++ b/vendor/github.com/agnivade/levenshtein/README.md
@@ -1,4 +1,4 @@
-levenshtein [![Build Status](https://travis-ci.org/agnivade/levenshtein.svg?branch=master)](https://travis-ci.org/agnivade/levenshtein) [![Go Report Card](https://goreportcard.com/badge/github.com/agnivade/levenshtein)](https://goreportcard.com/report/github.com/agnivade/levenshtein) [![PkgGoDev](https://pkg.go.dev/badge/github.com/agnivade/levenshtein)](https://pkg.go.dev/github.com/agnivade/levenshtein)
+levenshtein ![Build Status](https://github.com/agnivade/levenshtein/actions/workflows/ci.yml/badge.svg) [![Go Report Card](https://goreportcard.com/badge/github.com/agnivade/levenshtein)](https://goreportcard.com/report/github.com/agnivade/levenshtein) [![PkgGoDev](https://pkg.go.dev/badge/github.com/agnivade/levenshtein)](https://pkg.go.dev/github.com/agnivade/levenshtein)
===========
[Go](http://golang.org) package to calculate the [Levenshtein Distance](http://en.wikipedia.org/wiki/Levenshtein_distance)
diff --git a/vendor/github.com/agnivade/levenshtein/levenshtein.go b/vendor/github.com/agnivade/levenshtein/levenshtein.go
index f727a66f..861f409d 100644
--- a/vendor/github.com/agnivade/levenshtein/levenshtein.go
+++ b/vendor/github.com/agnivade/levenshtein/levenshtein.go
@@ -41,6 +41,25 @@ func ComputeDistance(a, b string) int {
if len(s1) > len(s2) {
s1, s2 = s2, s1
}
+
+ // remove trailing identical runes.
+ for i := 0; i < len(s1); i++ {
+ if s1[len(s1)-1-i] != s2[len(s2)-1-i] {
+ s1 = s1[:len(s1)-i]
+ s2 = s2[:len(s2)-i]
+ break
+ }
+ }
+
+ // Remove leading identical runes.
+ for i := 0; i < len(s1); i++ {
+ if s1[i] != s2[i] {
+ s1 = s1[i:]
+ s2 = s2[i:]
+ break
+ }
+ }
+
lenS1 := len(s1)
lenS2 := len(s2)
@@ -71,7 +90,7 @@ func ComputeDistance(a, b string) int {
for j := 1; j <= lenS1; j++ {
current := x[j-1] // match
if s2[i-1] != s1[j-1] {
- current = min(min(x[j-1]+1, prev+1), x[j]+1)
+ current = min(x[j-1]+1, prev+1, x[j]+1)
}
x[j-1] = prev
prev = current
@@ -80,10 +99,3 @@ func ComputeDistance(a, b string) int {
}
return int(x[lenS1])
}
-
-func min(a, b uint16) uint16 {
- if a < b {
- return a
- }
- return b
-}
diff --git a/vendor/github.com/caddyserver/certmagic/account.go b/vendor/github.com/caddyserver/certmagic/account.go
index f3b8d44d..0c43ad63 100644
--- a/vendor/github.com/caddyserver/certmagic/account.go
+++ b/vendor/github.com/caddyserver/certmagic/account.go
@@ -33,6 +33,7 @@ import (
"sync"
"github.com/mholt/acmez/v2/acme"
+ "go.uber.org/zap"
)
// getAccount either loads or creates a new account, depending on if
@@ -40,8 +41,15 @@ import (
func (am *ACMEIssuer) getAccount(ctx context.Context, ca, email string) (acme.Account, error) {
acct, err := am.loadAccount(ctx, ca, email)
if errors.Is(err, fs.ErrNotExist) {
+ am.Logger.Info("creating new account because no account for configured email is known to us",
+ zap.String("email", email),
+ zap.String("ca", ca),
+ zap.Error(err))
return am.newAccount(email)
}
+ am.Logger.Debug("using existing ACME account because key found in storage associated with email",
+ zap.String("email", email),
+ zap.String("ca", ca))
return acct, err
}
@@ -407,6 +415,15 @@ func (am *ACMEIssuer) mostRecentAccountEmail(ctx context.Context, caURL string)
return getPrimaryContact(account), true
}
+func accountRegLockKey(acc acme.Account) string {
+ key := "register_acme_account"
+ if len(acc.Contact) == 0 {
+ return key
+ }
+ key += "_" + getPrimaryContact(acc)
+ return key
+}
+
// getPrimaryContact returns the first contact on the account (if any)
// without the scheme. (I guess we assume an email address.)
func getPrimaryContact(account acme.Account) string {
diff --git a/vendor/github.com/caddyserver/certmagic/acmeclient.go b/vendor/github.com/caddyserver/certmagic/acmeclient.go
index 031aaa11..c6e1f6ed 100644
--- a/vendor/github.com/caddyserver/certmagic/acmeclient.go
+++ b/vendor/github.com/caddyserver/certmagic/acmeclient.go
@@ -50,77 +50,123 @@ func (iss *ACMEIssuer) newACMEClientWithAccount(ctx context.Context, useTestCA,
return nil, err
}
- // look up or create the ACME account
- var account acme.Account
- if iss.AccountKeyPEM != "" {
- account, err = iss.GetAccount(ctx, []byte(iss.AccountKeyPEM))
- } else {
- account, err = iss.getAccount(ctx, client.Directory, iss.getEmail())
+ // we try loading the account from storage before a potential
+ // lock, and after obtaining the lock as well, to ensure we don't
+ // repeat work done by another instance or goroutine
+ getAccount := func() (acme.Account, error) {
+ // look up or create the ACME account
+ var account acme.Account
+ if iss.AccountKeyPEM != "" {
+ iss.Logger.Info("using configured ACME account")
+ account, err = iss.GetAccount(ctx, []byte(iss.AccountKeyPEM))
+ } else {
+ account, err = iss.getAccount(ctx, client.Directory, iss.getEmail())
+ }
+ if err != nil {
+ return acme.Account{}, fmt.Errorf("getting ACME account: %v", err)
+ }
+ return account, nil
}
+
+ // first try getting the account
+ account, err := getAccount()
if err != nil {
- return nil, fmt.Errorf("getting ACME account: %v", err)
+ return nil, err
}
// register account if it is new
if account.Status == "" {
- if iss.NewAccountFunc != nil {
- // obtain lock here, since NewAccountFunc calls happen concurrently and they typically read and change the issuer
- iss.mu.Lock()
- account, err = iss.NewAccountFunc(ctx, iss, account)
- iss.mu.Unlock()
- if err != nil {
- return nil, fmt.Errorf("account pre-registration callback: %v", err)
+ iss.Logger.Info("ACME account has empty status; registering account with ACME server",
+ zap.Strings("contact", account.Contact),
+ zap.String("location", account.Location))
+
+ // synchronize this so the account is only created once
+ acctLockKey := accountRegLockKey(account)
+ err = acquireLock(ctx, iss.config.Storage, acctLockKey)
+ if err != nil {
+ return nil, fmt.Errorf("locking account registration: %v", err)
+ }
+ defer func() {
+ if err := releaseLock(ctx, iss.config.Storage, acctLockKey); err != nil {
+ iss.Logger.Error("failed to unlock account registration lock", zap.Error(err))
}
+ }()
+
+ // if we're not the only one waiting for this account, then by this point it should already be registered and in storage; reload it
+ account, err = getAccount()
+ if err != nil {
+ return nil, err
}
- // agree to terms
- if interactive {
- if !iss.isAgreed() {
- var termsURL string
- dir, err := client.GetDirectory(ctx)
+ // if we are the only or first one waiting for this account, then proceed to register it while we have the lock
+ if account.Status == "" {
+ if iss.NewAccountFunc != nil {
+ // obtain lock here, since NewAccountFunc calls happen concurrently and they typically read and change the issuer
+ iss.mu.Lock()
+ account, err = iss.NewAccountFunc(ctx, iss, account)
+ iss.mu.Unlock()
if err != nil {
- return nil, fmt.Errorf("getting directory: %w", err)
- }
- if dir.Meta != nil {
- termsURL = dir.Meta.TermsOfService
+ return nil, fmt.Errorf("account pre-registration callback: %v", err)
}
- if termsURL != "" {
- agreed := iss.askUserAgreement(termsURL)
- if !agreed {
- return nil, fmt.Errorf("user must agree to CA terms")
+ }
+
+ // agree to terms
+ if interactive {
+ if !iss.isAgreed() {
+ var termsURL string
+ dir, err := client.GetDirectory(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("getting directory: %w", err)
+ }
+ if dir.Meta != nil {
+ termsURL = dir.Meta.TermsOfService
+ }
+ if termsURL != "" {
+ agreed := iss.askUserAgreement(termsURL)
+ if !agreed {
+ return nil, fmt.Errorf("user must agree to CA terms")
+ }
+ iss.mu.Lock()
+ iss.agreed = agreed
+ iss.mu.Unlock()
}
- iss.mu.Lock()
- iss.agreed = agreed
- iss.mu.Unlock()
}
+ } else {
+ // can't prompt a user who isn't there; they should
+ // have reviewed the terms beforehand
+ iss.mu.Lock()
+ iss.agreed = true
+ iss.mu.Unlock()
}
- } else {
- // can't prompt a user who isn't there; they should
- // have reviewed the terms beforehand
- iss.mu.Lock()
- iss.agreed = true
- iss.mu.Unlock()
- }
- account.TermsOfServiceAgreed = iss.isAgreed()
+ account.TermsOfServiceAgreed = iss.isAgreed()
- // associate account with external binding, if configured
- if iss.ExternalAccount != nil {
- err := account.SetExternalAccountBinding(ctx, client.Client, *iss.ExternalAccount)
- if err != nil {
- return nil, err
+ // associate account with external binding, if configured
+ if iss.ExternalAccount != nil {
+ err := account.SetExternalAccountBinding(ctx, client.Client, *iss.ExternalAccount)
+ if err != nil {
+ return nil, err
+ }
}
- }
- // create account
- account, err = client.NewAccount(ctx, account)
- if err != nil {
- return nil, fmt.Errorf("registering account %v with server: %w", account.Contact, err)
- }
+ // create account
+ account, err = client.NewAccount(ctx, account)
+ if err != nil {
+ return nil, fmt.Errorf("registering account %v with server: %w", account.Contact, err)
+ }
+ iss.Logger.Info("new ACME account registered",
+ zap.Strings("contact", account.Contact),
+ zap.String("status", account.Status))
- // persist the account to storage
- err = iss.saveAccount(ctx, client.Directory, account)
- if err != nil {
- return nil, fmt.Errorf("could not save account %v: %v", account.Contact, err)
+ // persist the account to storage
+ err = iss.saveAccount(ctx, client.Directory, account)
+ if err != nil {
+ return nil, fmt.Errorf("could not save account %v: %v", account.Contact, err)
+ }
+ } else {
+ iss.Logger.Info("account has already been registered; reloaded",
+ zap.Strings("contact", account.Contact),
+ zap.String("status", account.Status),
+ zap.String("location", account.Location))
}
}
diff --git a/vendor/github.com/caddyserver/certmagic/acmeissuer.go b/vendor/github.com/caddyserver/certmagic/acmeissuer.go
index 87fa5ff5..e010f087 100644
--- a/vendor/github.com/caddyserver/certmagic/acmeissuer.go
+++ b/vendor/github.com/caddyserver/certmagic/acmeissuer.go
@@ -461,7 +461,7 @@ func (am *ACMEIssuer) doIssue(ctx context.Context, csr *x509.CertificateRequest,
// between client and server or some sort of bookkeeping error with regards to the certID
// and the server is rejecting the ARI certID. In any case, an invalid certID may cause
// orders to fail. So try once without setting it.
- if !usingTestCA && attempts != 2 {
+ if !am.config.DisableARI && !usingTestCA && attempts != 2 {
if replacing, ok := ctx.Value(ctxKeyARIReplaces).(*x509.Certificate); ok {
params.Replaces = replacing
}
diff --git a/vendor/github.com/caddyserver/certmagic/certificates.go b/vendor/github.com/caddyserver/certmagic/certificates.go
index a5147a2d..2965712a 100644
--- a/vendor/github.com/caddyserver/certmagic/certificates.go
+++ b/vendor/github.com/caddyserver/certmagic/certificates.go
@@ -103,53 +103,54 @@ func (cfg *Config) certNeedsRenewal(leaf *x509.Certificate, ari acme.RenewalInfo
logger = zap.NewNop()
}
- // first check ARI: if it says it's time to renew, it's time to renew
- // (notice that we don't strictly require an ARI window to also exist; we presume
- // that if a time has been selected, a window does or did exist, even if it didn't
- // get stored/encoded for some reason - but also: this allows administrators to
- // manually or explicitly schedule a renewal time indepedently of ARI which could
- // be useful)
- selectedTime := ari.SelectedTime
-
- // if, for some reason a random time in the window hasn't been selected yet, but an ARI
- // window does exist, we can always improvise one... even if this is called repeatedly,
- // a random time is a random time, whether you generate it once or more :D
- // (code borrowed from our acme package)
- if selectedTime.IsZero() &&
- (!ari.SuggestedWindow.Start.IsZero() && !ari.SuggestedWindow.End.IsZero()) {
- start, end := ari.SuggestedWindow.Start.Unix()+1, ari.SuggestedWindow.End.Unix()
- selectedTime = time.Unix(rand.Int63n(end-start)+start, 0).UTC()
- logger.Warn("no renewal time had been selected with ARI; chose an ephemeral one for now",
- zap.Time("ephemeral_selected_time", selectedTime))
- }
-
- // if a renewal time has been selected, start with that
- if !selectedTime.IsZero() {
- // ARI spec recommends an algorithm that renews after the randomly-selected
- // time OR just before it if the next waking time would be after it; this
- // cutoff can actually be before the start of the renewal window, but the spec
- // author says that's OK: https://github.com/aarongable/draft-acme-ari/issues/71
- cutoff := ari.SelectedTime.Add(-cfg.certCache.options.RenewCheckInterval)
- if time.Now().After(cutoff) {
- logger.Info("certificate needs renewal based on ARI window",
- zap.Time("selected_time", selectedTime),
- zap.Time("renewal_cutoff", cutoff))
- return true
+ if !cfg.DisableARI {
+ // first check ARI: if it says it's time to renew, it's time to renew
+ // (notice that we don't strictly require an ARI window to also exist; we presume
+ // that if a time has been selected, a window does or did exist, even if it didn't
+ // get stored/encoded for some reason - but also: this allows administrators to
+ // manually or explicitly schedule a renewal time independently of ARI which could
+ // be useful)
+ selectedTime := ari.SelectedTime
+
+ // if, for some reason a random time in the window hasn't been selected yet, but an ARI
+ // window does exist, we can always improvise one... even if this is called repeatedly,
+ // a random time is a random time, whether you generate it once or more :D
+ // (code borrowed from our acme package)
+ if selectedTime.IsZero() &&
+ (!ari.SuggestedWindow.Start.IsZero() && !ari.SuggestedWindow.End.IsZero()) {
+ start, end := ari.SuggestedWindow.Start.Unix()+1, ari.SuggestedWindow.End.Unix()
+ selectedTime = time.Unix(rand.Int63n(end-start)+start, 0).UTC()
+ logger.Warn("no renewal time had been selected with ARI; chose an ephemeral one for now",
+ zap.Time("ephemeral_selected_time", selectedTime))
}
- // according to ARI, we are not ready to renew; however, we do not rely solely on
- // ARI calculations... what if there is a bug in our implementation, or in the
- // server's, or the stored metadata? for redundancy, give credence to the expiration
- // date; ignore ARI if we are past a "dangerously close" limit, to avoid any
- // possibility of a bug in ARI compromising a site's uptime: we should always always
- // always give heed to actual validity period
- if currentlyInRenewalWindow(leaf.NotBefore, expiration, 1.0/20.0) {
- logger.Warn("certificate is in emergency renewal window; superceding ARI",
- zap.Duration("remaining", time.Until(expiration)),
- zap.Time("renewal_cutoff", cutoff))
- return true
+ // if a renewal time has been selected, start with that
+ if !selectedTime.IsZero() {
+ // ARI spec recommends an algorithm that renews after the randomly-selected
+ // time OR just before it if the next waking time would be after it; this
+ // cutoff can actually be before the start of the renewal window, but the spec
+ // author says that's OK: https://github.com/aarongable/draft-acme-ari/issues/71
+ cutoff := ari.SelectedTime.Add(-cfg.certCache.options.RenewCheckInterval)
+ if time.Now().After(cutoff) {
+ logger.Info("certificate needs renewal based on ARI window",
+ zap.Time("selected_time", selectedTime),
+ zap.Time("renewal_cutoff", cutoff))
+ return true
+ }
+
+ // according to ARI, we are not ready to renew; however, we do not rely solely on
+ // ARI calculations... what if there is a bug in our implementation, or in the
+ // server's, or the stored metadata? for redundancy, give credence to the expiration
+ // date; ignore ARI if we are past a "dangerously close" limit, to avoid any
+ // possibility of a bug in ARI compromising a site's uptime: we should always always
+ // always give heed to actual validity period
+ if currentlyInRenewalWindow(leaf.NotBefore, expiration, 1.0/20.0) {
+ logger.Warn("certificate is in emergency renewal window; superceding ARI",
+ zap.Duration("remaining", time.Until(expiration)),
+ zap.Time("renewal_cutoff", cutoff))
+ return true
+ }
}
-
}
// the normal check, in the absence of ARI, is to determine if we're near enough (or past)
@@ -552,6 +553,7 @@ func SubjectIsInternal(subj string) bool {
return subj == "localhost" ||
strings.HasSuffix(subj, ".localhost") ||
strings.HasSuffix(subj, ".local") ||
+ strings.HasSuffix(subj, ".internal") ||
strings.HasSuffix(subj, ".home.arpa") ||
isInternalIP(subj)
}
diff --git a/vendor/github.com/caddyserver/certmagic/config.go b/vendor/github.com/caddyserver/certmagic/config.go
index 5a9cf498..a7771848 100644
--- a/vendor/github.com/caddyserver/certmagic/config.go
+++ b/vendor/github.com/caddyserver/certmagic/config.go
@@ -149,6 +149,10 @@ type Config struct {
// EXPERIMENTAL: Subject to change or removal.
SubjectTransformer func(ctx context.Context, domain string) string
+ // Disables both ARI fetching and the use of ARI for renewal decisions.
+ // TEMPORARY: Will likely be removed in the future.
+ DisableARI bool
+
// Set a logger to enable logging. If not set,
// a default logger will be created.
Logger *zap.Logger
@@ -370,9 +374,11 @@ func (cfg *Config) manageAll(ctx context.Context, domainNames []string, async bo
}
for _, domainName := range domainNames {
+ domainName = normalizedName(domainName)
+
// if on-demand is configured, defer obtain and renew operations
if cfg.OnDemand != nil {
- cfg.OnDemand.hostAllowlist[normalizedName(domainName)] = struct{}{}
+ cfg.OnDemand.hostAllowlist[domainName] = struct{}{}
continue
}
@@ -449,7 +455,7 @@ func (cfg *Config) manageOne(ctx context.Context, domainName string, async bool)
// ensure ARI is updated before we check whether the cert needs renewing
// (we ignore the second return value because we already check if needs renewing anyway)
- if cert.ari.NeedsRefresh() {
+ if !cfg.DisableARI && cert.ari.NeedsRefresh() {
cert, _, err = cfg.updateARI(ctx, cert, cfg.Logger)
if err != nil {
cfg.Logger.Error("updating ARI upon managing", zap.Error(err))
@@ -886,11 +892,13 @@ func (cfg *Config) renewCert(ctx context.Context, name string, force, interactiv
// if we're renewing with the same ACME CA as before, have the ACME
// client tell the server we are replacing a certificate (but doing
// this on the wrong CA, or when the CA doesn't recognize the certID,
- // can fail the order)
- if acmeData, err := certRes.getACMEData(); err == nil && acmeData.CA != "" {
- if acmeIss, ok := issuer.(*ACMEIssuer); ok {
- if acmeIss.CA == acmeData.CA {
- ctx = context.WithValue(ctx, ctxKeyARIReplaces, leaf)
+ // can fail the order) -- TODO: change this check to whether we're using the same ACME account, not CA
+ if !cfg.DisableARI {
+ if acmeData, err := certRes.getACMEData(); err == nil && acmeData.CA != "" {
+ if acmeIss, ok := issuer.(*ACMEIssuer); ok {
+ if acmeIss.CA == acmeData.CA {
+ ctx = context.WithValue(ctx, ctxKeyARIReplaces, leaf)
+ }
}
}
}
@@ -982,23 +990,26 @@ func (cfg *Config) generateCSR(privateKey crypto.PrivateKey, sans []string, useC
csrTemplate := new(x509.CertificateRequest)
for _, name := range sans {
+ // identifiers should be converted to punycode before going into the CSR
+ // (convert IDNs to ASCII according to RFC 5280 section 7)
+ normalizedName, err := idna.ToASCII(name)
+ if err != nil {
+ return nil, fmt.Errorf("converting identifier '%s' to ASCII: %v", name, err)
+ }
+
// TODO: This is a temporary hack to support ZeroSSL API...
- if useCN && csrTemplate.Subject.CommonName == "" && len(name) <= 64 {
- csrTemplate.Subject.CommonName = name
+ if useCN && csrTemplate.Subject.CommonName == "" && len(normalizedName) <= 64 {
+ csrTemplate.Subject.CommonName = normalizedName
continue
}
- if ip := net.ParseIP(name); ip != nil {
+
+ if ip := net.ParseIP(normalizedName); ip != nil {
csrTemplate.IPAddresses = append(csrTemplate.IPAddresses, ip)
- } else if strings.Contains(name, "@") {
- csrTemplate.EmailAddresses = append(csrTemplate.EmailAddresses, name)
- } else if u, err := url.Parse(name); err == nil && strings.Contains(name, "/") {
+ } else if strings.Contains(normalizedName, "@") {
+ csrTemplate.EmailAddresses = append(csrTemplate.EmailAddresses, normalizedName)
+ } else if u, err := url.Parse(normalizedName); err == nil && strings.Contains(normalizedName, "/") {
csrTemplate.URIs = append(csrTemplate.URIs, u)
} else {
- // convert IDNs to ASCII according to RFC 5280 section 7
- normalizedName, err := idna.ToASCII(name)
- if err != nil {
- return nil, fmt.Errorf("converting identifier '%s' to ASCII: %v", name, err)
- }
csrTemplate.DNSNames = append(csrTemplate.DNSNames, normalizedName)
}
}
@@ -1007,6 +1018,16 @@ func (cfg *Config) generateCSR(privateKey crypto.PrivateKey, sans []string, useC
csrTemplate.ExtraExtensions = append(csrTemplate.ExtraExtensions, mustStapleExtension)
}
+ // IP addresses aren't printed here because I'm too lazy to marshal them as strings, but
+ // we at least print the incoming SANs so it should be obvious what became IPs
+ cfg.Logger.Debug("created CSR",
+ zap.Strings("identifiers", sans),
+ zap.Strings("san_dns_names", csrTemplate.DNSNames),
+ zap.Strings("san_emails", csrTemplate.EmailAddresses),
+ zap.String("common_name", csrTemplate.Subject.CommonName),
+ zap.Int("extra_extensions", len(csrTemplate.ExtraExtensions)),
+ )
+
csrDER, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, privateKey)
if err != nil {
return nil, err
@@ -1244,8 +1265,10 @@ func (cfg *Config) managedCertNeedsRenewal(certRes CertificateResource, emitLogs
return 0, nil, true
}
var ari acme.RenewalInfo
- if ariPtr, err := certRes.getARI(); err == nil && ariPtr != nil {
- ari = *ariPtr
+ if !cfg.DisableARI {
+ if ariPtr, err := certRes.getARI(); err == nil && ariPtr != nil {
+ ari = *ariPtr
+ }
}
remaining := time.Until(expiresAt(certChain[0]))
return remaining, certChain[0], cfg.certNeedsRenewal(certChain[0], ari, emitLogs)
diff --git a/vendor/github.com/caddyserver/certmagic/filestorage.go b/vendor/github.com/caddyserver/certmagic/filestorage.go
index f6f13603..d3df9cf7 100644
--- a/vendor/github.com/caddyserver/certmagic/filestorage.go
+++ b/vendor/github.com/caddyserver/certmagic/filestorage.go
@@ -27,6 +27,8 @@ import (
"path/filepath"
"runtime"
"time"
+
+ "github.com/caddyserver/certmagic/internal/atomicfile"
)
// FileStorage facilitates forming file paths derived from a root
@@ -82,12 +84,30 @@ func (s *FileStorage) Store(_ context.Context, key string, value []byte) error {
if err != nil {
return err
}
- return os.WriteFile(filename, value, 0600)
+ fp, err := atomicfile.New(filename, 0o600)
+ if err != nil {
+ return err
+ }
+ _, err = fp.Write(value)
+ if err != nil {
+ // cancel the write
+ fp.Cancel()
+ return err
+ }
+ // close, thereby flushing the write
+ return fp.Close()
}
// Load retrieves the value at key.
func (s *FileStorage) Load(_ context.Context, key string) ([]byte, error) {
- return os.ReadFile(s.Filename(key))
+ // i believe it's possible for the read call to error but still return bytes, in event of something like a short read?
+ // therefore, i think it's appropriate to not return any bytes to avoid downstream users of the package erroneously believing that
+ // bytes read + error is a valid response (it should not be)
+ xs, err := os.ReadFile(s.Filename(key))
+ if err != nil {
+ return nil, err
+ }
+ return xs, nil
}
// Delete deletes the value at key.
diff --git a/vendor/github.com/caddyserver/certmagic/handshake.go b/vendor/github.com/caddyserver/certmagic/handshake.go
index fd576995..1ff0ce27 100644
--- a/vendor/github.com/caddyserver/certmagic/handshake.go
+++ b/vendor/github.com/caddyserver/certmagic/handshake.go
@@ -582,7 +582,7 @@ func (cfg *Config) handshakeMaintenance(ctx context.Context, hello *tls.ClientHe
}
// Check ARI status
- if cert.ari.NeedsRefresh() {
+ if !cfg.DisableARI && cert.ari.NeedsRefresh() {
// we ignore the second return value here because we go on to check renewal status below regardless
var err error
cert, _, err = cfg.updateARI(ctx, cert, logger)
diff --git a/vendor/github.com/caddyserver/certmagic/internal/atomicfile/README b/vendor/github.com/caddyserver/certmagic/internal/atomicfile/README
new file mode 100644
index 00000000..17d04ddd
--- /dev/null
+++ b/vendor/github.com/caddyserver/certmagic/internal/atomicfile/README
@@ -0,0 +1,11 @@
+# atomic file
+
+
+this is copied from
+
+https://github.com/containerd/containerd/blob/main/pkg%2Fatomicfile%2Ffile.go
+
+
+see
+
+https://github.com/caddyserver/certmagic/issues/296
diff --git a/vendor/github.com/caddyserver/certmagic/internal/atomicfile/file.go b/vendor/github.com/caddyserver/certmagic/internal/atomicfile/file.go
new file mode 100644
index 00000000..7b870f7a
--- /dev/null
+++ b/vendor/github.com/caddyserver/certmagic/internal/atomicfile/file.go
@@ -0,0 +1,148 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+Package atomicfile provides a mechanism (on Unix-like platforms) to present a consistent view of a file to separate
+processes even while the file is being written. This is accomplished by writing a temporary file, syncing to disk, and
+renaming over the destination file name.
+
+Partial/inconsistent reads can occur due to:
+ 1. A process attempting to read the file while it is being written to (both in the case of a new file with a
+ short/incomplete write or in the case of an existing, updated file where new bytes may be written at the beginning
+ but old bytes may still be present after).
+ 2. Concurrent goroutines leading to multiple active writers of the same file.
+
+The above mechanism explicitly protects against (1) as all writes are to a file with a temporary name.
+
+There is no explicit protection against multiple, concurrent goroutines attempting to write the same file. However,
+atomically writing the file should mean only one writer will "win" and a consistent file will be visible.
+
+Note: atomicfile is partially implemented for Windows. The Windows codepath performs the same operations, however
+Windows does not guarantee that a rename operation is atomic; a crash in the middle may leave the destination file
+truncated rather than with the expected content.
+*/
+package atomicfile
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+)
+
+// File is an io.ReadWriteCloser that can also be Canceled if a change needs to be abandoned.
+type File interface {
+ io.ReadWriteCloser
+ // Cancel abandons a change to a file. This can be called if a write fails or another error occurs.
+ Cancel() error
+}
+
+// ErrClosed is returned if Read or Write are called on a closed File.
+var ErrClosed = errors.New("file is closed")
+
+// New returns a new atomic file. On Unix-like platforms, the writer (an io.ReadWriteCloser) is backed by a temporary
+// file placed into the same directory as the destination file (using filepath.Dir to split the directory from the
+// name). On a call to Close the temporary file is synced to disk and renamed to its final name, hiding any previous
+// file by the same name.
+//
+// Note: Take care to call Close and handle any errors that are returned. Errors returned from Close may indicate that
+// the file was not written with its final name.
+func New(name string, mode os.FileMode) (File, error) {
+ return newFile(name, mode)
+}
+
+type atomicFile struct {
+ name string
+ f *os.File
+ closed bool
+ closedMu sync.RWMutex
+}
+
+func newFile(name string, mode os.FileMode) (File, error) {
+ dir := filepath.Dir(name)
+ f, err := os.CreateTemp(dir, "")
+ if err != nil {
+ return nil, fmt.Errorf("failed to create temp file: %w", err)
+ }
+ if err := f.Chmod(mode); err != nil {
+ return nil, fmt.Errorf("failed to change temp file permissions: %w", err)
+ }
+ return &atomicFile{name: name, f: f}, nil
+}
+
+func (a *atomicFile) Close() (err error) {
+ a.closedMu.Lock()
+ defer a.closedMu.Unlock()
+
+ if a.closed {
+ return nil
+ }
+ a.closed = true
+
+ defer func() {
+ if err != nil {
+ _ = os.Remove(a.f.Name()) // ignore errors
+ }
+ }()
+ // The order of operations here is:
+ // 1. sync
+ // 2. close
+ // 3. rename
+ // While the ordering of 2 and 3 is not important on Unix-like operating systems, Windows cannot rename an open
+ // file. By closing first, we allow the rename operation to succeed.
+ if err = a.f.Sync(); err != nil {
+ return fmt.Errorf("failed to sync temp file %q: %w", a.f.Name(), err)
+ }
+ if err = a.f.Close(); err != nil {
+ return fmt.Errorf("failed to close temp file %q: %w", a.f.Name(), err)
+ }
+ if err = os.Rename(a.f.Name(), a.name); err != nil {
+ return fmt.Errorf("failed to rename %q to %q: %w", a.f.Name(), a.name, err)
+ }
+ return nil
+}
+
+func (a *atomicFile) Cancel() error {
+ a.closedMu.Lock()
+ defer a.closedMu.Unlock()
+
+ if a.closed {
+ return nil
+ }
+ a.closed = true
+ _ = a.f.Close() // ignore error
+ return os.Remove(a.f.Name())
+}
+
+func (a *atomicFile) Read(p []byte) (n int, err error) {
+ a.closedMu.RLock()
+ defer a.closedMu.RUnlock()
+ if a.closed {
+ return 0, ErrClosed
+ }
+ return a.f.Read(p)
+}
+
+func (a *atomicFile) Write(p []byte) (n int, err error) {
+ a.closedMu.RLock()
+ defer a.closedMu.RUnlock()
+ if a.closed {
+ return 0, ErrClosed
+ }
+ return a.f.Write(p)
+}
diff --git a/vendor/github.com/caddyserver/certmagic/maintain.go b/vendor/github.com/caddyserver/certmagic/maintain.go
index 88d36531..dea2cfdf 100644
--- a/vendor/github.com/caddyserver/certmagic/maintain.go
+++ b/vendor/github.com/caddyserver/certmagic/maintain.go
@@ -136,7 +136,7 @@ func (certCache *Cache) RenewManagedCertificates(ctx context.Context) error {
}
// ACME-specific: see if if ACME Renewal Info (ARI) window needs refreshing
- if cert.ari.NeedsRefresh() {
+ if !cfg.DisableARI && cert.ari.NeedsRefresh() {
configs[cert.hash] = cfg
ariQueue = append(ariQueue, cert)
}
@@ -427,7 +427,7 @@ func (cfg *Config) storageHasNewerARI(ctx context.Context, cert Certificate) (bo
// or if the one in storage has a later RetryAfter (though I suppose
// it's not guaranteed, typically those will move forward in time)
if (!cert.ari.HasWindow() && storedCertData.RenewalInfo.HasWindow()) ||
- storedCertData.RenewalInfo.RetryAfter.After(*cert.ari.RetryAfter) {
+ (cert.ari.RetryAfter == nil || storedCertData.RenewalInfo.RetryAfter.After(*cert.ari.RetryAfter)) {
return true, *storedCertData.RenewalInfo, nil
}
return false, acme.RenewalInfo{}, nil
@@ -459,6 +459,9 @@ func (cfg *Config) loadStoredACMECertificateMetadata(ctx context.Context, cert C
// updated in the cache. The certificate with the updated ARI is returned. If true is
// returned, the ARI window or selected time has changed, and the caller should check if
// the cert needs to be renewed now, even if there is an error.
+//
+// This will always try to refresh ARI without checking if it needs to be refreshed. Call
+// NeedsRefresh() on the RenewalInfo first, and only call this if that returns true.
func (cfg *Config) updateARI(ctx context.Context, cert Certificate, logger *zap.Logger) (updatedCert Certificate, changed bool, err error) {
logger = logger.With(
zap.Strings("identifiers", cert.Names),
@@ -469,6 +472,17 @@ func (cfg *Config) updateARI(ctx context.Context, cert Certificate, logger *zap.
updatedCert = cert
oldARI := cert.ari
+ // synchronize ARI fetching; see #297
+ lockName := "ari_" + cert.ari.UniqueIdentifier
+ if err := acquireLock(ctx, cfg.Storage, lockName); err != nil {
+ return cert, false, fmt.Errorf("unable to obtain ARI lock: %v", err)
+ }
+ defer func() {
+ if err := releaseLock(ctx, cfg.Storage, lockName); err != nil {
+ logger.Error("unable to release ARI lock", zap.Error(err))
+ }
+ }()
+
// see if the stored value has been refreshed already by another instance
gotNewARI, newARI, err := cfg.storageHasNewerARI(ctx, cert)
@@ -615,11 +629,11 @@ func CleanStorage(ctx context.Context, storage Storage, opts CleanStorageOptions
opts.Logger = opts.Logger.With(zap.Any("storage", storage))
// storage cleaning should be globally exclusive
- if err := storage.Lock(ctx, lockName); err != nil {
+ if err := acquireLock(ctx, storage, lockName); err != nil {
return fmt.Errorf("unable to acquire %s lock: %v", lockName, err)
}
defer func() {
- if err := storage.Unlock(ctx, lockName); err != nil {
+ if err := releaseLock(ctx, storage, lockName); err != nil {
opts.Logger.Error("unable to release lock", zap.Error(err))
return
}
diff --git a/vendor/github.com/caddyserver/certmagic/zerosslissuer.go b/vendor/github.com/caddyserver/certmagic/zerosslissuer.go
index 8ee044b4..4a33bfa2 100644
--- a/vendor/github.com/caddyserver/certmagic/zerosslissuer.go
+++ b/vendor/github.com/caddyserver/certmagic/zerosslissuer.go
@@ -146,7 +146,7 @@ func (iss *ZeroSSLIssuer) Issue(ctx context.Context, csr *x509.CertificateReques
// create the CNAME record(s)
records := make(map[string]zoneRecord, len(cert.Validation.OtherMethods))
for name, verifyInfo := range cert.Validation.OtherMethods {
- zr, err := iss.CNAMEValidation.createRecord(ctx, verifyInfo.CnameValidationP1, "CNAME", verifyInfo.CnameValidationP2)
+ zr, err := iss.CNAMEValidation.createRecord(ctx, verifyInfo.CnameValidationP1, "CNAME", verifyInfo.CnameValidationP2+".") // see issue #304
if err != nil {
return nil, fmt.Errorf("creating CNAME record: %v", err)
}
diff --git a/vendor/github.com/datarhei/gosrt/README.md b/vendor/github.com/datarhei/gosrt/README.md
index 25681a47..f6133c7e 100644
--- a/vendor/github.com/datarhei/gosrt/README.md
+++ b/vendor/github.com/datarhei/gosrt/README.md
@@ -42,7 +42,7 @@ The parts that are implemented are based on what has been published in the SRT R
## Requirements
-A Go version of 1.18+ is required.
+A Go version of 1.20+ is required.
## Installation
@@ -89,39 +89,42 @@ if err != nil {
}
for {
- conn, mode, err := ln.Accept(func(req ConnRequest) ConnType {
- // check connection request
- return srt.REJECT
- })
+ req, err := ln.Accept2()
if err != nil {
// handle error
}
- if mode == srt.REJECT {
- // rejected connection, ignore
- continue
- }
+ go func(req ConnRequest) {
+ // check connection request by inspecting the connection request
+ // and either rejecting it ...
- if mode == srt.PUBLISH {
- go handlePublish(conn)
- } else { // srt.SUBSCRIBE
- go handleSubscribe(conn)
- }
+ if somethingIsWrong {
+ req.Reject(srt.REJ_PEER)
+ return
+ }
+
+ // ... or accepting it ...
+
+ conn, err := req.Accept()
+ if err != nil {
+ return
+ }
+
+ // ... and decide whether it is a publishing or subscribing connection.
+
+ if publish {
+ handlePublish(conn)
+ } else {
+ handleSubscribe(conn)
+ }
+ }(req)
}
```
In the `contrib/server` directory you'll find a complete example of a SRT server. For your convenience
-this modules provides the `Server` type which is a light framework for creating your own SRT server. The
+this module provides the `Server` type which is a light framework for creating your own SRT server. The
example server is based on this type.
-### PUBLISH / SUBSCRIBE
-
-The `Accept` function from the `Listener` expects a function that handles the connection requests. It can
-return 3 different values: `srt.PUBLISH`, `srt.SUBSCRIBE`, and `srt.REJECT`. `srt.PUBLISH` means that the
-server expects the caller to send data, whereas `srt.SUBSCRIBE` means that the server will send data to
-the caller. This is opiniated towards a streaming server, however in your implementation of a listener
-you are free to handle connections requests to your liking.
-
## Contributed client
In the `contrib/client` directory you'll find an example implementation of a SRT client.
diff --git a/vendor/github.com/datarhei/gosrt/conn_request.go b/vendor/github.com/datarhei/gosrt/conn_request.go
new file mode 100644
index 00000000..80576955
--- /dev/null
+++ b/vendor/github.com/datarhei/gosrt/conn_request.go
@@ -0,0 +1,421 @@
+package srt
+
+import (
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/datarhei/gosrt/crypto"
+ "github.com/datarhei/gosrt/packet"
+)
+
+// ConnRequest is an incoming connection request
+type ConnRequest interface {
+ // RemoteAddr returns the address of the peer. The returned net.Addr
+ // is a copy and can be used at will.
+ RemoteAddr() net.Addr
+
+ // Version returns the handshake version of the incoming request. Currently
+ // known versions are 4 and 5. With version 4 the StreamId will always be
+ // empty and IsEncrypted will always return false. An incoming version 4
+ // connection will always be publishing.
+ Version() uint32
+
+ // StreamId returns the streamid of the requesting connection. Use this
+ // to decide what to do with the connection.
+ StreamId() string
+
+ // IsEncrypted returns whether the connection is encrypted. If it is
+ // encrypted, use SetPassphrase to set the passphrase for decrypting.
+ IsEncrypted() bool
+
+ // SetPassphrase sets the passphrase in order to decrypt the incoming
+ // data. Returns an error if the passphrase did not work or the connection
+ // is not encrypted.
+ SetPassphrase(p string) error
+
+ // SetRejectionReason sets the rejection reason for the connection. If
+ // no set, REJ_PEER will be used.
+ //
+ // Deprecated: replaced by Reject().
+ SetRejectionReason(r RejectionReason)
+
+ // Accept accepts the request and returns a connection.
+ Accept() (Conn, error)
+
+ // Reject rejects the request.
+ Reject(r RejectionReason)
+}
+
+// connRequest implements the ConnRequest interface
+type connRequest struct {
+ ln *listener
+ addr net.Addr
+ start time.Time
+ socketId uint32
+ timestamp uint32
+ config Config
+ handshake *packet.CIFHandshake
+ crypto crypto.Crypto
+ passphrase string
+ rejectionReason RejectionReason
+}
+
+func newConnRequest(ln *listener, p packet.Packet) *connRequest {
+ cif := &packet.CIFHandshake{}
+
+ err := p.UnmarshalCIF(cif)
+
+ ln.log("handshake:recv:dump", func() string { return p.Dump() })
+ ln.log("handshake:recv:cif", func() string { return cif.String() })
+
+ if err != nil {
+ ln.log("handshake:recv:error", func() string { return err.Error() })
+ return nil
+ }
+
+ // Assemble the response (4.3.1. Caller-Listener Handshake)
+
+ p.Header().ControlType = packet.CTRLTYPE_HANDSHAKE
+ p.Header().SubType = 0
+ p.Header().TypeSpecific = 0
+ p.Header().Timestamp = uint32(time.Since(ln.start).Microseconds())
+ p.Header().DestinationSocketId = cif.SRTSocketId
+
+ cif.PeerIP.FromNetAddr(ln.addr)
+
+ // Create a copy of the configuration for the connection
+ config := ln.config
+
+ if cif.HandshakeType == packet.HSTYPE_INDUCTION {
+ // cif
+ cif.Version = 5
+ cif.EncryptionField = 0 // Don't advertise any specific encryption method
+ cif.ExtensionField = 0x4A17
+ //cif.initialPacketSequenceNumber = newCircular(0, MAX_SEQUENCENUMBER)
+ //cif.maxTransmissionUnitSize = 0
+ //cif.maxFlowWindowSize = 0
+ //cif.SRTSocketId = 0
+ cif.SynCookie = ln.syncookie.Get(p.Header().Addr.String())
+
+ p.MarshalCIF(cif)
+
+ ln.log("handshake:send:dump", func() string { return p.Dump() })
+ ln.log("handshake:send:cif", func() string { return cif.String() })
+
+ ln.send(p)
+ } else if cif.HandshakeType == packet.HSTYPE_CONCLUSION {
+ // Verify the SYN cookie
+ if !ln.syncookie.Verify(cif.SynCookie, p.Header().Addr.String()) {
+ cif.HandshakeType = packet.HandshakeType(REJ_ROGUE)
+ ln.log("handshake:recv:error", func() string { return "invalid SYN cookie" })
+ p.MarshalCIF(cif)
+ ln.log("handshake:send:dump", func() string { return p.Dump() })
+ ln.log("handshake:send:cif", func() string { return cif.String() })
+ ln.send(p)
+
+ return nil
+ }
+
+ // Peer is advertising a too big MSS
+ if cif.MaxTransmissionUnitSize > MAX_MSS_SIZE {
+ cif.HandshakeType = packet.HandshakeType(REJ_ROGUE)
+ ln.log("handshake:recv:error", func() string { return fmt.Sprintf("MTU is too big (%d bytes)", cif.MaxTransmissionUnitSize) })
+ p.MarshalCIF(cif)
+ ln.log("handshake:send:dump", func() string { return p.Dump() })
+ ln.log("handshake:send:cif", func() string { return cif.String() })
+ ln.send(p)
+
+ return nil
+ }
+
+ // If the peer has a smaller MTU size, adjust to it
+ if cif.MaxTransmissionUnitSize < config.MSS {
+ config.MSS = cif.MaxTransmissionUnitSize
+ config.PayloadSize = config.MSS - SRT_HEADER_SIZE - UDP_HEADER_SIZE
+
+ if config.PayloadSize < MIN_PAYLOAD_SIZE {
+ cif.HandshakeType = packet.HandshakeType(REJ_ROGUE)
+ ln.log("handshake:recv:error", func() string { return fmt.Sprintf("payload size is too small (%d bytes)", config.PayloadSize) })
+ p.MarshalCIF(cif)
+ ln.log("handshake:send:dump", func() string { return p.Dump() })
+ ln.log("handshake:send:cif", func() string { return cif.String() })
+ ln.send(p)
+ }
+ }
+
+ // We only support HSv4 and HSv5
+ if cif.Version == 4 {
+ // Check if the type (encryption field + extension field) has the value 2
+ if cif.EncryptionField != 0 || cif.ExtensionField != 2 {
+ cif.HandshakeType = packet.HandshakeType(REJ_ROGUE)
+ ln.log("handshake:recv:error", func() string { return "invalid type, expecting a value of 2 (UDT_DGRAM)" })
+ p.MarshalCIF(cif)
+ ln.log("handshake:send:dump", func() string { return p.Dump() })
+ ln.log("handshake:send:cif", func() string { return cif.String() })
+ ln.send(p)
+
+ return nil
+ }
+ } else if cif.Version == 5 {
+ if cif.SRTHS == nil {
+ cif.HandshakeType = packet.HandshakeType(REJ_ROGUE)
+ ln.log("handshake:recv:error", func() string { return "missing handshake extension" })
+ p.MarshalCIF(cif)
+ ln.log("handshake:send:dump", func() string { return p.Dump() })
+ ln.log("handshake:send:cif", func() string { return cif.String() })
+ ln.send(p)
+
+ return nil
+ }
+
+ // Check if the peer version is sufficient
+ if cif.SRTHS.SRTVersion < config.MinVersion {
+ cif.HandshakeType = packet.HandshakeType(REJ_VERSION)
+ ln.log("handshake:recv:error", func() string {
+ return fmt.Sprintf("peer version insufficient (%#06x), expecting at least %#06x", cif.SRTHS.SRTVersion, config.MinVersion)
+ })
+ p.MarshalCIF(cif)
+ ln.log("handshake:send:dump", func() string { return p.Dump() })
+ ln.log("handshake:send:cif", func() string { return cif.String() })
+ ln.send(p)
+
+ return nil
+ }
+
+ // Check the required SRT flags
+ if !cif.SRTHS.SRTFlags.TSBPDSND || !cif.SRTHS.SRTFlags.TSBPDRCV || !cif.SRTHS.SRTFlags.TLPKTDROP || !cif.SRTHS.SRTFlags.PERIODICNAK || !cif.SRTHS.SRTFlags.REXMITFLG {
+ cif.HandshakeType = packet.HandshakeType(REJ_ROGUE)
+ ln.log("handshake:recv:error", func() string { return "not all required flags are set" })
+ p.MarshalCIF(cif)
+ ln.log("handshake:send:dump", func() string { return p.Dump() })
+ ln.log("handshake:send:cif", func() string { return cif.String() })
+ ln.send(p)
+
+ return nil
+ }
+
+ // We only support live streaming
+ if cif.SRTHS.SRTFlags.STREAM {
+ cif.HandshakeType = packet.HandshakeType(REJ_MESSAGEAPI)
+ ln.log("handshake:recv:error", func() string { return "only live streaming is supported" })
+ p.MarshalCIF(cif)
+ ln.log("handshake:send:dump", func() string { return p.Dump() })
+ ln.log("handshake:send:cif", func() string { return cif.String() })
+ ln.send(p)
+
+ return nil
+ }
+ } else {
+ cif.HandshakeType = packet.HandshakeType(REJ_ROGUE)
+ ln.log("handshake:recv:error", func() string { return fmt.Sprintf("only HSv4 and HSv5 are supported (got HSv%d)", cif.Version) })
+ p.MarshalCIF(cif)
+ ln.log("handshake:send:dump", func() string { return p.Dump() })
+ ln.log("handshake:send:cif", func() string { return cif.String() })
+ ln.send(p)
+
+ return nil
+ }
+
+ req := &connRequest{
+ ln: ln,
+ addr: p.Header().Addr,
+ start: time.Now(),
+ socketId: cif.SRTSocketId,
+ timestamp: p.Header().Timestamp,
+ config: config,
+ handshake: cif,
+ }
+
+ if cif.SRTKM != nil {
+ cr, err := crypto.New(int(cif.SRTKM.KLen))
+ if err != nil {
+ cif.HandshakeType = packet.HandshakeType(REJ_ROGUE)
+ ln.log("handshake:recv:error", func() string { return fmt.Sprintf("crypto: %s", err) })
+ p.MarshalCIF(cif)
+ ln.log("handshake:send:dump", func() string { return p.Dump() })
+ ln.log("handshake:send:cif", func() string { return cif.String() })
+ ln.send(p)
+
+ return nil
+ }
+
+ req.crypto = cr
+ }
+
+ ln.lock.Lock()
+ _, exists := ln.connReqs[cif.SRTSocketId]
+ if !exists {
+ ln.connReqs[cif.SRTSocketId] = req
+ }
+ ln.lock.Unlock()
+
+ // we received a duplicate request: reject silently
+ if exists {
+ return nil
+ }
+
+ return req
+ } else {
+ if cif.HandshakeType.IsRejection() {
+ ln.log("handshake:recv:error", func() string { return fmt.Sprintf("connection rejected: %s", cif.HandshakeType.String()) })
+ } else {
+ ln.log("handshake:recv:error", func() string { return fmt.Sprintf("unsupported handshake: %s", cif.HandshakeType.String()) })
+ }
+ }
+
+ return nil
+}
+
+func (req *connRequest) RemoteAddr() net.Addr {
+ addr, _ := net.ResolveUDPAddr("udp", req.addr.String())
+ return addr
+}
+
+func (req *connRequest) Version() uint32 {
+ return req.handshake.Version
+}
+
+func (req *connRequest) StreamId() string {
+ return req.handshake.StreamId
+}
+
+func (req *connRequest) IsEncrypted() bool {
+ return req.crypto != nil
+}
+
+func (req *connRequest) SetPassphrase(passphrase string) error {
+ if req.handshake.Version == 5 {
+ if req.crypto == nil {
+ return fmt.Errorf("listen: request without encryption")
+ }
+
+ if err := req.crypto.UnmarshalKM(req.handshake.SRTKM, passphrase); err != nil {
+ return err
+ }
+ }
+
+ req.passphrase = passphrase
+
+ return nil
+}
+
+func (req *connRequest) SetRejectionReason(reason RejectionReason) {
+ req.rejectionReason = reason
+}
+
+func (req *connRequest) Reject(reason RejectionReason) {
+ req.ln.lock.Lock()
+ defer req.ln.lock.Unlock()
+
+ if _, hasReq := req.ln.connReqs[req.socketId]; !hasReq {
+ return
+ }
+
+ p := packet.NewPacket(req.addr)
+ p.Header().IsControlPacket = true
+ p.Header().ControlType = packet.CTRLTYPE_HANDSHAKE
+ p.Header().SubType = 0
+ p.Header().TypeSpecific = 0
+ p.Header().Timestamp = uint32(time.Since(req.ln.start).Microseconds())
+ p.Header().DestinationSocketId = req.socketId
+ req.handshake.HandshakeType = packet.HandshakeType(reason)
+ p.MarshalCIF(req.handshake)
+ req.ln.log("handshake:send:dump", func() string { return p.Dump() })
+ req.ln.log("handshake:send:cif", func() string { return req.handshake.String() })
+ req.ln.send(p)
+
+ delete(req.ln.connReqs, req.socketId)
+}
+
+func (req *connRequest) Accept() (Conn, error) {
+ if req.crypto != nil && len(req.passphrase) == 0 {
+ req.Reject(REJ_BADSECRET)
+ return nil, fmt.Errorf("passphrase is missing")
+ }
+
+ req.ln.lock.Lock()
+ defer req.ln.lock.Unlock()
+
+ if _, hasReq := req.ln.connReqs[req.socketId]; !hasReq {
+ return nil, fmt.Errorf("connection already accepted")
+ }
+
+ // Create a new socket ID
+ socketId := uint32(time.Since(req.ln.start).Microseconds())
+
+ // Select the largest TSBPD delay advertised by the caller, but at least 120ms
+ recvTsbpdDelay := uint16(req.config.ReceiverLatency.Milliseconds())
+ sendTsbpdDelay := uint16(req.config.PeerLatency.Milliseconds())
+
+ if req.handshake.Version == 5 {
+ if req.handshake.SRTHS.SendTSBPDDelay > recvTsbpdDelay {
+ recvTsbpdDelay = req.handshake.SRTHS.SendTSBPDDelay
+ }
+
+ if req.handshake.SRTHS.RecvTSBPDDelay > sendTsbpdDelay {
+ sendTsbpdDelay = req.handshake.SRTHS.RecvTSBPDDelay
+ }
+
+ req.config.StreamId = req.handshake.StreamId
+ }
+
+ req.config.Passphrase = req.passphrase
+
+ // Create a new connection
+ conn := newSRTConn(srtConnConfig{
+ version: req.handshake.Version,
+ localAddr: req.ln.addr,
+ remoteAddr: req.addr,
+ config: req.config,
+ start: req.start,
+ socketId: socketId,
+ peerSocketId: req.handshake.SRTSocketId,
+ tsbpdTimeBase: uint64(req.timestamp),
+ tsbpdDelay: uint64(recvTsbpdDelay) * 1000,
+ peerTsbpdDelay: uint64(sendTsbpdDelay) * 1000,
+ initialPacketSequenceNumber: req.handshake.InitialPacketSequenceNumber,
+ crypto: req.crypto,
+ keyBaseEncryption: packet.EvenKeyEncrypted,
+ onSend: req.ln.send,
+ onShutdown: req.ln.handleShutdown,
+ logger: req.config.Logger,
+ })
+
+ req.ln.log("connection:new", func() string { return fmt.Sprintf("%#08x (%s)", conn.SocketId(), conn.StreamId()) })
+
+ req.handshake.SRTSocketId = socketId
+ req.handshake.SynCookie = 0
+
+ if req.handshake.Version == 5 {
+ // 3.2.1.1.1. Handshake Extension Message Flags
+ req.handshake.SRTHS.SRTVersion = SRT_VERSION
+ req.handshake.SRTHS.SRTFlags.TSBPDSND = true
+ req.handshake.SRTHS.SRTFlags.TSBPDRCV = true
+ req.handshake.SRTHS.SRTFlags.CRYPT = true
+ req.handshake.SRTHS.SRTFlags.TLPKTDROP = true
+ req.handshake.SRTHS.SRTFlags.PERIODICNAK = true
+ req.handshake.SRTHS.SRTFlags.REXMITFLG = true
+ req.handshake.SRTHS.SRTFlags.STREAM = false
+ req.handshake.SRTHS.SRTFlags.PACKET_FILTER = false
+ req.handshake.SRTHS.RecvTSBPDDelay = recvTsbpdDelay
+ req.handshake.SRTHS.SendTSBPDDelay = sendTsbpdDelay
+ }
+
+ p := packet.NewPacket(req.addr)
+ p.Header().IsControlPacket = true
+ p.Header().ControlType = packet.CTRLTYPE_HANDSHAKE
+ p.Header().SubType = 0
+ p.Header().TypeSpecific = 0
+ p.Header().Timestamp = uint32(time.Since(req.start).Microseconds())
+ p.Header().DestinationSocketId = req.socketId
+ p.MarshalCIF(req.handshake)
+ req.ln.log("handshake:send:dump", func() string { return p.Dump() })
+ req.ln.log("handshake:send:cif", func() string { return req.handshake.String() })
+ req.ln.send(p)
+
+ req.ln.conns[socketId] = conn
+ delete(req.ln.connReqs, req.socketId)
+
+ return conn, nil
+}
diff --git a/vendor/github.com/datarhei/gosrt/dial.go b/vendor/github.com/datarhei/gosrt/dial.go
index 115ac869..902edc5b 100644
--- a/vendor/github.com/datarhei/gosrt/dial.go
+++ b/vendor/github.com/datarhei/gosrt/dial.go
@@ -425,6 +425,14 @@ func (dl *dialer) handleHandshake(p packet.Packet) {
sendTsbpdDelay := uint16(dl.config.PeerLatency.Milliseconds())
if cif.Version == 5 {
+ if cif.SRTHS == nil {
+ dl.connChan <- connResponse{
+ conn: nil,
+ err: fmt.Errorf("missing handshake extension"),
+ }
+ return
+ }
+
// Check if the peer version is sufficient
if cif.SRTHS.SRTVersion < dl.config.MinVersion {
dl.sendShutdown(cif.SRTSocketId)
diff --git a/vendor/github.com/datarhei/gosrt/listen.go b/vendor/github.com/datarhei/gosrt/listen.go
index 1385f4ef..024d10fd 100644
--- a/vendor/github.com/datarhei/gosrt/listen.go
+++ b/vendor/github.com/datarhei/gosrt/listen.go
@@ -10,7 +10,6 @@ import (
"sync"
"time"
- "github.com/datarhei/gosrt/crypto"
srtnet "github.com/datarhei/gosrt/net"
"github.com/datarhei/gosrt/packet"
)
@@ -85,87 +84,6 @@ const (
REJX_NOROOM RejectionReason = 1507 // The data stream cannot be archived due to lacking storage space. This is in case when the request type was to send a file or the live stream to be archived.
)
-// ConnRequest is an incoming connection request
-type ConnRequest interface {
- // RemoteAddr returns the address of the peer. The returned net.Addr
- // is a copy and can be used at will.
- RemoteAddr() net.Addr
-
- // Version returns the handshake version of the incoming request. Currently
- // known versions are 4 and 5. With version 4 the StreamId will always be
- // empty and IsEncrypted will always return false. An incoming version 4
- // connection will always be publishing.
- Version() uint32
-
- // StreamId returns the streamid of the requesting connection. Use this
- // to decide what to do with the connection.
- StreamId() string
-
- // IsEncrypted returns whether the connection is encrypted. If it is
- // encrypted, use SetPassphrase to set the passphrase for decrypting.
- IsEncrypted() bool
-
- // SetPassphrase sets the passphrase in order to decrypt the incoming
- // data. Returns an error if the passphrase did not work or the connection
- // is not encrypted.
- SetPassphrase(p string) error
-
- // SetRejectionReason sets the rejection reason for the connection. If
- // no set, REJ_PEER will be used.
- SetRejectionReason(r RejectionReason)
-}
-
-// connRequest implements the ConnRequest interface
-type connRequest struct {
- addr net.Addr
- start time.Time
- socketId uint32
- timestamp uint32
-
- config Config
- handshake *packet.CIFHandshake
- crypto crypto.Crypto
- passphrase string
- rejectionReason RejectionReason
-}
-
-func (req *connRequest) RemoteAddr() net.Addr {
- addr, _ := net.ResolveUDPAddr("udp", req.addr.String())
- return addr
-}
-
-func (req *connRequest) Version() uint32 {
- return req.handshake.Version
-}
-
-func (req *connRequest) StreamId() string {
- return req.handshake.StreamId
-}
-
-func (req *connRequest) IsEncrypted() bool {
- return req.crypto != nil
-}
-
-func (req *connRequest) SetPassphrase(passphrase string) error {
- if req.handshake.Version == 5 {
- if req.crypto == nil {
- return fmt.Errorf("listen: request without encryption")
- }
-
- if err := req.crypto.UnmarshalKM(req.handshake.SRTKM, passphrase); err != nil {
- return err
- }
- }
-
- req.passphrase = passphrase
-
- return nil
-}
-
-func (req *connRequest) SetRejectionReason(reason RejectionReason) {
- req.rejectionReason = reason
-}
-
// ErrListenerClosed is returned when the listener is about to shutdown.
var ErrListenerClosed = errors.New("srt: listener closed")
@@ -175,11 +93,17 @@ type AcceptFunc func(req ConnRequest) ConnType
// Listener waits for new connections
type Listener interface {
+ // Accept2 waits for new connections.
+ // On closing the err will be ErrListenerClosed.
+ Accept2() (ConnRequest, error)
+
// Accept waits for new connections. For each new connection the AcceptFunc
// gets called. Conn is a new connection if AcceptFunc is PUBLISH or SUBSCRIBE.
// If AcceptFunc returns REJECT, Conn is nil. In case of failure error is not
// nil, Conn is nil and ConnType is REJECT. On closing the listener err will
// be ErrListenerClosed and ConnType is REJECT.
+ //
+ // Deprecated: replaced by Accept2().
Accept(AcceptFunc) (Conn, ConnType, error)
// Close closes the listener. It will stop accepting new connections and
@@ -197,9 +121,10 @@ type listener struct {
config Config
- backlog chan connRequest
- conns map[uint32]*srtConn
- lock sync.RWMutex
+ backlog chan packet.Packet
+ connReqs map[uint32]*connRequest
+ conns map[uint32]*srtConn
+ lock sync.RWMutex
start time.Time
@@ -265,9 +190,10 @@ func Listen(network, address string, config Config) (Listener, error) {
return nil, fmt.Errorf("listen: no local address")
}
+ ln.connReqs = make(map[uint32]*connRequest)
ln.conns = make(map[uint32]*srtConn)
- ln.backlog = make(chan connRequest, 128)
+ ln.backlog = make(chan packet.Packet, 128)
ln.rcvQueue = make(chan packet.Packet, 2048)
@@ -328,108 +254,57 @@ func Listen(network, address string, config Config) (Listener, error) {
return ln, nil
}
-func (ln *listener) Accept(acceptFn AcceptFunc) (Conn, ConnType, error) {
+func (ln *listener) Accept2() (ConnRequest, error) {
if ln.isShutdown() {
- return nil, REJECT, ErrListenerClosed
+ return nil, ErrListenerClosed
}
- select {
- case <-ln.doneChan:
- return nil, REJECT, ln.error()
- case request := <-ln.backlog:
- if acceptFn == nil {
- ln.reject(request, REJ_PEER)
- break
- }
+ for {
+ select {
+ case <-ln.doneChan:
+ return nil, ln.error()
- mode := acceptFn(&request)
- if mode != PUBLISH && mode != SUBSCRIBE {
- // Figure out the reason
- reason := REJ_PEER
- if request.rejectionReason > 0 {
- reason = request.rejectionReason
+ case p := <-ln.backlog:
+ req := newConnRequest(ln, p)
+ if req == nil {
+ break
}
- ln.reject(request, reason)
- break
- }
- if request.crypto != nil && len(request.passphrase) == 0 {
- ln.reject(request, REJ_BADSECRET)
- break
+ return req, nil
}
+ }
+}
- // Create a new socket ID
- socketId := uint32(time.Since(ln.start).Microseconds())
-
- // Select the largest TSBPD delay advertised by the caller, but at least 120ms
- recvTsbpdDelay := uint16(request.config.ReceiverLatency.Milliseconds())
- sendTsbpdDelay := uint16(request.config.PeerLatency.Milliseconds())
+func (ln *listener) Accept(acceptFn AcceptFunc) (Conn, ConnType, error) {
+ for {
+ req, err := ln.Accept2()
+ if err != nil {
+ return nil, REJECT, err
+ }
- if request.handshake.Version == 5 {
- if request.handshake.SRTHS.SendTSBPDDelay > recvTsbpdDelay {
- recvTsbpdDelay = request.handshake.SRTHS.SendTSBPDDelay
- }
+ if acceptFn == nil {
+ req.Reject(REJ_PEER)
+ continue
+ }
- if request.handshake.SRTHS.RecvTSBPDDelay > sendTsbpdDelay {
- sendTsbpdDelay = request.handshake.SRTHS.RecvTSBPDDelay
+ mode := acceptFn(req)
+ if mode != PUBLISH && mode != SUBSCRIBE {
+ // Figure out the reason
+ reason := REJ_PEER
+ if req.(*connRequest).rejectionReason > 0 {
+ reason = req.(*connRequest).rejectionReason
}
-
- request.config.StreamId = request.handshake.StreamId
+ req.Reject(reason)
+ continue
}
- request.config.Passphrase = request.passphrase
-
- // Create a new connection
- conn := newSRTConn(srtConnConfig{
- version: request.handshake.Version,
- localAddr: ln.addr,
- remoteAddr: request.addr,
- config: request.config,
- start: request.start,
- socketId: socketId,
- peerSocketId: request.handshake.SRTSocketId,
- tsbpdTimeBase: uint64(request.timestamp),
- tsbpdDelay: uint64(recvTsbpdDelay) * 1000,
- peerTsbpdDelay: uint64(sendTsbpdDelay) * 1000,
- initialPacketSequenceNumber: request.handshake.InitialPacketSequenceNumber,
- crypto: request.crypto,
- keyBaseEncryption: packet.EvenKeyEncrypted,
- onSend: ln.send,
- onShutdown: ln.handleShutdown,
- logger: request.config.Logger,
- })
-
- ln.log("connection:new", func() string { return fmt.Sprintf("%#08x (%s) %s", conn.SocketId(), conn.StreamId(), mode) })
-
- request.handshake.SRTSocketId = socketId
- request.handshake.SynCookie = 0
-
- if request.handshake.Version == 5 {
- // 3.2.1.1.1. Handshake Extension Message Flags
- request.handshake.SRTHS.SRTVersion = SRT_VERSION
- request.handshake.SRTHS.SRTFlags.TSBPDSND = true
- request.handshake.SRTHS.SRTFlags.TSBPDRCV = true
- request.handshake.SRTHS.SRTFlags.CRYPT = true
- request.handshake.SRTHS.SRTFlags.TLPKTDROP = true
- request.handshake.SRTHS.SRTFlags.PERIODICNAK = true
- request.handshake.SRTHS.SRTFlags.REXMITFLG = true
- request.handshake.SRTHS.SRTFlags.STREAM = false
- request.handshake.SRTHS.SRTFlags.PACKET_FILTER = false
- request.handshake.SRTHS.RecvTSBPDDelay = recvTsbpdDelay
- request.handshake.SRTHS.SendTSBPDDelay = sendTsbpdDelay
+ conn, err := req.Accept()
+ if err != nil {
+ continue
}
- ln.accept(request)
-
- // Add the connection to the list of known connections
- ln.lock.Lock()
- ln.conns[socketId] = conn
- ln.lock.Unlock()
-
return conn, mode, nil
}
-
- return nil, REJECT, nil
}
// markDone marks the listener as done by closing
@@ -457,47 +332,6 @@ func (ln *listener) handleShutdown(socketId uint32) {
ln.lock.Unlock()
}
-func (ln *listener) reject(request connRequest, reason RejectionReason) {
- p := packet.NewPacket(request.addr)
- p.Header().IsControlPacket = true
-
- p.Header().ControlType = packet.CTRLTYPE_HANDSHAKE
- p.Header().SubType = 0
- p.Header().TypeSpecific = 0
-
- p.Header().Timestamp = uint32(time.Since(ln.start).Microseconds())
- p.Header().DestinationSocketId = request.socketId
-
- request.handshake.HandshakeType = packet.HandshakeType(reason)
-
- p.MarshalCIF(request.handshake)
-
- ln.log("handshake:send:dump", func() string { return p.Dump() })
- ln.log("handshake:send:cif", func() string { return request.handshake.String() })
-
- ln.send(p)
-}
-
-func (ln *listener) accept(request connRequest) {
- p := packet.NewPacket(request.addr)
-
- p.Header().IsControlPacket = true
-
- p.Header().ControlType = packet.CTRLTYPE_HANDSHAKE
- p.Header().SubType = 0
- p.Header().TypeSpecific = 0
-
- p.Header().Timestamp = uint32(time.Since(request.start).Microseconds())
- p.Header().DestinationSocketId = request.socketId
-
- p.MarshalCIF(request.handshake)
-
- ln.log("handshake:send:dump", func() string { return p.Dump() })
- ln.log("handshake:send:cif", func() string { return request.handshake.String() })
-
- ln.send(p)
-}
-
func (ln *listener) isShutdown() bool {
ln.shutdownLock.RLock()
defer ln.shutdownLock.RUnlock()
@@ -555,9 +389,12 @@ func (ln *listener) reader(ctx context.Context) {
if p.Header().DestinationSocketId == 0 {
if p.Header().IsControlPacket && p.Header().ControlType == packet.CTRLTYPE_HANDSHAKE {
- ln.handleHandshake(p)
+ select {
+ case ln.backlog <- p:
+ default:
+ ln.log("handshake:recv:error", func() string { return "backlog is full" })
+ }
}
-
break
}
@@ -601,199 +438,6 @@ func (ln *listener) send(p packet.Packet) {
}
}
-func (ln *listener) handleHandshake(p packet.Packet) {
- cif := &packet.CIFHandshake{}
-
- err := p.UnmarshalCIF(cif)
-
- ln.log("handshake:recv:dump", func() string { return p.Dump() })
- ln.log("handshake:recv:cif", func() string { return cif.String() })
-
- if err != nil {
- ln.log("handshake:recv:error", func() string { return err.Error() })
- return
- }
-
- // Assemble the response (4.3.1. Caller-Listener Handshake)
-
- p.Header().ControlType = packet.CTRLTYPE_HANDSHAKE
- p.Header().SubType = 0
- p.Header().TypeSpecific = 0
- p.Header().Timestamp = uint32(time.Since(ln.start).Microseconds())
- p.Header().DestinationSocketId = cif.SRTSocketId
-
- cif.PeerIP.FromNetAddr(ln.addr)
-
- // Create a copy of the configuration for the connection
- config := ln.config
-
- if cif.HandshakeType == packet.HSTYPE_INDUCTION {
- // cif
- cif.Version = 5
- cif.EncryptionField = 0 // Don't advertise any specific encryption method
- cif.ExtensionField = 0x4A17
- //cif.initialPacketSequenceNumber = newCircular(0, MAX_SEQUENCENUMBER)
- //cif.maxTransmissionUnitSize = 0
- //cif.maxFlowWindowSize = 0
- //cif.SRTSocketId = 0
- cif.SynCookie = ln.syncookie.Get(p.Header().Addr.String())
-
- p.MarshalCIF(cif)
-
- ln.log("handshake:send:dump", func() string { return p.Dump() })
- ln.log("handshake:send:cif", func() string { return cif.String() })
-
- ln.send(p)
- } else if cif.HandshakeType == packet.HSTYPE_CONCLUSION {
- // Verify the SYN cookie
- if !ln.syncookie.Verify(cif.SynCookie, p.Header().Addr.String()) {
- cif.HandshakeType = packet.HandshakeType(REJ_ROGUE)
- ln.log("handshake:recv:error", func() string { return "invalid SYN cookie" })
- p.MarshalCIF(cif)
- ln.log("handshake:send:dump", func() string { return p.Dump() })
- ln.log("handshake:send:cif", func() string { return cif.String() })
- ln.send(p)
-
- return
- }
-
- // Peer is advertising a too big MSS
- if cif.MaxTransmissionUnitSize > MAX_MSS_SIZE {
- cif.HandshakeType = packet.HandshakeType(REJ_ROGUE)
- ln.log("handshake:recv:error", func() string { return fmt.Sprintf("MTU is too big (%d bytes)", cif.MaxTransmissionUnitSize) })
- p.MarshalCIF(cif)
- ln.log("handshake:send:dump", func() string { return p.Dump() })
- ln.log("handshake:send:cif", func() string { return cif.String() })
- ln.send(p)
-
- return
- }
-
- // If the peer has a smaller MTU size, adjust to it
- if cif.MaxTransmissionUnitSize < config.MSS {
- config.MSS = cif.MaxTransmissionUnitSize
- config.PayloadSize = config.MSS - SRT_HEADER_SIZE - UDP_HEADER_SIZE
-
- if config.PayloadSize < MIN_PAYLOAD_SIZE {
- cif.HandshakeType = packet.HandshakeType(REJ_ROGUE)
- ln.log("handshake:recv:error", func() string { return fmt.Sprintf("payload size is too small (%d bytes)", config.PayloadSize) })
- p.MarshalCIF(cif)
- ln.log("handshake:send:dump", func() string { return p.Dump() })
- ln.log("handshake:send:cif", func() string { return cif.String() })
- ln.send(p)
- }
- }
-
- // We only support HSv4 and HSv5
- if cif.Version == 4 {
- // Check if the type (encryption field + extension field) has the value 2
- if cif.EncryptionField != 0 || cif.ExtensionField != 2 {
- cif.HandshakeType = packet.HandshakeType(REJ_ROGUE)
- ln.log("handshake:recv:error", func() string { return "invalid type, expecting a value of 2 (UDT_DGRAM)" })
- p.MarshalCIF(cif)
- ln.log("handshake:send:dump", func() string { return p.Dump() })
- ln.log("handshake:send:cif", func() string { return cif.String() })
- ln.send(p)
-
- return
- }
- } else if cif.Version == 5 {
- // Check if the peer version is sufficient
- if cif.SRTHS.SRTVersion < config.MinVersion {
- cif.HandshakeType = packet.HandshakeType(REJ_VERSION)
- ln.log("handshake:recv:error", func() string {
- return fmt.Sprintf("peer version insufficient (%#06x), expecting at least %#06x", cif.SRTHS.SRTVersion, config.MinVersion)
- })
- p.MarshalCIF(cif)
- ln.log("handshake:send:dump", func() string { return p.Dump() })
- ln.log("handshake:send:cif", func() string { return cif.String() })
- ln.send(p)
-
- return
- }
-
- // Check the required SRT flags
- if !cif.SRTHS.SRTFlags.TSBPDSND || !cif.SRTHS.SRTFlags.TSBPDRCV || !cif.SRTHS.SRTFlags.TLPKTDROP || !cif.SRTHS.SRTFlags.PERIODICNAK || !cif.SRTHS.SRTFlags.REXMITFLG {
- cif.HandshakeType = packet.HandshakeType(REJ_ROGUE)
- ln.log("handshake:recv:error", func() string { return "not all required flags are set" })
- p.MarshalCIF(cif)
- ln.log("handshake:send:dump", func() string { return p.Dump() })
- ln.log("handshake:send:cif", func() string { return cif.String() })
- ln.send(p)
-
- return
- }
-
- // We only support live streaming
- if cif.SRTHS.SRTFlags.STREAM {
- cif.HandshakeType = packet.HandshakeType(REJ_MESSAGEAPI)
- ln.log("handshake:recv:error", func() string { return "only live streaming is supported" })
- p.MarshalCIF(cif)
- ln.log("handshake:send:dump", func() string { return p.Dump() })
- ln.log("handshake:send:cif", func() string { return cif.String() })
- ln.send(p)
-
- return
- }
- } else {
- cif.HandshakeType = packet.HandshakeType(REJ_ROGUE)
- ln.log("handshake:recv:error", func() string { return fmt.Sprintf("only HSv4 and HSv5 are supported (got HSv%d)", cif.Version) })
- p.MarshalCIF(cif)
- ln.log("handshake:send:dump", func() string { return p.Dump() })
- ln.log("handshake:send:cif", func() string { return cif.String() })
- ln.send(p)
-
- return
- }
-
- // Fill up a connection request with all relevant data and put it into the backlog
-
- c := connRequest{
- addr: p.Header().Addr,
- start: time.Now(),
- socketId: cif.SRTSocketId,
- timestamp: p.Header().Timestamp,
- config: config,
-
- handshake: cif,
- }
-
- if cif.SRTKM != nil {
- cr, err := crypto.New(int(cif.SRTKM.KLen))
- if err != nil {
- cif.HandshakeType = packet.HandshakeType(REJ_ROGUE)
- ln.log("handshake:recv:error", func() string { return fmt.Sprintf("crypto: %s", err) })
- p.MarshalCIF(cif)
- ln.log("handshake:send:dump", func() string { return p.Dump() })
- ln.log("handshake:send:cif", func() string { return cif.String() })
- ln.send(p)
-
- return
- }
-
- c.crypto = cr
- }
-
- // If the backlog is full, reject the connection
- select {
- case ln.backlog <- c:
- default:
- cif.HandshakeType = packet.HandshakeType(REJ_BACKLOG)
- ln.log("handshake:recv:error", func() string { return "backlog is full" })
- p.MarshalCIF(cif)
- ln.log("handshake:send:dump", func() string { return p.Dump() })
- ln.log("handshake:send:cif", func() string { return cif.String() })
- ln.send(p)
- }
- } else {
- if cif.HandshakeType.IsRejection() {
- ln.log("handshake:recv:error", func() string { return fmt.Sprintf("connection rejected: %s", cif.HandshakeType.String()) })
- } else {
- ln.log("handshake:recv:error", func() string { return fmt.Sprintf("unsupported handshake: %s", cif.HandshakeType.String()) })
- }
- }
-}
-
func (ln *listener) log(topic string, message func() string) {
if ln.config.Logger == nil {
return
diff --git a/vendor/github.com/datarhei/gosrt/server.go b/vendor/github.com/datarhei/gosrt/server.go
index 4f5e0f1d..61466662 100644
--- a/vendor/github.com/datarhei/gosrt/server.go
+++ b/vendor/github.com/datarhei/gosrt/server.go
@@ -75,7 +75,7 @@ func (s *Server) Listen() error {
func (s *Server) Serve() error {
for {
// Wait for connections.
- conn, mode, err := s.ln.Accept(s.HandleConnect)
+ req, err := s.ln.Accept2()
if err != nil {
if err == ErrListenerClosed {
return ErrServerClosed
@@ -84,16 +84,30 @@ func (s *Server) Serve() error {
return err
}
- if conn == nil {
- // rejected connection, ignore
+ if s.HandleConnect == nil {
+ req.Reject(REJ_PEER)
continue
}
- if mode == PUBLISH {
- go s.HandlePublish(conn)
- } else {
- go s.HandleSubscribe(conn)
- }
+ go func(req ConnRequest) {
+ mode := s.HandleConnect(req)
+ if mode == REJECT {
+ req.Reject(REJ_PEER)
+ return
+ }
+
+ conn, err := req.Accept()
+ if err != nil {
+ // rejected connection, ignore
+ return
+ }
+
+ if mode == PUBLISH {
+ s.HandlePublish(conn)
+ } else {
+ s.HandleSubscribe(conn)
+ }
+ }(req)
}
}
diff --git a/vendor/github.com/dolthub/maphash/.gitignore b/vendor/github.com/dolthub/maphash/.gitignore
new file mode 100644
index 00000000..977a7cad
--- /dev/null
+++ b/vendor/github.com/dolthub/maphash/.gitignore
@@ -0,0 +1,2 @@
+*.idea
+*.test
\ No newline at end of file
diff --git a/vendor/github.com/dolthub/maphash/LICENSE b/vendor/github.com/dolthub/maphash/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/github.com/dolthub/maphash/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/dolthub/maphash/README.md b/vendor/github.com/dolthub/maphash/README.md
new file mode 100644
index 00000000..d91530f9
--- /dev/null
+++ b/vendor/github.com/dolthub/maphash/README.md
@@ -0,0 +1,4 @@
+# maphash
+
+Hash any `comparable` type using Golang's fast runtime hash.
+Uses [AES](https://en.wikipedia.org/wiki/AES_instruction_set) instructions when available.
\ No newline at end of file
diff --git a/vendor/github.com/dolthub/maphash/hasher.go b/vendor/github.com/dolthub/maphash/hasher.go
new file mode 100644
index 00000000..ef53596a
--- /dev/null
+++ b/vendor/github.com/dolthub/maphash/hasher.go
@@ -0,0 +1,48 @@
+// Copyright 2022 Dolthub, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package maphash
+
+import "unsafe"
+
+// Hasher hashes values of type K.
+// Uses runtime AES-based hashing.
+type Hasher[K comparable] struct {
+ hash hashfn
+ seed uintptr
+}
+
+// NewHasher creates a new Hasher[K] with a random seed.
+func NewHasher[K comparable]() Hasher[K] {
+ return Hasher[K]{
+ hash: getRuntimeHasher[K](),
+ seed: newHashSeed(),
+ }
+}
+
+// NewSeed returns a copy of |h| with a new hash seed.
+func NewSeed[K comparable](h Hasher[K]) Hasher[K] {
+ return Hasher[K]{
+ hash: h.hash,
+ seed: newHashSeed(),
+ }
+}
+
+// Hash hashes |key|.
+func (h Hasher[K]) Hash(key K) uint64 {
+ // promise to the compiler that pointer
+ // |p| does not escape the stack.
+ p := noescape(unsafe.Pointer(&key))
+ return uint64(h.hash(p, h.seed))
+}
diff --git a/vendor/github.com/dolthub/maphash/runtime.go b/vendor/github.com/dolthub/maphash/runtime.go
new file mode 100644
index 00000000..29cd6a8e
--- /dev/null
+++ b/vendor/github.com/dolthub/maphash/runtime.go
@@ -0,0 +1,111 @@
+// Copyright 2022 Dolthub, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file incorporates work covered by the following copyright and
+// permission notice:
+//
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18 || go1.19
+// +build go1.18 go1.19
+
+package maphash
+
+import (
+ "math/rand"
+ "unsafe"
+)
+
+type hashfn func(unsafe.Pointer, uintptr) uintptr
+
+func getRuntimeHasher[K comparable]() (h hashfn) {
+ a := any(make(map[K]struct{}))
+ i := (*mapiface)(unsafe.Pointer(&a))
+ h = i.typ.hasher
+ return
+}
+
+func newHashSeed() uintptr {
+ return uintptr(rand.Int())
+}
+
+// noescape hides a pointer from escape analysis. It is the identity function
+// but escape analysis doesn't think the output depends on the input.
+// noescape is inlined and currently compiles down to zero instructions.
+// USE CAREFULLY!
+// This was copied from the runtime (via pkg "strings"); see issues 23382 and 7921.
+//
+//go:nosplit
+//go:nocheckptr
+func noescape(p unsafe.Pointer) unsafe.Pointer {
+ x := uintptr(p)
+ return unsafe.Pointer(x ^ 0)
+}
+
+type mapiface struct {
+ typ *maptype
+ val *hmap
+}
+
+// go/src/runtime/type.go
+type maptype struct {
+ typ _type
+ key *_type
+ elem *_type
+ bucket *_type
+ // function for hashing keys (ptr to key, seed) -> hash
+ hasher func(unsafe.Pointer, uintptr) uintptr
+ keysize uint8
+ elemsize uint8
+ bucketsize uint16
+ flags uint32
+}
+
+// go/src/runtime/map.go
+type hmap struct {
+ count int
+ flags uint8
+ B uint8
+ noverflow uint16
+ // hash seed
+ hash0 uint32
+ buckets unsafe.Pointer
+ oldbuckets unsafe.Pointer
+ nevacuate uintptr
+ // true type is *mapextra
+ // but we don't need this data
+ extra unsafe.Pointer
+}
+
+// go/src/runtime/type.go
+type tflag uint8
+type nameOff int32
+type typeOff int32
+
+// go/src/runtime/type.go
+type _type struct {
+ size uintptr
+ ptrdata uintptr
+ hash uint32
+ tflag tflag
+ align uint8
+ fieldAlign uint8
+ kind uint8
+ equal func(unsafe.Pointer, unsafe.Pointer) bool
+ gcdata *byte
+ str nameOff
+ ptrToThis typeOff
+}
diff --git a/vendor/github.com/dolthub/swiss/.gitignore b/vendor/github.com/dolthub/swiss/.gitignore
new file mode 100644
index 00000000..1f9adf93
--- /dev/null
+++ b/vendor/github.com/dolthub/swiss/.gitignore
@@ -0,0 +1,5 @@
+**/.idea/
+.vscode
+.run
+venv
+.DS_Store
\ No newline at end of file
diff --git a/vendor/github.com/dolthub/swiss/LICENSE b/vendor/github.com/dolthub/swiss/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/github.com/dolthub/swiss/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/dolthub/swiss/README.md b/vendor/github.com/dolthub/swiss/README.md
new file mode 100644
index 00000000..71c6f7dd
--- /dev/null
+++ b/vendor/github.com/dolthub/swiss/README.md
@@ -0,0 +1,54 @@
+# SwissMap
+
+SwissMap is a hash table adapated from the "SwissTable" family of hash tables from [Abseil](https://abseil.io/blog/20180927-swisstables). It uses [AES](https://github.com/dolthub/maphash) instructions for fast-hashing and performs key lookups in parallel using [SSE](https://en.wikipedia.org/wiki/Streaming_SIMD_Extensions) instructions. Because of these optimizations, SwissMap is faster and more memory efficient than Golang's built-in `map`. If you'd like to learn more about its design and implementation, check out this [blog post](https://www.dolthub.com/blog/2023-03-28-swiss-map/) announcing its release.
+
+
+## Example
+
+SwissMap exposes the same interface as the built-in `map`. Give it a try using this [Go playground](https://go.dev/play/p/JPDC5WhYN7g).
+
+```go
+package main
+
+import "github.com/dolthub/swiss"
+
+func main() {
+ m := swiss.NewMap[string, int](42)
+
+ m.Put("foo", 1)
+ m.Put("bar", 2)
+
+ m.Iter(func(k string, v int) (stop bool) {
+ println("iter", k, v)
+ return false // continue
+ })
+
+ if x, ok := m.Get("foo"); ok {
+ println(x)
+ }
+ if m.Has("bar") {
+ x, _ := m.Get("bar")
+ println(x)
+ }
+
+ m.Put("foo", -1)
+ m.Delete("bar")
+
+ if x, ok := m.Get("foo"); ok {
+ println(x)
+ }
+ if m.Has("bar") {
+ x, _ := m.Get("bar")
+ println(x)
+ }
+
+ m.Clear()
+
+ // Output:
+ // iter foo 1
+ // iter bar 2
+ // 1
+ // 2
+ // -1
+}
+```
diff --git a/vendor/github.com/dolthub/swiss/bits.go b/vendor/github.com/dolthub/swiss/bits.go
new file mode 100644
index 00000000..f435b6dc
--- /dev/null
+++ b/vendor/github.com/dolthub/swiss/bits.go
@@ -0,0 +1,58 @@
+// Copyright 2023 Dolthub, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !amd64 || nosimd
+
+package swiss
+
+import (
+ "math/bits"
+ "unsafe"
+)
+
+const (
+ groupSize = 8
+ maxAvgGroupLoad = 7
+
+ loBits uint64 = 0x0101010101010101
+ hiBits uint64 = 0x8080808080808080
+)
+
+type bitset uint64
+
+func metaMatchH2(m *metadata, h h2) bitset {
+ // https://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
+ return hasZeroByte(castUint64(m) ^ (loBits * uint64(h)))
+}
+
+func metaMatchEmpty(m *metadata) bitset {
+ return hasZeroByte(castUint64(m) ^ hiBits)
+}
+
+func nextMatch(b *bitset) uint32 {
+ s := uint32(bits.TrailingZeros64(uint64(*b)))
+ *b &= ^(1 << s) // clear bit |s|
+ return s >> 3 // div by 8
+}
+
+func hasZeroByte(x uint64) bitset {
+ return bitset(((x - loBits) & ^(x)) & hiBits)
+}
+
+func castUint64(m *metadata) uint64 {
+ return *(*uint64)((unsafe.Pointer)(m))
+}
+
+//go:linkname fastrand runtime.fastrand
+func fastrand() uint32
diff --git a/vendor/github.com/dolthub/swiss/bits_amd64.go b/vendor/github.com/dolthub/swiss/bits_amd64.go
new file mode 100644
index 00000000..8b91f57c
--- /dev/null
+++ b/vendor/github.com/dolthub/swiss/bits_amd64.go
@@ -0,0 +1,50 @@
+// Copyright 2023 Dolthub, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build amd64 && !nosimd
+
+package swiss
+
+import (
+ "math/bits"
+ _ "unsafe"
+
+ "github.com/dolthub/swiss/simd"
+)
+
+const (
+ groupSize = 16
+ maxAvgGroupLoad = 14
+)
+
+type bitset uint16
+
+func metaMatchH2(m *metadata, h h2) bitset {
+ b := simd.MatchMetadata((*[16]int8)(m), int8(h))
+ return bitset(b)
+}
+
+func metaMatchEmpty(m *metadata) bitset {
+ b := simd.MatchMetadata((*[16]int8)(m), empty)
+ return bitset(b)
+}
+
+func nextMatch(b *bitset) (s uint32) {
+ s = uint32(bits.TrailingZeros16(uint16(*b)))
+ *b &= ^(1 << s) // clear bit |s|
+ return
+}
+
+//go:linkname fastrand runtime.fastrand
+func fastrand() uint32
diff --git a/vendor/github.com/dolthub/swiss/map.go b/vendor/github.com/dolthub/swiss/map.go
new file mode 100644
index 00000000..e5ad2038
--- /dev/null
+++ b/vendor/github.com/dolthub/swiss/map.go
@@ -0,0 +1,359 @@
+// Copyright 2023 Dolthub, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swiss
+
+import (
+ "github.com/dolthub/maphash"
+)
+
+const (
+ maxLoadFactor = float32(maxAvgGroupLoad) / float32(groupSize)
+)
+
+// Map is an open-addressing hash map
+// based on Abseil's flat_hash_map.
+type Map[K comparable, V any] struct {
+ ctrl []metadata
+ groups []group[K, V]
+ hash maphash.Hasher[K]
+ resident uint32
+ dead uint32
+ limit uint32
+}
+
+// metadata is the h2 metadata array for a group.
+// find operations first probe the controls bytes
+// to filter candidates before matching keys
+type metadata [groupSize]int8
+
+// group is a group of 16 key-value pairs
+type group[K comparable, V any] struct {
+ keys [groupSize]K
+ values [groupSize]V
+}
+
+const (
+ h1Mask uint64 = 0xffff_ffff_ffff_ff80
+ h2Mask uint64 = 0x0000_0000_0000_007f
+ empty int8 = -128 // 0b1000_0000
+ tombstone int8 = -2 // 0b1111_1110
+)
+
+// h1 is a 57 bit hash prefix
+type h1 uint64
+
+// h2 is a 7 bit hash suffix
+type h2 int8
+
+// NewMap constructs a Map.
+func NewMap[K comparable, V any](sz uint32) (m *Map[K, V]) {
+ groups := numGroups(sz)
+ m = &Map[K, V]{
+ ctrl: make([]metadata, groups),
+ groups: make([]group[K, V], groups),
+ hash: maphash.NewHasher[K](),
+ limit: groups * maxAvgGroupLoad,
+ }
+ for i := range m.ctrl {
+ m.ctrl[i] = newEmptyMetadata()
+ }
+ return
+}
+
+// Has returns true if |key| is present in |m|.
+func (m *Map[K, V]) Has(key K) (ok bool) {
+ hi, lo := splitHash(m.hash.Hash(key))
+ g := probeStart(hi, len(m.groups))
+ for { // inlined find loop
+ matches := metaMatchH2(&m.ctrl[g], lo)
+ for matches != 0 {
+ s := nextMatch(&matches)
+ if key == m.groups[g].keys[s] {
+ ok = true
+ return
+ }
+ }
+ // |key| is not in group |g|,
+ // stop probing if we see an empty slot
+ matches = metaMatchEmpty(&m.ctrl[g])
+ if matches != 0 {
+ ok = false
+ return
+ }
+ g += 1 // linear probing
+ if g >= uint32(len(m.groups)) {
+ g = 0
+ }
+ }
+}
+
+// Get returns the |value| mapped by |key| if one exists.
+func (m *Map[K, V]) Get(key K) (value V, ok bool) {
+ hi, lo := splitHash(m.hash.Hash(key))
+ g := probeStart(hi, len(m.groups))
+ for { // inlined find loop
+ matches := metaMatchH2(&m.ctrl[g], lo)
+ for matches != 0 {
+ s := nextMatch(&matches)
+ if key == m.groups[g].keys[s] {
+ value, ok = m.groups[g].values[s], true
+ return
+ }
+ }
+ // |key| is not in group |g|,
+ // stop probing if we see an empty slot
+ matches = metaMatchEmpty(&m.ctrl[g])
+ if matches != 0 {
+ ok = false
+ return
+ }
+ g += 1 // linear probing
+ if g >= uint32(len(m.groups)) {
+ g = 0
+ }
+ }
+}
+
+// Put attempts to insert |key| and |value|
+func (m *Map[K, V]) Put(key K, value V) {
+ if m.resident >= m.limit {
+ m.rehash(m.nextSize())
+ }
+ hi, lo := splitHash(m.hash.Hash(key))
+ g := probeStart(hi, len(m.groups))
+ for { // inlined find loop
+ matches := metaMatchH2(&m.ctrl[g], lo)
+ for matches != 0 {
+ s := nextMatch(&matches)
+ if key == m.groups[g].keys[s] { // update
+ m.groups[g].keys[s] = key
+ m.groups[g].values[s] = value
+ return
+ }
+ }
+ // |key| is not in group |g|,
+ // stop probing if we see an empty slot
+ matches = metaMatchEmpty(&m.ctrl[g])
+ if matches != 0 { // insert
+ s := nextMatch(&matches)
+ m.groups[g].keys[s] = key
+ m.groups[g].values[s] = value
+ m.ctrl[g][s] = int8(lo)
+ m.resident++
+ return
+ }
+ g += 1 // linear probing
+ if g >= uint32(len(m.groups)) {
+ g = 0
+ }
+ }
+}
+
+// Delete attempts to remove |key|, returns true on success.
+func (m *Map[K, V]) Delete(key K) (ok bool) {
+ hi, lo := splitHash(m.hash.Hash(key))
+ g := probeStart(hi, len(m.groups))
+ for {
+ matches := metaMatchH2(&m.ctrl[g], lo)
+ for matches != 0 {
+ s := nextMatch(&matches)
+ if key == m.groups[g].keys[s] {
+ ok = true
+ // optimization: if |m.ctrl[g]| contains any empty
+ // metadata bytes, we can physically delete |key|
+ // rather than placing a tombstone.
+ // The observation is that any probes into group |g|
+ // would already be terminated by the existing empty
+ // slot, and therefore reclaiming slot |s| will not
+ // cause premature termination of probes into |g|.
+ if metaMatchEmpty(&m.ctrl[g]) != 0 {
+ m.ctrl[g][s] = empty
+ m.resident--
+ } else {
+ m.ctrl[g][s] = tombstone
+ m.dead++
+ }
+ var k K
+ var v V
+ m.groups[g].keys[s] = k
+ m.groups[g].values[s] = v
+ return
+ }
+ }
+ // |key| is not in group |g|,
+ // stop probing if we see an empty slot
+ matches = metaMatchEmpty(&m.ctrl[g])
+ if matches != 0 { // |key| absent
+ ok = false
+ return
+ }
+ g += 1 // linear probing
+ if g >= uint32(len(m.groups)) {
+ g = 0
+ }
+ }
+}
+
+// Iter iterates the elements of the Map, passing them to the callback.
+// It guarantees that any key in the Map will be visited only once, and
+// for un-mutated Maps, every key will be visited once. If the Map is
+// mutated during iteration, mutations will be reflected on return from
+// Iter, but the set of keys visited by Iter is non-deterministic.
+func (m *Map[K, V]) Iter(cb func(k K, v V) (stop bool)) {
+ // take a consistent view of the table in case
+ // we rehash during iteration
+ ctrl, groups := m.ctrl, m.groups
+ // pick a random starting group
+ g := randIntN(len(groups))
+ for n := 0; n < len(groups); n++ {
+ for s, c := range ctrl[g] {
+ if c == empty || c == tombstone {
+ continue
+ }
+ k, v := groups[g].keys[s], groups[g].values[s]
+ if stop := cb(k, v); stop {
+ return
+ }
+ }
+ g++
+ if g >= uint32(len(groups)) {
+ g = 0
+ }
+ }
+}
+
+// Clear removes all elements from the Map.
+func (m *Map[K, V]) Clear() {
+ for i, c := range m.ctrl {
+ for j := range c {
+ m.ctrl[i][j] = empty
+ }
+ }
+ var k K
+ var v V
+ for i := range m.groups {
+ g := &m.groups[i]
+ for i := range g.keys {
+ g.keys[i] = k
+ g.values[i] = v
+ }
+ }
+ m.resident, m.dead = 0, 0
+}
+
+// Count returns the number of elements in the Map.
+func (m *Map[K, V]) Count() int {
+ return int(m.resident - m.dead)
+}
+
+// Capacity returns the number of additional elements
+// that can be added to the Map before resizing.
+func (m *Map[K, V]) Capacity() int {
+ return int(m.limit - m.resident)
+}
+
+// find returns the location of |key| if present, or its insertion location if absent.
+// for performance, find is manually inlined into public methods.
+func (m *Map[K, V]) find(key K, hi h1, lo h2) (g, s uint32, ok bool) {
+ g = probeStart(hi, len(m.groups))
+ for {
+ matches := metaMatchH2(&m.ctrl[g], lo)
+ for matches != 0 {
+ s = nextMatch(&matches)
+ if key == m.groups[g].keys[s] {
+ return g, s, true
+ }
+ }
+ // |key| is not in group |g|,
+ // stop probing if we see an empty slot
+ matches = metaMatchEmpty(&m.ctrl[g])
+ if matches != 0 {
+ s = nextMatch(&matches)
+ return g, s, false
+ }
+ g += 1 // linear probing
+ if g >= uint32(len(m.groups)) {
+ g = 0
+ }
+ }
+}
+
+func (m *Map[K, V]) nextSize() (n uint32) {
+ n = uint32(len(m.groups)) * 2
+ if m.dead >= (m.resident / 2) {
+ n = uint32(len(m.groups))
+ }
+ return
+}
+
+func (m *Map[K, V]) rehash(n uint32) {
+ groups, ctrl := m.groups, m.ctrl
+ m.groups = make([]group[K, V], n)
+ m.ctrl = make([]metadata, n)
+ for i := range m.ctrl {
+ m.ctrl[i] = newEmptyMetadata()
+ }
+ m.hash = maphash.NewSeed(m.hash)
+ m.limit = n * maxAvgGroupLoad
+ m.resident, m.dead = 0, 0
+ for g := range ctrl {
+ for s := range ctrl[g] {
+ c := ctrl[g][s]
+ if c == empty || c == tombstone {
+ continue
+ }
+ m.Put(groups[g].keys[s], groups[g].values[s])
+ }
+ }
+}
+
+func (m *Map[K, V]) loadFactor() float32 {
+ slots := float32(len(m.groups) * groupSize)
+ return float32(m.resident-m.dead) / slots
+}
+
+// numGroups returns the minimum number of groups needed to store |n| elems.
+func numGroups(n uint32) (groups uint32) {
+ groups = (n + maxAvgGroupLoad - 1) / maxAvgGroupLoad
+ if groups == 0 {
+ groups = 1
+ }
+ return
+}
+
+func newEmptyMetadata() (meta metadata) {
+ for i := range meta {
+ meta[i] = empty
+ }
+ return
+}
+
+func splitHash(h uint64) (h1, h2) {
+ return h1((h & h1Mask) >> 7), h2(h & h2Mask)
+}
+
+func probeStart(hi h1, groups int) uint32 {
+ return fastModN(uint32(hi), uint32(groups))
+}
+
+// lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
+func fastModN(x, n uint32) uint32 {
+ return uint32((uint64(x) * uint64(n)) >> 32)
+}
+
+// randIntN returns a random number in the interval [0, n).
+func randIntN(n int) uint32 {
+ return fastModN(fastrand(), uint32(n))
+}
diff --git a/vendor/github.com/dolthub/swiss/simd/match.s b/vendor/github.com/dolthub/swiss/simd/match.s
new file mode 100644
index 00000000..4ae29e77
--- /dev/null
+++ b/vendor/github.com/dolthub/swiss/simd/match.s
@@ -0,0 +1,19 @@
+// Code generated by command: go run asm.go -out match.s -stubs match_amd64.go. DO NOT EDIT.
+
+//go:build amd64
+
+#include "textflag.h"
+
+// func MatchMetadata(metadata *[16]int8, hash int8) uint16
+// Requires: SSE2, SSSE3
+TEXT ·MatchMetadata(SB), NOSPLIT, $0-18
+ MOVQ metadata+0(FP), AX
+ MOVBLSX hash+8(FP), CX
+ MOVD CX, X0
+ PXOR X1, X1
+ PSHUFB X1, X0
+ MOVOU (AX), X1
+ PCMPEQB X1, X0
+ PMOVMSKB X0, AX
+ MOVW AX, ret+16(FP)
+ RET
diff --git a/vendor/github.com/dolthub/swiss/simd/match_amd64.go b/vendor/github.com/dolthub/swiss/simd/match_amd64.go
new file mode 100644
index 00000000..538c8e12
--- /dev/null
+++ b/vendor/github.com/dolthub/swiss/simd/match_amd64.go
@@ -0,0 +1,9 @@
+// Code generated by command: go run asm.go -out match.s -stubs match_amd64.go. DO NOT EDIT.
+
+//go:build amd64
+
+package simd
+
+// MatchMetadata performs a 16-way probe of |metadata| using SSE instructions
+// nb: |metadata| must be an aligned pointer
+func MatchMetadata(metadata *[16]int8, hash int8) uint16
diff --git a/vendor/github.com/gabriel-vasile/mimetype/README.md b/vendor/github.com/gabriel-vasile/mimetype/README.md
index fd6c533e..aa88b4bd 100644
--- a/vendor/github.com/gabriel-vasile/mimetype/README.md
+++ b/vendor/github.com/gabriel-vasile/mimetype/README.md
@@ -81,7 +81,7 @@ To prevent loading entire files into memory, when detecting from a
or from a [file](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#DetectFile)
**mimetype** limits itself to reading only the header of the input.
-
+
## Performance
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go b/vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go
index ee39349a..5b2ecee4 100644
--- a/vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go
@@ -34,6 +34,7 @@ package json
import (
"fmt"
+ "sync"
)
type (
@@ -73,10 +74,31 @@ type (
}
)
+var scannerPool = sync.Pool{
+ New: func() any {
+ return &scanner{}
+ },
+}
+
+func newScanner() *scanner {
+ s := scannerPool.Get().(*scanner)
+ s.reset()
+ return s
+}
+
+func freeScanner(s *scanner) {
+ // Avoid hanging on to too much memory in extreme cases.
+ if len(s.parseState) > 1024 {
+ s.parseState = nil
+ }
+ scannerPool.Put(s)
+}
+
// Scan returns the number of bytes scanned and if there was any error
// in trying to reach the end of data.
func Scan(data []byte) (int, error) {
- s := &scanner{}
+ s := newScanner()
+ defer freeScanner(s)
_ = checkValid(data, s)
return s.index, s.err
}
@@ -84,7 +106,6 @@ func Scan(data []byte) (int, error) {
// checkValid verifies that data is valid JSON-encoded data.
// scan is passed in for use by checkValid to avoid an allocation.
func checkValid(data []byte, scan *scanner) error {
- scan.reset()
for _, c := range data {
scan.index++
if scan.step(scan, c) == scanError {
@@ -105,6 +126,8 @@ func (s *scanner) reset() {
s.step = stateBeginValue
s.parseState = s.parseState[0:0]
s.err = nil
+ s.endTop = false
+ s.index = 0
}
// eof tells the scanner that the end of input has been reached.
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go
index 554ac4d4..b59042c6 100644
--- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go
@@ -3,7 +3,6 @@ package magic
import (
"bytes"
"encoding/binary"
- "strconv"
)
var (
@@ -110,8 +109,8 @@ func Tar(raw []byte, _ uint32) bool {
}
// Get the checksum recorded into the file.
- recsum, err := tarParseOctal(raw[148:156])
- if err != nil {
+ recsum := tarParseOctal(raw[148:156])
+ if recsum == -1 {
return false
}
sum1, sum2 := tarChksum(raw)
@@ -119,28 +118,26 @@ func Tar(raw []byte, _ uint32) bool {
}
// tarParseOctal converts octal string to decimal int.
-func tarParseOctal(b []byte) (int64, error) {
+func tarParseOctal(b []byte) int64 {
// Because unused fields are filled with NULs, we need to skip leading NULs.
// Fields may also be padded with spaces or NULs.
// So we remove leading and trailing NULs and spaces to be sure.
b = bytes.Trim(b, " \x00")
if len(b) == 0 {
- return 0, nil
+ return -1
}
- x, err := strconv.ParseUint(tarParseString(b), 8, 64)
- if err != nil {
- return 0, err
- }
- return int64(x), nil
-}
-
-// tarParseString converts a NUL ended bytes slice to a string.
-func tarParseString(b []byte) string {
- if i := bytes.IndexByte(b, 0); i >= 0 {
- return string(b[:i])
+ ret := int64(0)
+ for _, b := range b {
+ if b == 0 {
+ break
+ }
+ if !(b >= '0' && b <= '7') {
+ return -1
+ }
+ ret = (ret << 3) | int64(b-'0')
}
- return string(b)
+ return ret
}
// tarChksum computes the checksum for the header block b.
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go
index 5964ce59..a1180173 100644
--- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go
@@ -6,41 +6,41 @@ import (
)
var (
- xlsxSigFiles = []string{
- "xl/worksheets/",
- "xl/drawings/",
- "xl/theme/",
- "xl/_rels/",
- "xl/styles.xml",
- "xl/workbook.xml",
- "xl/sharedStrings.xml",
- }
- docxSigFiles = []string{
- "word/media/",
- "word/_rels/document.xml.rels",
- "word/document.xml",
- "word/styles.xml",
- "word/fontTable.xml",
- "word/settings.xml",
- "word/numbering.xml",
- "word/header",
- "word/footer",
- }
- pptxSigFiles = []string{
- "ppt/slides/",
- "ppt/media/",
- "ppt/slideLayouts/",
- "ppt/theme/",
- "ppt/slideMasters/",
- "ppt/tags/",
- "ppt/notesMasters/",
- "ppt/_rels/",
- "ppt/handoutMasters/",
- "ppt/notesSlides/",
- "ppt/presentation.xml",
- "ppt/tableStyles.xml",
- "ppt/presProps.xml",
- "ppt/viewProps.xml",
+ xlsxSigFiles = [][]byte{
+ []byte("xl/worksheets/"),
+ []byte("xl/drawings/"),
+ []byte("xl/theme/"),
+ []byte("xl/_rels/"),
+ []byte("xl/styles.xml"),
+ []byte("xl/workbook.xml"),
+ []byte("xl/sharedStrings.xml"),
+ }
+ docxSigFiles = [][]byte{
+ []byte("word/media/"),
+ []byte("word/_rels/document.xml.rels"),
+ []byte("word/document.xml"),
+ []byte("word/styles.xml"),
+ []byte("word/fontTable.xml"),
+ []byte("word/settings.xml"),
+ []byte("word/numbering.xml"),
+ []byte("word/header"),
+ []byte("word/footer"),
+ }
+ pptxSigFiles = [][]byte{
+ []byte("ppt/slides/"),
+ []byte("ppt/media/"),
+ []byte("ppt/slideLayouts/"),
+ []byte("ppt/theme/"),
+ []byte("ppt/slideMasters/"),
+ []byte("ppt/tags/"),
+ []byte("ppt/notesMasters/"),
+ []byte("ppt/_rels/"),
+ []byte("ppt/handoutMasters/"),
+ []byte("ppt/notesSlides/"),
+ []byte("ppt/presentation.xml"),
+ []byte("ppt/tableStyles.xml"),
+ []byte("ppt/presProps.xml"),
+ []byte("ppt/viewProps.xml"),
}
)
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go
index 9f1a637b..cf644639 100644
--- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go
@@ -120,7 +120,7 @@ var (
[]byte("/usr/bin/env wish"),
)
// Rtf matches a Rich Text Format file.
- Rtf = prefix([]byte("{\\rtf1"))
+ Rtf = prefix([]byte("{\\rtf"))
)
// Text matches a plain text file.
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/zip.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/zip.go
index dabee947..aaa27559 100644
--- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/zip.go
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/zip.go
@@ -3,7 +3,6 @@ package magic
import (
"bytes"
"encoding/binary"
- "strings"
)
var (
@@ -43,7 +42,7 @@ func Zip(raw []byte, limit uint32) bool {
// Jar matches a Java archive file.
func Jar(raw []byte, limit uint32) bool {
- return zipContains(raw, "META-INF/MANIFEST.MF")
+ return zipContains(raw, []byte("META-INF/MANIFEST.MF"))
}
// zipTokenizer holds the source zip file and scanned index.
@@ -54,7 +53,7 @@ type zipTokenizer struct {
// next returns the next file name from the zip headers.
// https://web.archive.org/web/20191129114319/https://users.cs.jmu.edu/buchhofp/forensics/formats/pkzip.html
-func (t *zipTokenizer) next() (fileName string) {
+func (t *zipTokenizer) next() (fileName []byte) {
if t.i > len(t.in) {
return
}
@@ -74,15 +73,15 @@ func (t *zipTokenizer) next() (fileName string) {
return
}
t.i += fNameOffset + fNameLen
- return string(in[fNameOffset : fNameOffset+fNameLen])
+ return in[fNameOffset : fNameOffset+fNameLen]
}
// zipContains returns true if the zip file headers from in contain any of the paths.
-func zipContains(in []byte, paths ...string) bool {
+func zipContains(in []byte, paths ...[]byte) bool {
t := zipTokenizer{in: in}
- for i, tok := 0, t.next(); tok != ""; i, tok = i+1, t.next() {
+ for tok := t.next(); len(tok) != 0; tok = t.next() {
for p := range paths {
- if strings.HasPrefix(tok, paths[p]) {
+ if bytes.HasPrefix(tok, paths[p]) {
return true
}
}
diff --git a/vendor/github.com/gabriel-vasile/mimetype/mimetype.gif b/vendor/github.com/gabriel-vasile/mimetype/mimetype.gif
deleted file mode 100644
index c3e80876..00000000
Binary files a/vendor/github.com/gabriel-vasile/mimetype/mimetype.gif and /dev/null differ
diff --git a/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md b/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md
index 5ec6f6b6..0a24ba38 100644
--- a/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md
+++ b/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md
@@ -167,7 +167,7 @@ Extension | MIME type | Aliases
**.geojson** | application/geo+json | -
**.har** | application/json | -
**.ndjson** | application/x-ndjson | -
-**.rtf** | text/rtf | -
+**.rtf** | text/rtf | application/rtf
**.srt** | application/x-subrip | application/x-srt, text/x-srt
**.tcl** | text/x-tcl | application/x-tcl
**.csv** | text/csv | -
diff --git a/vendor/github.com/gabriel-vasile/mimetype/tree.go b/vendor/github.com/gabriel-vasile/mimetype/tree.go
index 253bd006..75dcd7b1 100644
--- a/vendor/github.com/gabriel-vasile/mimetype/tree.go
+++ b/vendor/github.com/gabriel-vasile/mimetype/tree.go
@@ -86,7 +86,7 @@ var (
ndJSON = newMIME("application/x-ndjson", ".ndjson", magic.NdJSON)
html = newMIME("text/html", ".html", magic.HTML)
php = newMIME("text/x-php", ".php", magic.Php)
- rtf = newMIME("text/rtf", ".rtf", magic.Rtf)
+ rtf = newMIME("text/rtf", ".rtf", magic.Rtf).alias("application/rtf")
js = newMIME("application/javascript", ".js", magic.Js).
alias("application/x-javascript", "text/javascript")
srt = newMIME("application/x-subrip", ".srt", magic.Srt).
diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md
index 9ab0705a..ddd65b07 100644
--- a/vendor/github.com/go-playground/validator/v10/README.md
+++ b/vendor/github.com/go-playground/validator/v10/README.md
@@ -1,7 +1,7 @@
Package validator
=================
[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-![Project status](https://img.shields.io/badge/version-10.22.0-green.svg)
+![Project status](https://img.shields.io/badge/version-10.22.1-green.svg)
[![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator)
[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator)
diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go
index b6fbaafa..d1a3656a 100644
--- a/vendor/github.com/go-playground/validator/v10/baked_in.go
+++ b/vendor/github.com/go-playground/validator/v10/baked_in.go
@@ -1828,7 +1828,14 @@ func requireCheckFieldValue(
return int64(field.Len()) == asInt(value)
case reflect.Bool:
- return field.Bool() == asBool(value)
+ return field.Bool() == (value == "true")
+
+ case reflect.Ptr:
+ if field.IsNil() {
+ return value == "nil"
+ }
+ // Handle non-nil pointers
+ return requireCheckFieldValue(fl, param, value, defaultNotFoundValue)
}
// default reflect.String:
diff --git a/vendor/github.com/hashicorp/raft/raft.go b/vendor/github.com/hashicorp/raft/raft.go
index 183f041a..cbc9a59a 100644
--- a/vendor/github.com/hashicorp/raft/raft.go
+++ b/vendor/github.com/hashicorp/raft/raft.go
@@ -1749,7 +1749,7 @@ func (r *Raft) requestPreVote(rpc RPC, req *RequestPreVoteRequest) {
}()
// Check if we have an existing leader [who's not the candidate] and also
- var candidate ServerAddress
+ candidate := r.trans.DecodePeer(req.GetRPCHeader().Addr)
candidateID := ServerID(req.ID)
// if the Servers list is empty that mean the cluster is very likely trying to bootstrap,
@@ -1805,7 +1805,6 @@ func (r *Raft) requestPreVote(rpc RPC, req *RequestPreVoteRequest) {
}
resp.Granted = true
- r.setLastContact()
}
// installSnapshot is invoked when we get a InstallSnapshot RPC call.
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index 05c7359e..684a3085 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -16,6 +16,20 @@ This package provides various compression algorithms.
# changelog
+* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9)
+ * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949
+ * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963
+ * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971
+ * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951
+
+* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8)
+ * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885
+ * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938
+
+* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7)
+ * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927
+ * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930
+
* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)
* zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923
* s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925
@@ -81,7 +95,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
- * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
+ * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
@@ -136,7 +150,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
* Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
- * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657
+ * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
@@ -339,7 +353,7 @@ While the release has been extensively tested, it is recommended to testing when
* s2: Fix binaries.
* Feb 25, 2021 (v1.11.8)
- * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended.
+ * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
@@ -518,7 +532,7 @@ While the release has been extensively tested, it is recommended to testing when
* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
* Feb 19, 2016: Handle small payloads faster in level 1-3.
* Feb 19, 2016: Added faster level 2 + 3 compression modes.
-* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
* Feb 14, 2016: Snappy: Merge upstream changes.
* Feb 14, 2016: Snappy: Fix aggressive skipping.
* Feb 14, 2016: Snappy: Update benchmark.
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
index 66d1657d..af53fb86 100644
--- a/vendor/github.com/klauspost/compress/flate/deflate.go
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -861,7 +861,7 @@ func (d *compressor) reset(w io.Writer) {
}
switch d.compressionLevel.chain {
case 0:
- // level was NoCompression or ConstantCompresssion.
+ // level was NoCompression or ConstantCompression.
d.windowEnd = 0
default:
s := d.state
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
index 2f410d64..0d7b437f 100644
--- a/vendor/github.com/klauspost/compress/flate/inflate.go
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -298,6 +298,14 @@ const (
huffmanGenericReader
)
+// flushMode tells decompressor when to return data
+type flushMode uint8
+
+const (
+ syncFlush flushMode = iota // return data after sync flush block
+ partialFlush // return data after each block
+)
+
// Decompress state.
type decompressor struct {
// Input source.
@@ -332,6 +340,8 @@ type decompressor struct {
nb uint
final bool
+
+ flushMode flushMode
}
func (f *decompressor) nextBlock() {
@@ -618,7 +628,10 @@ func (f *decompressor) dataBlock() {
}
if n == 0 {
- f.toRead = f.dict.readFlush()
+ if f.flushMode == syncFlush {
+ f.toRead = f.dict.readFlush()
+ }
+
f.finishBlock()
return
}
@@ -657,8 +670,12 @@ func (f *decompressor) finishBlock() {
if f.dict.availRead() > 0 {
f.toRead = f.dict.readFlush()
}
+
f.err = io.EOF
+ } else if f.flushMode == partialFlush && f.dict.availRead() > 0 {
+ f.toRead = f.dict.readFlush()
}
+
f.step = nextBlock
}
@@ -789,15 +806,25 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error {
return nil
}
-// NewReader returns a new ReadCloser that can be used
-// to read the uncompressed version of r.
-// If r does not also implement io.ByteReader,
-// the decompressor may read more data than necessary from r.
-// It is the caller's responsibility to call Close on the ReadCloser
-// when finished reading.
-//
-// The ReadCloser returned by NewReader also implements Resetter.
-func NewReader(r io.Reader) io.ReadCloser {
+type ReaderOpt func(*decompressor)
+
+// WithPartialBlock tells decompressor to return after each block,
+// so it can read data written with partial flush
+func WithPartialBlock() ReaderOpt {
+ return func(f *decompressor) {
+ f.flushMode = partialFlush
+ }
+}
+
+// WithDict initializes the reader with a preset dictionary
+func WithDict(dict []byte) ReaderOpt {
+ return func(f *decompressor) {
+ f.dict.init(maxMatchOffset, dict)
+ }
+}
+
+// NewReaderOpts returns new reader with provided options
+func NewReaderOpts(r io.Reader, opts ...ReaderOpt) io.ReadCloser {
fixedHuffmanDecoderInit()
var f decompressor
@@ -806,9 +833,26 @@ func NewReader(r io.Reader) io.ReadCloser {
f.codebits = new([numCodes]int)
f.step = nextBlock
f.dict.init(maxMatchOffset, nil)
+
+ for _, opt := range opts {
+ opt(&f)
+ }
+
return &f
}
+// NewReader returns a new ReadCloser that can be used
+// to read the uncompressed version of r.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+// It is the caller's responsibility to call Close on the ReadCloser
+// when finished reading.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
+func NewReader(r io.Reader) io.ReadCloser {
+ return NewReaderOpts(r)
+}
+
// NewReaderDict is like NewReader but initializes the reader
// with a preset dictionary. The returned Reader behaves as if
// the uncompressed data stream started with the given dictionary,
@@ -817,13 +861,5 @@ func NewReader(r io.Reader) io.ReadCloser {
//
// The ReadCloser returned by NewReader also implements Resetter.
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
- fixedHuffmanDecoderInit()
-
- var f decompressor
- f.r = makeReader(r)
- f.bits = new([maxNumLit + maxNumDist]int)
- f.codebits = new([numCodes]int)
- f.step = nextBlock
- f.dict.init(maxMatchOffset, dict)
- return &f
+ return NewReaderOpts(r, WithDict(dict))
}
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
index cc05d0f7..0c7dd4ff 100644
--- a/vendor/github.com/klauspost/compress/fse/decompress.go
+++ b/vendor/github.com/klauspost/compress/fse/decompress.go
@@ -15,7 +15,7 @@ const (
// It is possible, but by no way guaranteed that corrupt data will
// return an error.
// It is up to the caller to verify integrity of the returned data.
-// Use a predefined Scrach to set maximum acceptable output size.
+// Use a predefined Scratch to set maximum acceptable output size.
func Decompress(b []byte, s *Scratch) ([]byte, error) {
s, err := s.prepare(b)
if err != nil {
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 54bd08b2..0f56b02d 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
errs++
}
if errs > 0 {
- fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
+ fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
continue
}
// Ensure that all combinations are covered.
@@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) {
errs++
}
if errs > 20 {
- fmt.Fprintf(w, "%d errros, stopping\n", errs)
+ fmt.Fprintf(w, "%d errors, stopping\n", errs)
break
}
}
diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go
index 0a46f2b9..fd15078f 100644
--- a/vendor/github.com/klauspost/compress/s2/writer.go
+++ b/vendor/github.com/klauspost/compress/s2/writer.go
@@ -83,11 +83,14 @@ type Writer struct {
snappy bool
flushOnWrite bool
appendIndex bool
+ bufferCB func([]byte)
level uint8
}
type result struct {
b []byte
+ // return when writing
+ ret []byte
// Uncompressed start offset
startOffset int64
}
@@ -146,6 +149,10 @@ func (w *Writer) Reset(writer io.Writer) {
for write := range toWrite {
// Wait for the data to be available.
input := <-write
+ if input.ret != nil && w.bufferCB != nil {
+ w.bufferCB(input.ret)
+ input.ret = nil
+ }
in := input.b
if len(in) > 0 {
if w.err(nil) == nil {
@@ -341,7 +348,8 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) {
// but the input buffer cannot be written to by the caller
// until Flush or Close has been called when concurrency != 1.
//
-// If you cannot control that, use the regular Write function.
+// Use the WriterBufferDone to receive a callback when the buffer is done
+// Processing.
//
// Note that input is not buffered.
// This means that each write will result in discrete blocks being created.
@@ -364,6 +372,9 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) {
}
if w.concurrency == 1 {
_, err := w.writeSync(buf)
+ if w.bufferCB != nil {
+ w.bufferCB(buf)
+ }
return err
}
@@ -378,7 +389,7 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) {
hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes}
}
}
-
+ orgBuf := buf
for len(buf) > 0 {
// Cut input.
uncompressed := buf
@@ -397,6 +408,9 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) {
startOffset: w.uncompWritten,
}
w.uncompWritten += int64(len(uncompressed))
+ if len(buf) == 0 && w.bufferCB != nil {
+ res.ret = orgBuf
+ }
go func() {
race.ReadSlice(uncompressed)
@@ -922,7 +936,7 @@ func WriterBetterCompression() WriterOption {
}
// WriterBestCompression will enable better compression.
-// EncodeBetter compresses better than Encode but typically with a
+// EncodeBest compresses better than Encode but typically with a
// big speed decrease on compression.
func WriterBestCompression() WriterOption {
return func(w *Writer) error {
@@ -941,6 +955,17 @@ func WriterUncompressed() WriterOption {
}
}
+// WriterBufferDone will perform a callback when EncodeBuffer has finished
+// writing a buffer to the output and the buffer can safely be reused.
+// If the buffer was split into several blocks, it will be sent after the last block.
+// Callbacks will not be done concurrently.
+func WriterBufferDone(fn func(b []byte)) WriterOption {
+ return func(w *Writer) error {
+ w.bufferCB = fn
+ return nil
+ }
+}
+
// WriterBlockSize allows to override the default block size.
// Blocks will be this size or smaller.
// Minimum size is 4KB and maximum size is 4MB.
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 03744fbc..9c28840c 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -598,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
printf("RLE set to 0x%x, code: %v", symb, v)
}
case compModeFSE:
- println("Reading table for", tableIndex(i))
+ if debugDecoder {
+ println("Reading table for", tableIndex(i))
+ }
if seq.fse == nil || seq.fse.preDefined {
seq.fse = fseDecoderPool.Get().(*fseDecoder)
}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index a4f5bf91..84a79fde 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -179,9 +179,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -210,12 +210,12 @@ encodeLoop:
// Index match start+1 (long) -> s - 1
index0 := s + repOff
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -241,9 +241,9 @@ encodeLoop:
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
// Consider history as well.
var seq seq
- lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+ length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -270,11 +270,11 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff2
+ s += length + repOff2
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -708,9 +708,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -738,12 +738,12 @@ encodeLoop:
blk.sequences = append(blk.sequences, seq)
// Index match start+1 (long) -> s - 1
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -772,9 +772,9 @@ encodeLoop:
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
// Consider history as well.
var seq seq
- lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+ length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -801,11 +801,11 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff2
+ s += length + repOff2
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index a154c18f..d36be7bd 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -138,9 +138,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -166,11 +166,11 @@ encodeLoop:
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -798,9 +798,9 @@ encodeLoop:
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -826,11 +826,11 @@ encodeLoop:
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index 72af7ef0..a79c4a52 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -202,7 +202,7 @@ func (e *Encoder) nextBlock(final bool) error {
return nil
}
if final && len(s.filling) > 0 {
- s.current = e.EncodeAll(s.filling, s.current[:0])
+ s.current = e.encodeAll(s.encoder, s.filling, s.current[:0])
var n2 int
n2, s.err = s.w.Write(s.current)
if s.err != nil {
@@ -469,6 +469,15 @@ func (e *Encoder) Close() error {
// Data compressed with EncodeAll can be decoded with the Decoder,
// using either a stream or DecodeAll.
func (e *Encoder) EncodeAll(src, dst []byte) []byte {
+ e.init.Do(e.initialize)
+ enc := <-e.encoders
+ defer func() {
+ e.encoders <- enc
+ }()
+ return e.encodeAll(enc, src, dst)
+}
+
+func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte {
if len(src) == 0 {
if e.o.fullZero {
// Add frame header.
@@ -491,13 +500,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
}
return dst
}
- e.init.Do(e.initialize)
- enc := <-e.encoders
- defer func() {
- // Release encoder reference to last block.
- // If a non-single block is needed the encoder will reset again.
- e.encoders <- enc
- }()
+
// Use single segments when above minimum window and below window size.
single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
if e.o.single != nil {
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 53e160f7..e47af66e 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error {
}
return err
}
- printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+ if debugDecoder {
+ printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+ }
windowLog := 10 + (wd >> 3)
windowBase := uint64(1) << windowLog
windowAdd := (windowBase / 8) * uint64(wd&0x7)
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
index 8adabd82..c59f17e0 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
default:
- return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
+ return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
}
s.seqSize += ctx.litRemain
@@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
return io.ErrUnexpectedEOF
}
- return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
+ return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
}
if ctx.litRemain < 0 {
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
index 5b06174b..f5591fa1 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
MOVQ 40(SP), AX
ADDQ AX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R10, 32(SP)
// outBase += outPosition
@@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
MOVQ 40(SP), CX
ADDQ CX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R9, 32(SP)
// outBase += outPosition
@@ -2896,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
MOVQ 40(SP), AX
ADDQ AX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R10, 32(SP)
// outBase += outPosition
@@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
MOVQ 40(SP), CX
ADDQ CX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R9, 32(SP)
// outBase += outPosition
diff --git a/vendor/github.com/lestrrat-go/strftime/Changes b/vendor/github.com/lestrrat-go/strftime/Changes
index b86a84c4..35cef2bf 100644
--- a/vendor/github.com/lestrrat-go/strftime/Changes
+++ b/vendor/github.com/lestrrat-go/strftime/Changes
@@ -1,6 +1,13 @@
Changes
=======
+v1.1.0 - 28 Aug 2024
+[Miscellaneous]
+ * github.com/pkg/errors has been removed (it has been two years :)
+ * Updated build/test actions
+ * Updated minimum required go version to go 1.21
+ * Fix week number handling
+
v1.0.6 - 20 Apr 2022
[Miscellaneous]
* Minimum go version is now go 1.13
diff --git a/vendor/github.com/lestrrat-go/strftime/appenders.go b/vendor/github.com/lestrrat-go/strftime/appenders.go
index 2941a247..e09a3e5d 100644
--- a/vendor/github.com/lestrrat-go/strftime/appenders.go
+++ b/vendor/github.com/lestrrat-go/strftime/appenders.go
@@ -36,13 +36,13 @@ var (
secondsNumberZeroPad = StdlibFormat("05")
hms = StdlibFormat("15:04:05")
tab = Verbatim("\t")
- weekNumberSundayOrigin = weeknumberOffset(0) // week number of the year, Sunday first
+ weekNumberSundayOrigin = weeknumberOffset(true) // week number of the year, Sunday first
weekdayMondayOrigin = weekday(1)
// monday as the first day, and 01 as the first value
weekNumberMondayOriginOneOrigin = AppendFunc(appendWeekNumber)
eby = StdlibFormat("_2-Jan-2006")
// monday as the first day, and 00 as the first value
- weekNumberMondayOrigin = weeknumberOffset(1) // week number of the year, Monday first
+ weekNumberMondayOrigin = weeknumberOffset(false) // week number of the year, Monday first
weekdaySundayOrigin = weekday(0)
natReprTime = StdlibFormat("15:04:05") // national representation of the time XXX is this correct?
natReprDate = StdlibFormat("01/02/06") // national representation of the date XXX is this correct?
@@ -243,20 +243,16 @@ func (v weekday) Append(b []byte, t time.Time) []byte {
return append(b, byte(n+48))
}
-type weeknumberOffset int
+type weeknumberOffset bool
func (v weeknumberOffset) Append(b []byte, t time.Time) []byte {
- yd := t.YearDay()
- offset := int(t.Weekday()) - int(v)
- if offset < 0 {
- offset += 7
+ offset := int(t.Weekday())
+ if v {
+ offset = 6 - offset
+ } else if offset != 0 {
+ offset = 7 - offset
}
-
- if yd < offset {
- return append(b, '0', '0')
- }
-
- n := ((yd - offset) / 7) + 1
+ n := (t.YearDay() + offset) / 7
if n < 10 {
b = append(b, '0')
}
diff --git a/vendor/github.com/lestrrat-go/strftime/internal/errors/errors_fmt.go b/vendor/github.com/lestrrat-go/strftime/internal/errors/errors_fmt.go
deleted file mode 100644
index 18871c14..00000000
--- a/vendor/github.com/lestrrat-go/strftime/internal/errors/errors_fmt.go
+++ /dev/null
@@ -1,18 +0,0 @@
-//go:build strftime_native_errors
-// +build strftime_native_errors
-
-package errors
-
-import "fmt"
-
-func New(s string) error {
- return fmt.Errorf(s)
-}
-
-func Errorf(s string, args ...interface{}) error {
- return fmt.Errorf(s, args...)
-}
-
-func Wrap(err error, s string) error {
- return fmt.Errorf(s+`: %w`, err)
-}
diff --git a/vendor/github.com/lestrrat-go/strftime/internal/errors/errors_pkg.go b/vendor/github.com/lestrrat-go/strftime/internal/errors/errors_pkg.go
deleted file mode 100644
index 35797878..00000000
--- a/vendor/github.com/lestrrat-go/strftime/internal/errors/errors_pkg.go
+++ /dev/null
@@ -1,18 +0,0 @@
-//go:build !strftime_native_errors
-// +build !strftime_native_errors
-
-package errors
-
-import "github.com/pkg/errors"
-
-func New(s string) error {
- return errors.New(s)
-}
-
-func Errorf(s string, args ...interface{}) error {
- return errors.Errorf(s, args...)
-}
-
-func Wrap(err error, s string) error {
- return errors.Wrap(err, s)
-}
diff --git a/vendor/github.com/lestrrat-go/strftime/specifications.go b/vendor/github.com/lestrrat-go/strftime/specifications.go
index 2b6e11fe..3bd796a9 100644
--- a/vendor/github.com/lestrrat-go/strftime/specifications.go
+++ b/vendor/github.com/lestrrat-go/strftime/specifications.go
@@ -1,10 +1,9 @@
package strftime
import (
+ "errors"
"fmt"
"sync"
-
- "github.com/lestrrat-go/strftime/internal/errors"
)
// because there is no such thing was a sync.RWLocker
@@ -124,7 +123,7 @@ func (ds *specificationSet) Lookup(b byte) (Appender, error) {
}
v, ok := ds.store[b]
if !ok {
- return nil, errors.Errorf(`lookup failed: '%%%c' was not found in specification set`, b)
+ return nil, fmt.Errorf(`lookup failed: '%%%c' was not found in specification set`, b)
}
return v, nil
}
diff --git a/vendor/github.com/lestrrat-go/strftime/strftime.go b/vendor/github.com/lestrrat-go/strftime/strftime.go
index c869491f..3d51ac6d 100644
--- a/vendor/github.com/lestrrat-go/strftime/strftime.go
+++ b/vendor/github.com/lestrrat-go/strftime/strftime.go
@@ -1,12 +1,12 @@
package strftime
import (
+ "errors"
+ "fmt"
"io"
"strings"
"sync"
"time"
-
- "github.com/lestrrat-go/strftime/internal/errors"
)
type compileHandler interface {
@@ -62,7 +62,7 @@ func compile(handler compileHandler, p string, ds SpecificationSet) error {
specification, err := ds.Lookup(p[1])
if err != nil {
- return errors.Wrap(err, `pattern compilation failed`)
+ return fmt.Errorf("pattern compilation failed: %w", err)
}
handler.handle(specification)
@@ -127,14 +127,14 @@ func Format(p string, t time.Time, options ...Option) (string, error) {
// TODO: this may be premature optimization
ds, err := getSpecificationSetFor(options...)
if err != nil {
- return "", errors.Wrap(err, `failed to get specification set`)
+ return "", fmt.Errorf("failed to get specification set: %w", err)
}
h := getFmtAppendExecutor()
defer releasdeFmtAppendExecutor(h)
h.t = t
if err := compile(h, p, ds); err != nil {
- return "", errors.Wrap(err, `failed to compile format`)
+ return "", fmt.Errorf("failed to compile format: %w", err)
}
return string(h.dst), nil
@@ -152,14 +152,14 @@ func New(p string, options ...Option) (*Strftime, error) {
// TODO: this may be premature optimization
ds, err := getSpecificationSetFor(options...)
if err != nil {
- return nil, errors.Wrap(err, `failed to get specification set`)
+ return nil, fmt.Errorf("failed to get specification set: %w", err)
}
var h appenderListBuilder
h.list = &combiningAppend{}
if err := compile(&h, p, ds); err != nil {
- return nil, errors.Wrap(err, `failed to compile format`)
+ return nil, fmt.Errorf("failed to compile format: %w", err)
}
return &Strftime{
diff --git a/vendor/github.com/mholt/acmez/v2/acme/ari.go b/vendor/github.com/mholt/acmez/v2/acme/ari.go
index ae802ce2..93401f37 100644
--- a/vendor/github.com/mholt/acmez/v2/acme/ari.go
+++ b/vendor/github.com/mholt/acmez/v2/acme/ari.go
@@ -140,11 +140,54 @@ func (c *Client) GetRenewalInfo(ctx context.Context, leafCert *x509.Certificate)
}
var ari RenewalInfo
- resp, err := c.httpReq(ctx, http.MethodGet, c.ariEndpoint(certID), nil, &ari)
- if err != nil {
- return RenewalInfo{}, err
+ var resp *http.Response
+ for i := 0; i < 3; i++ {
+ // backoff between retries; the if is probably not needed, but just for "properness"...
+ if i > 0 {
+ select {
+ case <-ctx.Done():
+ return RenewalInfo{}, ctx.Err()
+ case <-time.After(time.Duration(i*i+1) * time.Second):
+ }
+ }
+
+ resp, err = c.httpReq(ctx, http.MethodGet, c.ariEndpoint(certID), nil, &ari)
+ if err != nil {
+ if c.Logger != nil {
+ c.Logger.Warn("error getting ARI response",
+ zap.Error(err),
+ zap.Int("attempt", i),
+ zap.Strings("names", leafCert.DNSNames))
+ }
+ continue
+ }
+
+ // "If the client receives no response or a malformed response
+ // (e.g. an end timestamp which is equal to or precedes the start
+ // timestamp), it SHOULD make its own determination of when to
+ // renew the certificate, and MAY retry the renewalInfo request
+ // with appropriate exponential backoff behavior."
+ // draft-ietf-acme-ari-04 §4.2
+ if ari.SuggestedWindow.Start.IsZero() ||
+ ari.SuggestedWindow.End.IsZero() ||
+ ari.SuggestedWindow.Start.Equal(ari.SuggestedWindow.End) ||
+ (ari.SuggestedWindow.End.Unix()-ari.SuggestedWindow.Start.Unix()-1 <= 0) {
+ if c.Logger != nil {
+ c.Logger.Debug("invalid ARI window",
+ zap.Time("start", ari.SuggestedWindow.Start),
+ zap.Time("end", ari.SuggestedWindow.End),
+ zap.Strings("names", leafCert.DNSNames))
+ }
+ continue
+ }
+
+ // valid ARI window
+ ari.UniqueIdentifier = certID
+ break
+ }
+ if err != nil || resp == nil {
+ return RenewalInfo{}, fmt.Errorf("could not get a valid ARI response; last error: %v", err)
}
- ari.UniqueIdentifier = certID
// "The server SHOULD include a Retry-After header indicating the polling
// interval that the ACME server recommends." draft-ietf-acme-ari-03 §4.2
@@ -161,7 +204,7 @@ func (c *Client) GetRenewalInfo(ctx context.Context, leafCert *x509.Certificate)
// time within the suggested window." §4.2
// TODO: It's unclear whether this time should be selected once
// or every time the client wakes to check ARI (see step 5 of the
- // recommended algorithm); I've inquired here:
+ // recommended algorithm); I've enquired here:
// https://github.com/aarongable/draft-acme-ari/issues/70
// We add 1 to the start time since we are dealing in seconds for
// simplicity, but the server may provide sub-second timestamps.
@@ -198,10 +241,10 @@ func ARIUniqueIdentifier(leafCert *x509.Certificate) (string, error) {
if leafCert.SerialNumber == nil {
return "", fmt.Errorf("no serial number")
}
- // TODO: Let's Encrypt's reference implementation switched from using
- // SerialNumber.Bytes() to this method, which seems less efficient,
- // but yields the same results !? I asked about it here:
- // https://github.com/letsencrypt/website/issues/1670
+ // use asn1.Marshal to be correct even when the leading byte is 0x80
+ // or greater to ensure the number is interpreted as positive; note that
+ // SerialNumber.Bytes() does not account for this because it is a nuance
+ // of ASN.1 DER encodings. See https://github.com/letsencrypt/website/issues/1670.
serialDER, err := asn1.Marshal(leafCert.SerialNumber)
if err != nil {
return "", err
diff --git a/vendor/github.com/mholt/acmez/v2/acme/certificate.go b/vendor/github.com/mholt/acmez/v2/acme/certificate.go
index 29c624af..97662c61 100644
--- a/vendor/github.com/mholt/acmez/v2/acme/certificate.go
+++ b/vendor/github.com/mholt/acmez/v2/acme/certificate.go
@@ -51,6 +51,15 @@ type Certificate struct {
// the certificate for restoring a lost ACME client config.
CA string `json:"ca,omitempty"`
+ // The location of the account that obtained the certificate.
+ // This field is not part of the ACME spec, but it can be
+ // useful for management; for example, ARI recommends that
+ // servers enforce that the same account be used to indicate
+ // a replacement as was used to obtain the original cert.
+ // This field is set even when ARI is not enabled, for
+ // reference/troubleshooting purposes.
+ Account string `json:"account,omitempty"`
+
// When to renew the certificate, and related info, as
// prescribed by ARI.
RenewalInfo *RenewalInfo `json:"renewal_info,omitempty"`
@@ -99,6 +108,7 @@ func (c *Client) GetCertificateChain(ctx context.Context, account Account, certU
URL: certURL,
ChainPEM: chainPEM,
CA: c.Directory,
+ Account: account.Location,
}
// attach renewal information, if applicable (draft-ietf-acme-ari-03)
diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md
index 10ddda14..8d5a2a47 100644
--- a/vendor/github.com/miekg/dns/README.md
+++ b/vendor/github.com/miekg/dns/README.md
@@ -148,6 +148,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository.
* 3225 - DO bit (DNSSEC OK)
* 340{1,2,3} - NAPTR record
* 3445 - Limiting the scope of (DNS)KEY
+* 3596 - AAAA record
* 3597 - Unknown RRs
* 4025 - A Method for Storing IPsec Keying Material in DNS
* 403{3,4,5} - DNSSEC + validation functions
diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go
index 1b58e8f0..c1bbdaae 100644
--- a/vendor/github.com/miekg/dns/edns.go
+++ b/vendor/github.com/miekg/dns/edns.go
@@ -756,36 +756,48 @@ const (
ExtendedErrorCodeNoReachableAuthority
ExtendedErrorCodeNetworkError
ExtendedErrorCodeInvalidData
+ ExtendedErrorCodeSignatureExpiredBeforeValid
+ ExtendedErrorCodeTooEarly
+ ExtendedErrorCodeUnsupportedNSEC3IterValue
+ ExtendedErrorCodeUnableToConformToPolicy
+ ExtendedErrorCodeSynthesized
+ ExtendedErrorCodeInvalidQueryType
)
// ExtendedErrorCodeToString maps extended error info codes to a human readable
// description.
var ExtendedErrorCodeToString = map[uint16]string{
- ExtendedErrorCodeOther: "Other",
- ExtendedErrorCodeUnsupportedDNSKEYAlgorithm: "Unsupported DNSKEY Algorithm",
- ExtendedErrorCodeUnsupportedDSDigestType: "Unsupported DS Digest Type",
- ExtendedErrorCodeStaleAnswer: "Stale Answer",
- ExtendedErrorCodeForgedAnswer: "Forged Answer",
- ExtendedErrorCodeDNSSECIndeterminate: "DNSSEC Indeterminate",
- ExtendedErrorCodeDNSBogus: "DNSSEC Bogus",
- ExtendedErrorCodeSignatureExpired: "Signature Expired",
- ExtendedErrorCodeSignatureNotYetValid: "Signature Not Yet Valid",
- ExtendedErrorCodeDNSKEYMissing: "DNSKEY Missing",
- ExtendedErrorCodeRRSIGsMissing: "RRSIGs Missing",
- ExtendedErrorCodeNoZoneKeyBitSet: "No Zone Key Bit Set",
- ExtendedErrorCodeNSECMissing: "NSEC Missing",
- ExtendedErrorCodeCachedError: "Cached Error",
- ExtendedErrorCodeNotReady: "Not Ready",
- ExtendedErrorCodeBlocked: "Blocked",
- ExtendedErrorCodeCensored: "Censored",
- ExtendedErrorCodeFiltered: "Filtered",
- ExtendedErrorCodeProhibited: "Prohibited",
- ExtendedErrorCodeStaleNXDOMAINAnswer: "Stale NXDOMAIN Answer",
- ExtendedErrorCodeNotAuthoritative: "Not Authoritative",
- ExtendedErrorCodeNotSupported: "Not Supported",
- ExtendedErrorCodeNoReachableAuthority: "No Reachable Authority",
- ExtendedErrorCodeNetworkError: "Network Error",
- ExtendedErrorCodeInvalidData: "Invalid Data",
+ ExtendedErrorCodeOther: "Other",
+ ExtendedErrorCodeUnsupportedDNSKEYAlgorithm: "Unsupported DNSKEY Algorithm",
+ ExtendedErrorCodeUnsupportedDSDigestType: "Unsupported DS Digest Type",
+ ExtendedErrorCodeStaleAnswer: "Stale Answer",
+ ExtendedErrorCodeForgedAnswer: "Forged Answer",
+ ExtendedErrorCodeDNSSECIndeterminate: "DNSSEC Indeterminate",
+ ExtendedErrorCodeDNSBogus: "DNSSEC Bogus",
+ ExtendedErrorCodeSignatureExpired: "Signature Expired",
+ ExtendedErrorCodeSignatureNotYetValid: "Signature Not Yet Valid",
+ ExtendedErrorCodeDNSKEYMissing: "DNSKEY Missing",
+ ExtendedErrorCodeRRSIGsMissing: "RRSIGs Missing",
+ ExtendedErrorCodeNoZoneKeyBitSet: "No Zone Key Bit Set",
+ ExtendedErrorCodeNSECMissing: "NSEC Missing",
+ ExtendedErrorCodeCachedError: "Cached Error",
+ ExtendedErrorCodeNotReady: "Not Ready",
+ ExtendedErrorCodeBlocked: "Blocked",
+ ExtendedErrorCodeCensored: "Censored",
+ ExtendedErrorCodeFiltered: "Filtered",
+ ExtendedErrorCodeProhibited: "Prohibited",
+ ExtendedErrorCodeStaleNXDOMAINAnswer: "Stale NXDOMAIN Answer",
+ ExtendedErrorCodeNotAuthoritative: "Not Authoritative",
+ ExtendedErrorCodeNotSupported: "Not Supported",
+ ExtendedErrorCodeNoReachableAuthority: "No Reachable Authority",
+ ExtendedErrorCodeNetworkError: "Network Error",
+ ExtendedErrorCodeInvalidData: "Invalid Data",
+ ExtendedErrorCodeSignatureExpiredBeforeValid: "Signature Expired Before Valid",
+ ExtendedErrorCodeTooEarly: "Too Early",
+ ExtendedErrorCodeUnsupportedNSEC3IterValue: "Unsupported NSEC3 Iterations Value",
+ ExtendedErrorCodeUnableToConformToPolicy: "Unable To Conform To Policy",
+ ExtendedErrorCodeSynthesized: "Synthesized",
+ ExtendedErrorCodeInvalidQueryType: "Invalid Query Type",
}
// StringToExtendedErrorCode is a map from human readable descriptions to
diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go
index 8e3129cb..7a34c14c 100644
--- a/vendor/github.com/miekg/dns/types.go
+++ b/vendor/github.com/miekg/dns/types.go
@@ -96,6 +96,7 @@ const (
TypeLP uint16 = 107
TypeEUI48 uint16 = 108
TypeEUI64 uint16 = 109
+ TypeNXNAME uint16 = 128
TypeURI uint16 = 256
TypeCAA uint16 = 257
TypeAVC uint16 = 258
@@ -294,6 +295,19 @@ func (*NULL) parse(c *zlexer, origin string) *ParseError {
return &ParseError{err: "NULL records do not have a presentation format"}
}
+// NXNAME is a meta record. See https://www.iana.org/go/draft-ietf-dnsop-compact-denial-of-existence-04
+// Reference: https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml
+type NXNAME struct {
+ Hdr RR_Header
+ // Does not have any rdata
+}
+
+func (rr *NXNAME) String() string { return rr.Hdr.String() }
+
+func (*NXNAME) parse(c *zlexer, origin string) *ParseError {
+ return &ParseError{err: "NXNAME records do not have a presentation format"}
+}
+
// CNAME RR. See RFC 1034.
type CNAME struct {
Hdr RR_Header
diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go
index dc34e590..00c8629f 100644
--- a/vendor/github.com/miekg/dns/version.go
+++ b/vendor/github.com/miekg/dns/version.go
@@ -3,7 +3,7 @@ package dns
import "fmt"
// Version is current version of this library.
-var Version = v{1, 1, 58}
+var Version = v{1, 1, 62}
// v holds the version of this library.
type v struct {
diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go
index 03029fb3..330c0539 100644
--- a/vendor/github.com/miekg/dns/zduplicate.go
+++ b/vendor/github.com/miekg/dns/zduplicate.go
@@ -886,6 +886,15 @@ func (r1 *NULL) isDuplicate(_r2 RR) bool {
return true
}
+func (r1 *NXNAME) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*NXNAME)
+ if !ok {
+ return false
+ }
+ _ = r2
+ return true
+}
+
func (r1 *NXT) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*NXT)
if !ok {
diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go
index 39b3bc81..5a6cf4c6 100644
--- a/vendor/github.com/miekg/dns/zmsg.go
+++ b/vendor/github.com/miekg/dns/zmsg.go
@@ -706,6 +706,10 @@ func (rr *NULL) pack(msg []byte, off int, compression compressionMap, compress b
return off, nil
}
+func (rr *NXNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ return off, nil
+}
+
func (rr *NXT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
off, err = packDomainName(rr.NextDomain, msg, off, compression, false)
if err != nil {
@@ -2266,6 +2270,13 @@ func (rr *NULL) unpack(msg []byte, off int) (off1 int, err error) {
return off, nil
}
+func (rr *NXNAME) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ return off, nil
+}
+
func (rr *NXT) unpack(msg []byte, off int) (off1 int, err error) {
rdStart := off
_ = rdStart
diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go
index 2c70fc44..11f13ecf 100644
--- a/vendor/github.com/miekg/dns/ztypes.go
+++ b/vendor/github.com/miekg/dns/ztypes.go
@@ -60,6 +60,7 @@ var TypeToRR = map[uint16]func() RR{
TypeNSEC3: func() RR { return new(NSEC3) },
TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) },
TypeNULL: func() RR { return new(NULL) },
+ TypeNXNAME: func() RR { return new(NXNAME) },
TypeNXT: func() RR { return new(NXT) },
TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) },
TypeOPT: func() RR { return new(OPT) },
@@ -146,6 +147,7 @@ var TypeToString = map[uint16]string{
TypeNSEC3: "NSEC3",
TypeNSEC3PARAM: "NSEC3PARAM",
TypeNULL: "NULL",
+ TypeNXNAME: "NXNAME",
TypeNXT: "NXT",
TypeNone: "None",
TypeOPENPGPKEY: "OPENPGPKEY",
@@ -230,6 +232,7 @@ func (rr *NSEC) Header() *RR_Header { return &rr.Hdr }
func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr }
func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr }
func (rr *NULL) Header() *RR_Header { return &rr.Hdr }
+func (rr *NXNAME) Header() *RR_Header { return &rr.Hdr }
func (rr *NXT) Header() *RR_Header { return &rr.Hdr }
func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *OPT) Header() *RR_Header { return &rr.Hdr }
@@ -594,6 +597,11 @@ func (rr *NULL) len(off int, compression map[string]struct{}) int {
return l
}
+func (rr *NXNAME) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ return l
+}
+
func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
@@ -1107,6 +1115,10 @@ func (rr *NULL) copy() RR {
return &NULL{rr.Hdr, rr.Data}
}
+func (rr *NXNAME) copy() RR {
+ return &NXNAME{rr.Hdr}
+}
+
func (rr *NXT) copy() RR {
return &NXT{*rr.NSEC.copy().(*NSEC)}
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go b/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go
new file mode 100644
index 00000000..8bf537f7
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/api-bucket-cors.go
@@ -0,0 +1,136 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2024 MinIO, Inc.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bytes"
+ "context"
+ "net/http"
+ "net/url"
+
+ "github.com/minio/minio-go/v7/pkg/cors"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+)
+
+// SetBucketCors sets the cors configuration for the bucket
+func (c *Client) SetBucketCors(ctx context.Context, bucketName string, corsConfig *cors.Config) error {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ if corsConfig == nil {
+ return c.removeBucketCors(ctx, bucketName)
+ }
+
+ return c.putBucketCors(ctx, bucketName, corsConfig)
+}
+
+func (c *Client) putBucketCors(ctx context.Context, bucketName string, corsConfig *cors.Config) error {
+ urlValues := make(url.Values)
+ urlValues.Set("cors", "")
+
+ corsStr, err := corsConfig.ToXML()
+ if err != nil {
+ return err
+ }
+
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(corsStr),
+ contentLength: int64(len(corsStr)),
+ contentMD5Base64: sumMD5Base64([]byte(corsStr)),
+ }
+
+ resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return nil
+}
+
+func (c *Client) removeBucketCors(ctx context.Context, bucketName string) error {
+ urlValues := make(url.Values)
+ urlValues.Set("cors", "")
+
+ resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+
+ if resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+
+ return nil
+}
+
+// GetBucketCors returns the current cors
+func (c *Client) GetBucketCors(ctx context.Context, bucketName string) (*cors.Config, error) {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return nil, err
+ }
+ bucketCors, err := c.getBucketCors(ctx, bucketName)
+ if err != nil {
+ errResponse := ToErrorResponse(err)
+ if errResponse.Code == "NoSuchCORSConfiguration" {
+ return nil, nil
+ }
+ return nil, err
+ }
+ return bucketCors, nil
+}
+
+func (c *Client) getBucketCors(ctx context.Context, bucketName string) (*cors.Config, error) {
+ urlValues := make(url.Values)
+ urlValues.Set("cors", "")
+
+ resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex, // TODO: needed? copied over from other example, but not spec'd in API.
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return nil, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ corsConfig, err := cors.ParseBucketCorsConfig(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ return corsConfig, nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
index 5f117afa..a70cbea9 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go
@@ -24,7 +24,6 @@ import (
"encoding/hex"
"encoding/xml"
"fmt"
- "hash/crc32"
"io"
"net/http"
"net/url"
@@ -87,7 +86,7 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
if opts.UserMetadata == nil {
opts.UserMetadata = make(map[string]string, 1)
}
- opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
+ opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
}
// Initiate a new multipart upload.
@@ -116,7 +115,7 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
// CRC32C is ~50% faster on AMD64 @ 30GB/s
var crcBytes []byte
customHeader := make(http.Header)
- crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+ crc := opts.AutoChecksum.Hasher()
for partNumber <= totalPartsCount {
length, rErr := readFull(reader, buf)
if rErr == io.EOF && partNumber > 1 {
@@ -154,7 +153,7 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
crc.Reset()
crc.Write(buf[:length])
cSum := crc.Sum(nil)
- customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
+ customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
crcBytes = append(crcBytes, cSum...)
}
@@ -202,12 +201,13 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj
sort.Sort(completedParts(complMultipartUpload.Parts))
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
+ AutoChecksum: opts.AutoChecksum,
}
if len(crcBytes) > 0 {
// Add hash of hashes.
crc.Reset()
crc.Write(crcBytes)
- opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+ opts.UserMetadata = map[string]string{opts.AutoChecksum.Key(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil {
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
index 51226630..eef976c8 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go
@@ -22,7 +22,6 @@ import (
"context"
"encoding/base64"
"fmt"
- "hash/crc32"
"io"
"net/http"
"net/url"
@@ -109,13 +108,15 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
if err != nil {
return UploadInfo{}, err
}
-
+ if opts.Checksum.IsSet() {
+ opts.AutoChecksum = opts.Checksum
+ }
withChecksum := c.trailingHeaderSupport
if withChecksum {
if opts.UserMetadata == nil {
opts.UserMetadata = make(map[string]string, 1)
}
- opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
+ opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
}
// Initiate a new multipart upload.
uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
@@ -195,10 +196,10 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
trailer := make(http.Header, 1)
if withChecksum {
- crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
- trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil)))
+ crc := opts.AutoChecksum.Hasher()
+ trailer.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(crc.Sum(nil)))
sectionReader = newHashReaderWrapper(sectionReader, crc, func(hash []byte) {
- trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash))
+ trailer.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(hash))
})
}
@@ -271,17 +272,18 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
+ AutoChecksum: opts.AutoChecksum,
}
if withChecksum {
// Add hash of hashes.
- crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+ crc := opts.AutoChecksum.Hasher()
for _, part := range complMultipartUpload.Parts {
- cs, err := base64.StdEncoding.DecodeString(part.ChecksumCRC32C)
+ cs, err := base64.StdEncoding.DecodeString(part.Checksum(opts.AutoChecksum))
if err == nil {
crc.Write(cs)
}
}
- opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+ opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
@@ -304,11 +306,16 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
return UploadInfo{}, err
}
+ if opts.Checksum.IsSet() {
+ opts.AutoChecksum = opts.Checksum
+ opts.SendContentMd5 = false
+ }
+
if !opts.SendContentMd5 {
if opts.UserMetadata == nil {
opts.UserMetadata = make(map[string]string, 1)
}
- opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
+ opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
}
// Calculate the optimal parts info for a given size.
@@ -337,7 +344,7 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
// CRC32C is ~50% faster on AMD64 @ 30GB/s
var crcBytes []byte
customHeader := make(http.Header)
- crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+ crc := opts.AutoChecksum.Hasher()
md5Hash := c.md5Hasher()
defer md5Hash.Close()
@@ -381,7 +388,7 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
crc.Reset()
crc.Write(buf[:length])
cSum := crc.Sum(nil)
- customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
+ customHeader.Set(opts.AutoChecksum.KeyCapitalized(), base64.StdEncoding.EncodeToString(cSum))
crcBytes = append(crcBytes, cSum...)
}
@@ -433,12 +440,13 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
+ AutoChecksum: opts.AutoChecksum,
}
if len(crcBytes) > 0 {
// Add hash of hashes.
crc.Reset()
crc.Write(crcBytes)
- opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+ opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil {
@@ -462,12 +470,15 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
if err = s3utils.CheckValidObjectName(objectName); err != nil {
return UploadInfo{}, err
}
-
+ if opts.Checksum.IsSet() {
+ opts.SendContentMd5 = false
+ opts.AutoChecksum = opts.Checksum
+ }
if !opts.SendContentMd5 {
if opts.UserMetadata == nil {
opts.UserMetadata = make(map[string]string, 1)
}
- opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
+ opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
}
// Cancel all when an error occurs.
@@ -500,7 +511,7 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
// Create checksums
// CRC32C is ~50% faster on AMD64 @ 30GB/s
var crcBytes []byte
- crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+ crc := opts.AutoChecksum.Hasher()
// Total data read and written to server. should be equal to 'size' at the end of the call.
var totalUploadedSize int64
@@ -554,11 +565,11 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
// Calculate md5sum.
customHeader := make(http.Header)
if !opts.SendContentMd5 {
- // Add CRC32C instead.
+ // Add Checksum instead.
crc.Reset()
crc.Write(buf[:length])
cSum := crc.Sum(nil)
- customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
+ customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
crcBytes = append(crcBytes, cSum...)
}
@@ -639,12 +650,13 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
+ AutoChecksum: opts.AutoChecksum,
}
if len(crcBytes) > 0 {
// Add hash of hashes.
crc.Reset()
crc.Write(crcBytes)
- opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+ opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil {
@@ -675,6 +687,9 @@ func (c *Client) putObject(ctx context.Context, bucketName, objectName string, r
if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 {
return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'")
}
+ if opts.Checksum.IsSet() {
+ opts.SendContentMd5 = false
+ }
var readSeeker io.Seeker
if size > 0 {
@@ -744,17 +759,6 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
// Set headers.
customHeader := opts.Header()
- // Add CRC when client supports it, MD5 is not set, not Google and we don't add SHA256 to chunks.
- addCrc := c.trailingHeaderSupport && md5Base64 == "" && !s3utils.IsGoogleEndpoint(*c.endpointURL) && (opts.DisableContentSha256 || c.secure)
-
- if addCrc {
- // If user has added checksums, don't add them ourselves.
- for k := range opts.UserMetadata {
- if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") {
- addCrc = false
- }
- }
- }
// Populate request metadata.
reqMetadata := requestMetadata{
bucketName: bucketName,
@@ -765,8 +769,24 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string,
contentMD5Base64: md5Base64,
contentSHA256Hex: sha256Hex,
streamSha256: !opts.DisableContentSha256,
- addCrc: addCrc,
}
+ // Add CRC when client supports it, MD5 is not set, not Google and we don't add SHA256 to chunks.
+ addCrc := c.trailingHeaderSupport && md5Base64 == "" && !s3utils.IsGoogleEndpoint(*c.endpointURL) && (opts.DisableContentSha256 || c.secure)
+ if opts.Checksum.IsSet() {
+ reqMetadata.addCrc = &opts.Checksum
+ } else if addCrc {
+ // If user has added checksums, don't add them ourselves.
+ for k := range opts.UserMetadata {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") {
+ addCrc = false
+ }
+ }
+ if addCrc {
+ opts.AutoChecksum.SetDefault(ChecksumCRC32C)
+ reqMetadata.addCrc = &opts.AutoChecksum
+ }
+ }
+
if opts.Internal.SourceVersionID != "" {
if opts.Internal.SourceVersionID != nullVersionID {
if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil {
diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go
index 6ccb5815..d769648a 100644
--- a/vendor/github.com/minio/minio-go/v7/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go
@@ -23,7 +23,6 @@ import (
"encoding/base64"
"errors"
"fmt"
- "hash/crc32"
"io"
"net/http"
"sort"
@@ -90,6 +89,18 @@ type PutObjectOptions struct {
DisableContentSha256 bool
DisableMultipart bool
+ // AutoChecksum is the type of checksum that will be added if no other checksum is added,
+ // like MD5 or SHA256 streaming checksum, and it is feasible for the upload type.
+ // If none is specified CRC32C is used, since it is generally the fastest.
+ AutoChecksum ChecksumType
+
+ // Checksum will force a checksum of the specific type.
+ // This requires that the client was created with "TrailingHeaders:true" option,
+ // and that the destination server supports it.
+ // Unavailable with V2 signatures & Google endpoints.
+ // This will disable content MD5 checksums if set.
+ Checksum ChecksumType
+
// ConcurrentStreamParts will create NumThreads buffers of PartSize bytes,
// fill them serially and upload them in parallel.
// This can be used for faster uploads on non-seekable or slow-to-seek input.
@@ -236,7 +247,7 @@ func (opts PutObjectOptions) Header() (header http.Header) {
}
// validate() checks if the UserMetadata map has standard headers or and raises an error if so.
-func (opts PutObjectOptions) validate() (err error) {
+func (opts PutObjectOptions) validate(c *Client) (err error) {
for k, v := range opts.UserMetadata {
if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) {
return errInvalidArgument(k + " unsupported user defined metadata name")
@@ -251,6 +262,17 @@ func (opts PutObjectOptions) validate() (err error) {
if opts.LegalHold != "" && !opts.LegalHold.IsValid() {
return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status")
}
+ if opts.Checksum.IsSet() {
+ switch {
+ case !c.trailingHeaderSupport:
+ return errInvalidArgument("Checksum requires Client with TrailingHeaders enabled")
+ case c.overrideSignerType.IsV2():
+ return errInvalidArgument("Checksum cannot be used with v2 signatures")
+ case s3utils.IsGoogleEndpoint(*c.endpointURL):
+ return errInvalidArgument("Checksum cannot be used with GCS endpoints")
+ }
+ }
+
return nil
}
@@ -287,7 +309,7 @@ func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, r
return UploadInfo{}, errors.New("object size must be provided with disable multipart upload")
}
- err = opts.validate()
+ err = opts.validate(c)
if err != nil {
return UploadInfo{}, err
}
@@ -300,6 +322,7 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str
if size > int64(maxMultipartPutObjectSize) {
return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
}
+ opts.AutoChecksum.SetDefault(ChecksumCRC32C)
// NOTE: Streaming signature is not supported by GCS.
if s3utils.IsGoogleEndpoint(*c.endpointURL) {
@@ -328,7 +351,7 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str
return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
}
- if size < int64(partSize) || opts.DisableMultipart {
+ if size <= int64(partSize) || opts.DisableMultipart {
return c.putObject(ctx, bucketName, objectName, reader, size, opts)
}
@@ -357,11 +380,15 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
return UploadInfo{}, err
}
+ if opts.Checksum.IsSet() {
+ opts.SendContentMd5 = false
+ opts.AutoChecksum = opts.Checksum
+ }
if !opts.SendContentMd5 {
if opts.UserMetadata == nil {
opts.UserMetadata = make(map[string]string, 1)
}
- opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C"
+ opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String()
}
// Initiate a new multipart upload.
@@ -390,7 +417,7 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
// CRC32C is ~50% faster on AMD64 @ 30GB/s
var crcBytes []byte
customHeader := make(http.Header)
- crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+ crc := opts.AutoChecksum.Hasher()
for partNumber <= totalPartsCount {
length, rerr := readFull(reader, buf)
@@ -413,7 +440,7 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
crc.Reset()
crc.Write(buf[:length])
cSum := crc.Sum(nil)
- customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum))
+ customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum))
crcBytes = append(crcBytes, cSum...)
}
@@ -466,12 +493,13 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam
opts = PutObjectOptions{
ServerSideEncryption: opts.ServerSideEncryption,
+ AutoChecksum: opts.AutoChecksum,
}
if len(crcBytes) > 0 {
// Add hash of hashes.
crc.Reset()
crc.Write(crcBytes)
- opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))}
+ opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))}
}
uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts)
if err != nil {
diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
index eb4da414..6b6559bf 100644
--- a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
+++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go
@@ -107,7 +107,7 @@ type readSeekCloser interface {
// Total size should be < 5TB.
// This function blocks until 'objs' is closed and the content has been uploaded.
func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts SnowballOptions, objs <-chan SnowballObject) (err error) {
- err = opts.Opts.validate()
+ err = opts.Opts.validate(&c)
if err != nil {
return err
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
index 1527b746..790606c5 100644
--- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
+++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
@@ -340,6 +340,22 @@ type CompletePart struct {
ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
}
+// Checksum will return the checksum for the given type.
+// Will return the empty string if not set.
+func (c CompletePart) Checksum(t ChecksumType) string {
+ switch {
+ case t.Is(ChecksumCRC32C):
+ return c.ChecksumCRC32C
+ case t.Is(ChecksumCRC32):
+ return c.ChecksumCRC32
+ case t.Is(ChecksumSHA1):
+ return c.ChecksumSHA1
+ case t.Is(ChecksumSHA256):
+ return c.ChecksumSHA256
+ }
+ return ""
+}
+
// completeMultipartUpload container for completing multipart upload.
type completeMultipartUpload struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
index 93755140..1d6b6650 100644
--- a/vendor/github.com/minio/minio-go/v7/api.go
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -23,7 +23,6 @@ import (
"encoding/base64"
"errors"
"fmt"
- "hash/crc32"
"io"
"math/rand"
"net"
@@ -129,7 +128,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "v7.0.74"
+ libraryVersion = "v7.0.77"
)
// User Agent should always following the below style.
@@ -471,7 +470,7 @@ type requestMetadata struct {
contentMD5Base64 string // carries base64 encoded md5sum
contentSHA256Hex string // carries hex encoded sha256sum
streamSha256 bool
- addCrc bool
+ addCrc *ChecksumType
trailer http.Header // (http.Request).Trailer. Requires v4 signature.
}
@@ -616,16 +615,16 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
}
}
- if metadata.addCrc && metadata.contentLength > 0 {
+ if metadata.addCrc != nil && metadata.contentLength > 0 {
if metadata.trailer == nil {
metadata.trailer = make(http.Header, 1)
}
- crc := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+ crc := metadata.addCrc.Hasher()
metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) {
// Update trailer when done.
- metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash))
+ metadata.trailer.Set(metadata.addCrc.Key(), base64.StdEncoding.EncodeToString(hash))
})
- metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil)))
+ metadata.trailer.Set(metadata.addCrc.Key(), base64.StdEncoding.EncodeToString(crc.Sum(nil)))
}
// Create cancel context to control 'newRetryTimer' go routine.
@@ -662,7 +661,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ
// Initiate the request.
res, err = c.do(req)
if err != nil {
- if isRequestErrorRetryable(err) {
+ if isRequestErrorRetryable(ctx, err) {
// Retry the request
continue
}
diff --git a/vendor/github.com/minio/minio-go/v7/checksum.go b/vendor/github.com/minio/minio-go/v7/checksum.go
index a1f6f434..7eb1bf25 100644
--- a/vendor/github.com/minio/minio-go/v7/checksum.go
+++ b/vendor/github.com/minio/minio-go/v7/checksum.go
@@ -25,6 +25,7 @@ import (
"hash/crc32"
"io"
"math/bits"
+ "net/http"
)
// ChecksumType contains information about the checksum type.
@@ -78,6 +79,11 @@ func (c ChecksumType) Key() string {
return ""
}
+// KeyCapitalized returns the capitalized key as used in HTTP headers.
+func (c ChecksumType) KeyCapitalized() string {
+ return http.CanonicalHeaderKey(c.Key())
+}
+
// RawByteLen returns the size of the un-encoded checksum.
func (c ChecksumType) RawByteLen() int {
switch c & checksumMask {
@@ -112,6 +118,13 @@ func (c ChecksumType) IsSet() bool {
return bits.OnesCount32(uint32(c)) == 1
}
+// SetDefault will set the checksum if not already set.
+func (c *ChecksumType) SetDefault(t ChecksumType) {
+ if !c.IsSet() {
+ *c = t
+ }
+}
+
// String returns the type as a string.
// CRC32, CRC32C, SHA1, and SHA256 for valid values.
// Empty string for unset and "" if not valid.
diff --git a/vendor/github.com/minio/minio-go/v7/core.go b/vendor/github.com/minio/minio-go/v7/core.go
index 132ea702..99b99db9 100644
--- a/vendor/github.com/minio/minio-go/v7/core.go
+++ b/vendor/github.com/minio/minio-go/v7/core.go
@@ -91,6 +91,7 @@ type PutObjectPartOptions struct {
Md5Base64, Sha256Hex string
SSE encrypt.ServerSide
CustomHeader, Trailer http.Header
+ DisableContentSha256 bool
}
// PutObjectPart - Upload an object part.
@@ -107,7 +108,7 @@ func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string
sha256Hex: opts.Sha256Hex,
size: size,
sse: opts.SSE,
- streamSha256: true,
+ streamSha256: !opts.DisableContentSha256,
customHeader: opts.CustomHeader,
trailer: opts.Trailer,
}
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go
index e77bf9d4..780dc899 100644
--- a/vendor/github.com/minio/minio-go/v7/functional_tests.go
+++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go
@@ -24,7 +24,6 @@ import (
"archive/zip"
"bytes"
"context"
- "crypto/sha1"
"crypto/sha256"
"encoding/base64"
"errors"
@@ -52,6 +51,7 @@ import (
"github.com/google/uuid"
"github.com/minio/minio-go/v7"
+ "github.com/minio/minio-go/v7/pkg/cors"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/encrypt"
"github.com/minio/minio-go/v7/pkg/notification"
@@ -83,7 +83,7 @@ func createHTTPTransport() (transport *http.Transport) {
return nil
}
- if mustParseBool(os.Getenv(skipCERTValidation)) {
+ if mustParseBool(os.Getenv(enableHTTPS)) && mustParseBool(os.Getenv(skipCERTValidation)) {
transport.TLSClientConfig.InsecureSkipVerify = true
}
@@ -165,7 +165,7 @@ func logError(testName, function string, args map[string]interface{}, startTime
}
}
-// log failed test runs
+// Log failed test runs, do not call this directly, use logError instead, as that correctly stops the test run
func logFailure(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) {
l := baseLogger(testName, function, args, startTime).With(
"status", "FAIL",
@@ -2198,22 +2198,15 @@ func testPutObjectWithChecksums() {
defer cleanupBucket(bucketName, c)
tests := []struct {
- header string
- hasher hash.Hash
-
- // Checksum values
- ChecksumCRC32 string
- ChecksumCRC32C string
- ChecksumSHA1 string
- ChecksumSHA256 string
+ cs minio.ChecksumType
}{
- {header: "x-amz-checksum-crc32", hasher: crc32.NewIEEE()},
- {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))},
- {header: "x-amz-checksum-sha1", hasher: sha1.New()},
- {header: "x-amz-checksum-sha256", hasher: sha256.New()},
+ {cs: minio.ChecksumCRC32C},
+ {cs: minio.ChecksumCRC32},
+ {cs: minio.ChecksumSHA1},
+ {cs: minio.ChecksumSHA256},
}
- for i, test := range tests {
+ for _, test := range tests {
bufSize := dataFileMap["datafile-10-kB"]
// Save the data
@@ -2234,29 +2227,27 @@ func testPutObjectWithChecksums() {
logError(testName, function, args, startTime, "", "Read failed", err)
return
}
- h := test.hasher
+ h := test.cs.Hasher()
h.Reset()
- // Wrong CRC.
- meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil))
+
+ // Test with Wrong CRC.
+ meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil))
args["metadata"] = meta
args["range"] = "false"
+ args["checksum"] = test.cs.String()
resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
DisableMultipart: true,
UserMetadata: meta,
})
if err == nil {
- if i == 0 && resp.ChecksumCRC32 == "" {
- logIgnored(testName, function, args, startTime, "Checksums does not appear to be supported by backend")
- return
- }
- logError(testName, function, args, startTime, "", "PutObject failed", err)
+ logError(testName, function, args, startTime, "", "PutObject did not fail on wrong CRC", err)
return
}
// Set correct CRC.
h.Write(b)
- meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil))
+ meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil))
reader.Close()
resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
@@ -2343,7 +2334,7 @@ func testPutObjectWithChecksums() {
}
// Test PutObject with custom checksums.
-func testPutMultipartObjectWithChecksums() {
+func testPutObjectWithTrailingChecksums() {
// initialize logging params
startTime := time.Now()
testName := getFuncName()
@@ -2351,7 +2342,7 @@ func testPutMultipartObjectWithChecksums() {
args := map[string]interface{}{
"bucketName": "",
"objectName": "",
- "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
+ "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress, TrailChecksum: xxx}",
}
if !isFullMode() {
@@ -2365,9 +2356,201 @@ func testPutMultipartObjectWithChecksums() {
// Instantiate new minio client object.
c, err := minio.New(os.Getenv(serverEndpoint),
&minio.Options{
- Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
- Transport: createHTTPTransport(),
- Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ TrailingHeaders: true,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ defer cleanupBucket(bucketName, c)
+ tests := []struct {
+ cs minio.ChecksumType
+ }{
+ {cs: minio.ChecksumCRC32C},
+ {cs: minio.ChecksumCRC32},
+ {cs: minio.ChecksumSHA1},
+ {cs: minio.ChecksumSHA256},
+ }
+
+ for _, test := range tests {
+ function := "PutObject(bucketName, objectName, reader,size, opts)"
+ bufSize := dataFileMap["datafile-10-kB"]
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ cmpChecksum := func(got, want string) {
+ if want != got {
+ logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
+ return
+ }
+ }
+
+ meta := map[string]string{}
+ reader := getDataReader("datafile-10-kB")
+ b, err := io.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read failed", err)
+ return
+ }
+ h := test.cs.Hasher()
+ h.Reset()
+
+ // Test with Wrong CRC.
+ args["metadata"] = meta
+ args["range"] = "false"
+ args["checksum"] = test.cs.String()
+
+ resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{
+ DisableMultipart: true,
+ DisableContentSha256: true,
+ UserMetadata: meta,
+ Checksum: test.cs,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ h.Write(b)
+ meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil))
+
+ cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+ cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+ cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+ cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
+
+ // Read the data back
+ gopts := minio.GetObjectOptions{Checksum: true}
+
+ function = "GetObject(...)"
+ r, err := c.GetObject(context.Background(), bucketName, objectName, gopts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+ cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+ cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+ cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err)
+ return
+ }
+
+ if err := r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "Object Close failed", err)
+ return
+ }
+ if err := r.Close(); err == nil {
+ logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
+ return
+ }
+
+ function = "GetObject( Range...)"
+ args["range"] = "true"
+ err = gopts.SetRange(100, 1000)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetRange failed", err)
+ return
+ }
+ r, err = c.GetObject(context.Background(), bucketName, objectName, gopts)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ b, err = io.ReadAll(r)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Read failed", err)
+ return
+ }
+ st, err = r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ // Range requests should return empty checksums...
+ cmpChecksum(st.ChecksumSHA256, "")
+ cmpChecksum(st.ChecksumSHA1, "")
+ cmpChecksum(st.ChecksumCRC32, "")
+ cmpChecksum(st.ChecksumCRC32C, "")
+
+ function = "GetObjectAttributes(...)"
+ s, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, minio.ObjectAttributesOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err)
+ return
+ }
+ cmpChecksum(s.Checksum.ChecksumSHA256, meta["x-amz-checksum-sha256"])
+ cmpChecksum(s.Checksum.ChecksumSHA1, meta["x-amz-checksum-sha1"])
+ cmpChecksum(s.Checksum.ChecksumCRC32, meta["x-amz-checksum-crc32"])
+ cmpChecksum(s.Checksum.ChecksumCRC32C, meta["x-amz-checksum-crc32c"])
+
+ delete(args, "range")
+ delete(args, "metadata")
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test PutObject with custom checksums.
+func testPutMultipartObjectWithChecksums(trailing bool) {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader,size, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": fmt.Sprintf("minio.PutObjectOptions{UserMetadata: metadata, Progress: progress Checksum: %v}", trailing),
+ }
+
+ if !isFullMode() {
+ logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs")
+ return
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ TrailingHeaders: trailing,
})
if err != nil {
logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
@@ -2418,17 +2601,12 @@ func testPutMultipartObjectWithChecksums() {
}
defer cleanupBucket(bucketName, c)
tests := []struct {
- header string
- hasher hash.Hash
-
- // Checksum values
- ChecksumCRC32 string
- ChecksumCRC32C string
- ChecksumSHA1 string
- ChecksumSHA256 string
+ cs minio.ChecksumType
}{
- // Currently there is no way to override the checksum type.
- {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), ChecksumCRC32C: "OpEx0Q==-13"},
+ {cs: minio.ChecksumCRC32C},
+ {cs: minio.ChecksumCRC32},
+ {cs: minio.ChecksumSHA1},
+ {cs: minio.ChecksumSHA256},
}
for _, test := range tests {
@@ -2437,11 +2615,12 @@ func testPutMultipartObjectWithChecksums() {
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
args["objectName"] = objectName
+ args["checksum"] = test.cs.String()
cmpChecksum := func(got, want string) {
if want != got {
- // logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
- fmt.Printf("want %s, got %s\n", want, got)
+ logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got))
+ //fmt.Printf("want %s, got %s\n", want, got)
return
}
}
@@ -2454,26 +2633,57 @@ func testPutMultipartObjectWithChecksums() {
return
}
reader.Close()
- h := test.hasher
+ h := test.cs.Hasher()
h.Reset()
- test.ChecksumCRC32C = hashMultiPart(b, partSize, test.hasher)
+ want := hashMultiPart(b, partSize, test.cs.Hasher())
+ var cs minio.ChecksumType
+ rd := io.Reader(io.NopCloser(bytes.NewReader(b)))
+ if trailing {
+ cs = test.cs
+ rd = bytes.NewReader(b)
+ }
// Set correct CRC.
-
- resp, err := c.PutObject(context.Background(), bucketName, objectName, io.NopCloser(bytes.NewReader(b)), int64(bufSize), minio.PutObjectOptions{
+ resp, err := c.PutObject(context.Background(), bucketName, objectName, rd, int64(bufSize), minio.PutObjectOptions{
DisableContentSha256: true,
DisableMultipart: false,
UserMetadata: nil,
PartSize: partSize,
+ AutoChecksum: test.cs,
+ Checksum: cs,
})
if err != nil {
logError(testName, function, args, startTime, "", "PutObject failed", err)
return
}
- cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256)
- cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1)
- cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32)
- cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C)
+
+ switch test.cs {
+ case minio.ChecksumCRC32C:
+ cmpChecksum(resp.ChecksumCRC32C, want)
+ case minio.ChecksumCRC32:
+ cmpChecksum(resp.ChecksumCRC32, want)
+ case minio.ChecksumSHA1:
+ cmpChecksum(resp.ChecksumSHA1, want)
+ case minio.ChecksumSHA256:
+ cmpChecksum(resp.ChecksumSHA256, want)
+ }
+
+ s, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, minio.ObjectAttributesOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err)
+ return
+ }
+ want = want[:strings.IndexByte(want, '-')]
+ switch test.cs {
+ case minio.ChecksumCRC32C:
+ cmpChecksum(s.Checksum.ChecksumCRC32C, want)
+ case minio.ChecksumCRC32:
+ cmpChecksum(s.Checksum.ChecksumCRC32, want)
+ case minio.ChecksumSHA1:
+ cmpChecksum(s.Checksum.ChecksumSHA1, want)
+ case minio.ChecksumSHA256:
+ cmpChecksum(s.Checksum.ChecksumSHA256, want)
+ }
// Read the data back
gopts := minio.GetObjectOptions{Checksum: true}
@@ -2495,18 +2705,17 @@ func testPutMultipartObjectWithChecksums() {
// Test part 2 checksum...
h.Reset()
h.Write(b[partSize : 2*partSize])
- got := base64.StdEncoding.EncodeToString(h.Sum(nil))
- if test.ChecksumSHA256 != "" {
- cmpChecksum(st.ChecksumSHA256, got)
- }
- if test.ChecksumSHA1 != "" {
- cmpChecksum(st.ChecksumSHA1, got)
- }
- if test.ChecksumCRC32 != "" {
- cmpChecksum(st.ChecksumCRC32, got)
- }
- if test.ChecksumCRC32C != "" {
- cmpChecksum(st.ChecksumCRC32C, got)
+ want = base64.StdEncoding.EncodeToString(h.Sum(nil))
+
+ switch test.cs {
+ case minio.ChecksumCRC32C:
+ cmpChecksum(st.ChecksumCRC32C, want)
+ case minio.ChecksumCRC32:
+ cmpChecksum(st.ChecksumCRC32, want)
+ case minio.ChecksumSHA1:
+ cmpChecksum(st.ChecksumSHA1, want)
+ case minio.ChecksumSHA256:
+ cmpChecksum(st.ChecksumSHA256, want)
}
delete(args, "metadata")
@@ -2971,8 +3180,8 @@ func testGetObjectAttributes() {
testFiles[i].UploadInfo, err = c.PutObject(context.Background(), v.Bucket, v.Object, reader, int64(bufSize), minio.PutObjectOptions{
ContentType: v.ContentType,
SendContentMd5: v.SendContentMd5,
+ Checksum: minio.ChecksumCRC32C,
})
-
if err != nil {
logError(testName, function, args, startTime, "", "PutObject failed", err)
return
@@ -3053,7 +3262,7 @@ func testGetObjectAttributes() {
test: objectAttributesTestOptions{
TestFileName: "file1",
StorageClass: "STANDARD",
- HasFullChecksum: false,
+ HasFullChecksum: true,
},
}
@@ -3142,9 +3351,10 @@ func testGetObjectAttributesSSECEncryption() {
info, err := c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{
ContentType: "content/custom",
- SendContentMd5: true,
+ SendContentMd5: false,
ServerSideEncryption: sse,
PartSize: uint64(bufSize) / 2,
+ Checksum: minio.ChecksumCRC32C,
})
if err != nil {
logError(testName, function, args, startTime, "", "PutObject failed", err)
@@ -3164,9 +3374,9 @@ func testGetObjectAttributesSSECEncryption() {
ETag: info.ETag,
NumberOfParts: 2,
ObjectSize: int(info.Size),
- HasFullChecksum: false,
+ HasFullChecksum: true,
HasParts: true,
- HasPartChecksums: false,
+ HasPartChecksums: true,
})
if err != nil {
logError(testName, function, args, startTime, "", "Validating GetObjectsAttributes response failed", err)
@@ -5584,18 +5794,12 @@ func testPresignedPostPolicy() {
}
writer.Close()
- transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
- if err != nil {
- logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
- return
- }
-
httpClient := &http.Client{
// Setting a sensible time out of 30secs to wait for response
// headers. Request is pro-actively canceled after 30secs
// with no response.
Timeout: 30 * time.Second,
- Transport: transport,
+ Transport: createHTTPTransport(),
}
args["url"] = presignedPostPolicyURL.String()
@@ -7212,7 +7416,6 @@ func testFunctional() {
"bucketName": bucketName,
}
exists, err = c.BucketExists(context.Background(), bucketName)
-
if err != nil {
logError(testName, function, args, startTime, "", "BucketExists failed", err)
return
@@ -7275,7 +7478,6 @@ func testFunctional() {
"bucketPolicy": writeOnlyPolicy,
}
err = c.SetBucketPolicy(context.Background(), bucketName, writeOnlyPolicy)
-
if err != nil {
logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
return
@@ -7304,7 +7506,6 @@ func testFunctional() {
"bucketPolicy": readWritePolicy,
}
err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy)
-
if err != nil {
logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
return
@@ -7481,7 +7682,6 @@ func testFunctional() {
"fileName": fileName + "-f",
}
err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
-
if err != nil {
logError(testName, function, args, startTime, "", "FGetObject failed", err)
return
@@ -7513,7 +7713,7 @@ func testFunctional() {
return
}
- transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
+ transport := createHTTPTransport()
if err != nil {
logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
return
@@ -7613,7 +7813,6 @@ func testFunctional() {
"reqParams": reqParams,
}
presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams)
-
if err != nil {
logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
return
@@ -7770,14 +7969,12 @@ func testFunctional() {
"objectName": objectName,
}
err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{})
-
if err != nil {
logError(testName, function, args, startTime, "", "RemoveObject failed", err)
return
}
args["objectName"] = objectName + "-f"
err = c.RemoveObject(context.Background(), bucketName, objectName+"-f", minio.RemoveObjectOptions{})
-
if err != nil {
logError(testName, function, args, startTime, "", "RemoveObject failed", err)
return
@@ -7785,7 +7982,6 @@ func testFunctional() {
args["objectName"] = objectName + "-nolength"
err = c.RemoveObject(context.Background(), bucketName, objectName+"-nolength", minio.RemoveObjectOptions{})
-
if err != nil {
logError(testName, function, args, startTime, "", "RemoveObject failed", err)
return
@@ -7793,7 +7989,6 @@ func testFunctional() {
args["objectName"] = objectName + "-presigned"
err = c.RemoveObject(context.Background(), bucketName, objectName+"-presigned", minio.RemoveObjectOptions{})
-
if err != nil {
logError(testName, function, args, startTime, "", "RemoveObject failed", err)
return
@@ -7801,7 +7996,6 @@ func testFunctional() {
args["objectName"] = objectName + "-presign-custom"
err = c.RemoveObject(context.Background(), bucketName, objectName+"-presign-custom", minio.RemoveObjectOptions{})
-
if err != nil {
logError(testName, function, args, startTime, "", "RemoveObject failed", err)
return
@@ -7813,7 +8007,6 @@ func testFunctional() {
"bucketName": bucketName,
}
err = c.RemoveBucket(context.Background(), bucketName)
-
if err != nil {
logError(testName, function, args, startTime, "", "RemoveBucket failed", err)
return
@@ -12281,7 +12474,6 @@ func testFunctionalV2() {
"bucketPolicy": readWritePolicy,
}
err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy)
-
if err != nil {
logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
return
@@ -12452,18 +12644,12 @@ func testFunctionalV2() {
return
}
- transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS)))
- if err != nil {
- logError(testName, function, args, startTime, "", "DefaultTransport failed", err)
- return
- }
-
httpClient := &http.Client{
// Setting a sensible time out of 30secs to wait for response
// headers. Request is pro-actively canceled after 30secs
// with no response.
Timeout: 30 * time.Second,
- Transport: transport,
+ Transport: createHTTPTransport(),
}
req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil)
@@ -13012,7 +13198,6 @@ func testGetObjectACLContext() {
ContentType: "binary/octet-stream",
UserMetadata: metaData,
})
-
if err != nil {
logError(testName, function, args, startTime, "", "PutObject failed", err)
return
@@ -13491,6 +13676,844 @@ func testListObjects() {
logSuccess(testName, function, args, startTime)
}
+// testCors is runnable against S3 itself.
+// Just provide the env var MINIO_GO_TEST_BUCKET_CORS with bucket that is public and WILL BE DELETED.
+// Recreate this manually each time. Minio-go SDK does not support calling
+// SetPublicBucket (put-public-access-block) on S3, otherwise we could script the whole thing.
+func testCors() {
+ ctx := context.Background()
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "SetBucketCors(bucketName, cors)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "cors": "",
+ }
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Create or reuse a bucket that will get cors settings applied to it and deleted when done
+ bucketName := os.Getenv("MINIO_GO_TEST_BUCKET_CORS")
+ if bucketName == "" {
+ bucketName = randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ }
+ args["bucketName"] = bucketName
+ defer cleanupBucket(bucketName, c)
+
+ publicPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:*"],"Resource":["arn:aws:s3:::` + bucketName + `", "arn:aws:s3:::` + bucketName + `/*"]}]}`
+ err = c.SetBucketPolicy(ctx, bucketName, publicPolicy)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+ return
+ }
+
+ // Upload an object for testing.
+ objectContents := `some-text-file-contents`
+ reader := strings.NewReader(objectContents)
+ bufSize := int64(len(objectContents))
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+ bucketURL := c.EndpointURL().String() + "/" + bucketName + "/"
+ objectURL := bucketURL + objectName
+
+ httpClient := &http.Client{
+ Timeout: 30 * time.Second,
+ Transport: createHTTPTransport(),
+ }
+
+ errStrAccessForbidden := `AccessForbidden
CORSResponse: This CORS request is not allowed. This is usually because the evalution of Origin, request method / Access-Control-Request-Method or Access-Control-Request-Headers are not whitelisted`
+ testCases := []struct {
+ name string
+
+ // Cors rules to apply
+ applyCorsRules []cors.Rule
+
+ // Outbound request info
+ method string
+ url string
+ headers map[string]string
+
+ // Wanted response
+ wantStatus int
+ wantHeaders map[string]string
+ wantBodyContains string
+ }{
+ {
+ name: "apply bucket rules",
+ applyCorsRules: []cors.Rule{
+ {
+ AllowedOrigin: []string{"https"}, // S3 documents 'https' origin, but it does not actually work, see test below.
+ AllowedMethod: []string{"PUT"},
+ AllowedHeader: []string{"*"},
+ },
+ {
+ AllowedOrigin: []string{"http://www.example1.com"},
+ AllowedMethod: []string{"PUT"},
+ AllowedHeader: []string{"*"},
+ ExposeHeader: []string{"x-amz-server-side-encryption", "x-amz-request-id"},
+ MaxAgeSeconds: 3600,
+ },
+ {
+ AllowedOrigin: []string{"http://www.example2.com"},
+ AllowedMethod: []string{"POST"},
+ AllowedHeader: []string{"X-My-Special-Header"},
+ ExposeHeader: []string{"X-AMZ-Request-ID"},
+ },
+ {
+ AllowedOrigin: []string{"http://www.example3.com"},
+ AllowedMethod: []string{"PUT"},
+ AllowedHeader: []string{"X-Example-3-Special-Header"},
+ MaxAgeSeconds: 10,
+ },
+ {
+ AllowedOrigin: []string{"*"},
+ AllowedMethod: []string{"GET"},
+ AllowedHeader: []string{"*"},
+ ExposeHeader: []string{"x-amz-request-id", "X-AMZ-server-side-encryption"},
+ MaxAgeSeconds: 3600,
+ },
+ {
+ AllowedOrigin: []string{"http://multiplemethodstest.com"},
+ AllowedMethod: []string{"POST", "PUT", "DELETE"},
+ AllowedHeader: []string{"x-abc-*", "x-def-*"},
+ },
+ {
+ AllowedOrigin: []string{"http://UPPERCASEEXAMPLE.com"},
+ AllowedMethod: []string{"DELETE"},
+ },
+ {
+ AllowedOrigin: []string{"https://*"},
+ AllowedMethod: []string{"DELETE"},
+ AllowedHeader: []string{"x-abc-*", "x-def-*"},
+ },
+ },
+ },
+ {
+ name: "preflight to object url matches example1 rule",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "Access-Control-Request-Method": "PUT",
+ "Access-Control-Request-Headers": "x-another-header,x-could-be-anything",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Origin": "http://www.example1.com",
+ "Access-Control-Allow-Methods": "PUT",
+ "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything",
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Max-Age": "3600",
+ "Content-Length": "0",
+ // S3 additionally sets the following headers here, MinIO follows fetch spec and does not:
+ // "Access-Control-Expose-Headers": "",
+ },
+ },
+ {
+ name: "preflight to bucket url matches example1 rule",
+ method: http.MethodOptions,
+ url: bucketURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "Access-Control-Request-Method": "PUT",
+ "Access-Control-Request-Headers": "x-another-header,x-could-be-anything",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Origin": "http://www.example1.com",
+ "Access-Control-Allow-Methods": "PUT",
+ "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything",
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Max-Age": "3600",
+ "Content-Length": "0",
+ },
+ },
+ {
+ name: "preflight matches example2 rule with header given",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example2.com",
+ "Access-Control-Request-Method": "POST",
+ "Access-Control-Request-Headers": "X-My-Special-Header",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Origin": "http://www.example2.com",
+ "Access-Control-Allow-Methods": "POST",
+ "Access-Control-Allow-Headers": "x-my-special-header",
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Max-Age": "",
+ "Content-Length": "0",
+ },
+ },
+ {
+ name: "preflight matches example2 rule with no header given",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example2.com",
+ "Access-Control-Request-Method": "POST",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Origin": "http://www.example2.com",
+ "Access-Control-Allow-Methods": "POST",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Max-Age": "",
+ "Content-Length": "0",
+ },
+ },
+ {
+ name: "preflight matches wildcard origin rule",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.couldbeanything.com",
+ "Access-Control-Request-Method": "GET",
+ "Access-Control-Request-Headers": "x-custom-header,x-other-custom-header",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Origin": "*",
+ "Access-Control-Allow-Methods": "GET",
+ "Access-Control-Allow-Headers": "x-custom-header,x-other-custom-header",
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Max-Age": "3600",
+ "Content-Length": "0",
+ },
+ },
+ {
+ name: "preflight does not match any rule",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.couldbeanything.com",
+ "Access-Control-Request-Method": "DELETE",
+ },
+ wantStatus: http.StatusForbidden,
+ wantBodyContains: errStrAccessForbidden,
+ },
+ {
+ name: "preflight does not match example1 rule because of method",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "Access-Control-Request-Method": "POST",
+ },
+ wantStatus: http.StatusForbidden,
+ wantBodyContains: errStrAccessForbidden,
+ },
+ {
+ name: "s3 processes cors rules even when request is not preflight if cors headers present test get",
+ method: http.MethodGet,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "Access-Control-Request-Headers": "x-another-header,x-could-be-anything",
+ "Access-Control-Request-Method": "PUT",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Allow-Origin": "http://www.example1.com",
+ "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id",
+ // S3 additionally sets the following headers here, MinIO follows fetch spec and does not:
+ // "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything",
+ // "Access-Control-Allow-Methods": "PUT",
+ // "Access-Control-Max-Age": "3600",
+ },
+ },
+ {
+ name: "s3 processes cors rules even when request is not preflight if cors headers present test put",
+ method: http.MethodPut,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "Access-Control-Request-Method": "GET",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Allow-Origin": "*",
+ "Access-Control-Expose-Headers": "x-amz-request-id,x-amz-server-side-encryption",
+ // S3 additionally sets the following headers here, MinIO follows fetch spec and does not:
+ // "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything",
+ // "Access-Control-Allow-Methods": "PUT",
+ // "Access-Control-Max-Age": "3600",
+ },
+ },
+ {
+ name: "s3 processes cors rules even when request is not preflight but there is no rule match",
+ method: http.MethodGet,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "Access-Control-Request-Headers": "x-another-header,x-could-be-anything",
+ "Access-Control-Request-Method": "DELETE",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Methods": "",
+ "Access-Control-Allow-Origin": "",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "get request matches wildcard origin rule and returns cors headers",
+ method: http.MethodGet,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Allow-Origin": "*",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "x-amz-request-id,X-AMZ-server-side-encryption",
+ // S3 returns the following headers, MinIO follows fetch spec and does not:
+ // "Access-Control-Max-Age": "3600",
+ // "Access-Control-Allow-Methods": "GET",
+ },
+ },
+ {
+ name: "head request does not match rule and returns no cors headers",
+ method: http.MethodHead,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.nomatchingdomainfound.com",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Allow-Methods": "",
+ "Access-Control-Allow-Origin": "",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "put request with origin does not match rule and returns no cors headers",
+ method: http.MethodPut,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.nomatchingdomainfound.com",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Allow-Methods": "",
+ "Access-Control-Allow-Origin": "",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "put request with no origin does not match rule and returns no cors headers",
+ method: http.MethodPut,
+ url: objectURL,
+ headers: map[string]string{},
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Allow-Methods": "",
+ "Access-Control-Allow-Origin": "",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "preflight for delete request with wildcard origin does not match",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.notsecureexample.com",
+ "Access-Control-Request-Method": "DELETE",
+ },
+ wantStatus: http.StatusForbidden,
+ wantBodyContains: errStrAccessForbidden,
+ },
+ {
+ name: "preflight for delete request with wildcard https origin matches secureexample",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "https://www.secureexample.com",
+ "Access-Control-Request-Method": "DELETE",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Allow-Methods": "DELETE",
+ "Access-Control-Allow-Origin": "https://www.secureexample.com",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "preflight for delete request matches secureexample with wildcard https origin and request headers",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "https://www.secureexample.com",
+ "Access-Control-Request-Method": "DELETE",
+ "Access-Control-Request-Headers": "x-abc-1,x-abc-second,x-def-1",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Allow-Methods": "DELETE",
+ "Access-Control-Allow-Origin": "https://www.secureexample.com",
+ "Access-Control-Allow-Headers": "x-abc-1,x-abc-second,x-def-1",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "preflight for delete request matches secureexample rejected because request header does not match",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "https://www.secureexample.com",
+ "Access-Control-Request-Method": "DELETE",
+ "Access-Control-Request-Headers": "x-abc-1,x-abc-second,x-def-1,x-does-not-match",
+ },
+ wantStatus: http.StatusForbidden,
+ wantBodyContains: errStrAccessForbidden,
+ },
+ {
+ name: "preflight with https origin is documented by s3 as matching but it does not match",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "https://www.securebutdoesnotmatch.com",
+ "Access-Control-Request-Method": "PUT",
+ },
+ wantStatus: http.StatusForbidden,
+ wantBodyContains: errStrAccessForbidden,
+ },
+ {
+ name: "put no origin no match returns no cors headers",
+ method: http.MethodPut,
+ url: objectURL,
+ headers: map[string]string{},
+ wantStatus: http.StatusOK,
+
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Allow-Methods": "",
+ "Access-Control-Allow-Origin": "",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "put with origin match example1 returns cors headers",
+ method: http.MethodPut,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ },
+ wantStatus: http.StatusOK,
+
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Allow-Origin": "http://www.example1.com",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id",
+ // S3 returns the following headers, MinIO follows fetch spec and does not:
+ // "Access-Control-Max-Age": "3600",
+ // "Access-Control-Allow-Methods": "PUT",
+ },
+ },
+ {
+ name: "put with origin and header match example1 returns cors headers",
+ method: http.MethodPut,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "x-could-be-anything": "myvalue",
+ },
+ wantStatus: http.StatusOK,
+
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Allow-Origin": "http://www.example1.com",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id",
+ // S3 returns the following headers, MinIO follows fetch spec and does not:
+ // "Access-Control-Max-Age": "3600",
+ // "Access-Control-Allow-Methods": "PUT",
+ },
+ },
+ {
+ name: "put no match found returns no cors headers",
+ method: http.MethodPut,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.unmatchingdomain.com",
+ },
+ wantStatus: http.StatusOK,
+
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "",
+ "Access-Control-Allow-Methods": "",
+ "Access-Control-Allow-Origin": "",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "put with origin match example3 returns cors headers",
+ method: http.MethodPut,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example3.com",
+ "X-My-Special-Header": "myvalue",
+ },
+ wantStatus: http.StatusOK,
+
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Allow-Origin": "http://www.example3.com",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "",
+ // S3 returns the following headers, MinIO follows fetch spec and does not:
+ // "Access-Control-Max-Age": "10",
+ // "Access-Control-Allow-Methods": "PUT",
+ },
+ },
+ {
+ name: "preflight matches example1 rule headers case is incorrect",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "Access-Control-Request-Method": "PUT",
+ // Fetch standard guarantees that these are sent lowercase, here we test what happens when they are not.
+ "Access-Control-Request-Headers": "X-Another-Header,X-Could-Be-Anything",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Origin": "http://www.example1.com",
+ "Access-Control-Allow-Methods": "PUT",
+ "Access-Control-Allow-Headers": "x-another-header,x-could-be-anything",
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Max-Age": "3600",
+ "Content-Length": "0",
+ // S3 returns the following headers, MinIO follows fetch spec and does not:
+ // "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id",
+ },
+ },
+ {
+ name: "preflight matches example1 rule headers are not sorted",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.example1.com",
+ "Access-Control-Request-Method": "PUT",
+ // Fetch standard guarantees that these are sorted, test what happens when they are not.
+ "Access-Control-Request-Headers": "a-customer-header,b-should-be-last",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Origin": "http://www.example1.com",
+ "Access-Control-Allow-Methods": "PUT",
+ "Access-Control-Allow-Headers": "a-customer-header,b-should-be-last",
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Max-Age": "3600",
+ "Content-Length": "0",
+ // S3 returns the following headers, MinIO follows fetch spec and does not:
+ // "Access-Control-Expose-Headers": "x-amz-server-side-encryption,x-amz-request-id",
+ },
+ },
+ {
+ name: "preflight with case sensitivity in origin matches uppercase",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://UPPERCASEEXAMPLE.com",
+ "Access-Control-Request-Method": "DELETE",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Allow-Methods": "DELETE",
+ "Access-Control-Allow-Origin": "http://UPPERCASEEXAMPLE.com",
+ "Access-Control-Allow-Headers": "",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ },
+ },
+ {
+ name: "preflight with case sensitivity in origin does not match when lowercase",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://uppercaseexample.com",
+ "Access-Control-Request-Method": "DELETE",
+ },
+ wantStatus: http.StatusForbidden,
+ wantBodyContains: errStrAccessForbidden,
+ },
+ {
+ name: "preflight match upper case with unknown header but no header restrictions",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://UPPERCASEEXAMPLE.com",
+ "Access-Control-Request-Method": "DELETE",
+ "Access-Control-Request-Headers": "x-unknown-1",
+ },
+ wantStatus: http.StatusForbidden,
+ wantBodyContains: errStrAccessForbidden,
+ },
+ {
+ name: "preflight for delete request matches multiplemethodstest.com origin and request headers",
+ method: http.MethodOptions,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://multiplemethodstest.com",
+ "Access-Control-Request-Method": "DELETE",
+ "Access-Control-Request-Headers": "x-abc-1",
+ },
+ wantStatus: http.StatusOK,
+ wantHeaders: map[string]string{
+ "Access-Control-Allow-Credentials": "true",
+ "Access-Control-Allow-Origin": "http://multiplemethodstest.com",
+ "Access-Control-Allow-Headers": "x-abc-1",
+ "Access-Control-Expose-Headers": "",
+ "Access-Control-Max-Age": "",
+ // S3 returns POST, PUT, DELETE here, MinIO does not as spec does not require it.
+ // "Access-Control-Allow-Methods": "DELETE",
+ },
+ },
+ {
+ name: "delete request goes ahead because cors is only for browsers and does not block on the server side",
+ method: http.MethodDelete,
+ url: objectURL,
+ headers: map[string]string{
+ "Origin": "http://www.justrandom.com",
+ },
+ wantStatus: http.StatusNoContent,
+ },
+ }
+
+ for i, test := range testCases {
+ testName := fmt.Sprintf("%s_%d_%s", testName, i+1, strings.ReplaceAll(test.name, " ", "_"))
+
+ // Apply the CORS rules
+ if test.applyCorsRules != nil {
+ corsConfig := &cors.Config{
+ CORSRules: test.applyCorsRules,
+ }
+ err = c.SetBucketCors(ctx, bucketName, corsConfig)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketCors failed to apply", err)
+ return
+ }
+ }
+
+ // Make request
+ if test.method != "" && test.url != "" {
+ req, err := http.NewRequestWithContext(ctx, test.method, test.url, nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "HTTP request creation failed", err)
+ return
+ }
+ req.Header.Set("User-Agent", "MinIO-go-FunctionalTest/"+appVersion)
+
+ for k, v := range test.headers {
+ req.Header.Set(k, v)
+ }
+ resp, err := httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "HTTP request failed", err)
+ return
+ }
+ defer resp.Body.Close()
+
+ // Check returned status code
+ if resp.StatusCode != test.wantStatus {
+ errStr := fmt.Sprintf(" incorrect status code in response, want: %d, got: %d", test.wantStatus, resp.StatusCode)
+ logError(testName, function, args, startTime, "", errStr, nil)
+ return
+ }
+
+ // Check returned body
+ if test.wantBodyContains != "" {
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Failed to read response body", err)
+ return
+ }
+ if !strings.Contains(string(body), test.wantBodyContains) {
+ errStr := fmt.Sprintf(" incorrect body in response, want: %s, in got: %s", test.wantBodyContains, string(body))
+ logError(testName, function, args, startTime, "", errStr, nil)
+ return
+ }
+ }
+
+ // Check returned response headers
+ for k, v := range test.wantHeaders {
+ gotVal := resp.Header.Get(k)
+ if k == "Access-Control-Expose-Headers" {
+ // MinIO returns this in canonical form, S3 does not.
+ gotVal = strings.ToLower(gotVal)
+ v = strings.ToLower(v)
+ }
+ // Remove all spaces, S3 adds spaces after CSV values in headers, MinIO does not.
+ gotVal = strings.ReplaceAll(gotVal, " ", "")
+ if gotVal != v {
+ errStr := fmt.Sprintf(" incorrect header in response, want: %s: '%s', got: '%s'", k, v, gotVal)
+ logError(testName, function, args, startTime, "", errStr, nil)
+ return
+ }
+ }
+ }
+ logSuccess(testName, function, args, startTime)
+ }
+ logSuccess(testName, function, args, startTime)
+}
+
+func testCorsSetGetDelete() {
+ ctx := context.Background()
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "SetBucketCors(bucketName, cors)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "cors": "",
+ }
+
+ // Instantiate new minio client object
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer cleanupBucket(bucketName, c)
+
+ // Set the CORS rules on the new bucket
+ corsRules := []cors.Rule{
+ {
+ AllowedOrigin: []string{"http://www.example1.com"},
+ AllowedMethod: []string{"PUT"},
+ AllowedHeader: []string{"*"},
+ },
+ {
+ AllowedOrigin: []string{"http://www.example2.com"},
+ AllowedMethod: []string{"POST"},
+ AllowedHeader: []string{"X-My-Special-Header"},
+ },
+ {
+ AllowedOrigin: []string{"*"},
+ AllowedMethod: []string{"GET"},
+ AllowedHeader: []string{"*"},
+ },
+ }
+ corsConfig := cors.NewConfig(corsRules)
+ err = c.SetBucketCors(ctx, bucketName, corsConfig)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketCors failed to apply", err)
+ return
+ }
+
+ // Get the rules and check they match what we set
+ gotCorsConfig, err := c.GetBucketCors(ctx, bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetBucketCors failed", err)
+ return
+ }
+ if !reflect.DeepEqual(corsConfig, gotCorsConfig) {
+ msg := fmt.Sprintf("GetBucketCors returned unexpected rules, expected: %+v, got: %+v", corsConfig, gotCorsConfig)
+ logError(testName, function, args, startTime, "", msg, nil)
+ return
+ }
+
+ // Delete the rules
+ err = c.SetBucketCors(ctx, bucketName, nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketCors failed to delete", err)
+ return
+ }
+
+ // Get the rules and check they are now empty
+ gotCorsConfig, err = c.GetBucketCors(ctx, bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetBucketCors failed", err)
+ return
+ }
+ if gotCorsConfig != nil {
+ logError(testName, function, args, startTime, "", "GetBucketCors returned unexpected rules", nil)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
// Test deleting multiple objects with object retention set in Governance mode
func testRemoveObjects() {
// initialize logging params
@@ -13627,6 +14650,245 @@ func testRemoveObjects() {
logSuccess(testName, function, args, startTime)
}
+// Test get bucket tags
+func testGetBucketTagging() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetBucketTagging(bucketName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ _, err = c.GetBucketTagging(context.Background(), bucketName)
+ if minio.ToErrorResponse(err).Code != "NoSuchTagSet" {
+ logError(testName, function, args, startTime, "", "Invalid error from server failed", err)
+ return
+ }
+
+ if err = cleanupVersionedBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test setting tags for bucket
+func testSetBucketTagging() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "SetBucketTagging(bucketName, tags)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "tags": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ _, err = c.GetBucketTagging(context.Background(), bucketName)
+ if minio.ToErrorResponse(err).Code != "NoSuchTagSet" {
+ logError(testName, function, args, startTime, "", "Invalid error from server", err)
+ return
+ }
+
+ tag := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ expectedValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+ t, err := tags.MapToBucketTags(map[string]string{
+ tag: expectedValue,
+ })
+ args["tags"] = t.String()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "tags.MapToBucketTags failed", err)
+ return
+ }
+
+ err = c.SetBucketTagging(context.Background(), bucketName, t)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketTagging failed", err)
+ return
+ }
+
+ tagging, err := c.GetBucketTagging(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetBucketTagging failed", err)
+ return
+ }
+
+ if tagging.ToMap()[tag] != expectedValue {
+ msg := fmt.Sprintf("Tag %s; got value %s; wanted %s", tag, tagging.ToMap()[tag], expectedValue)
+ logError(testName, function, args, startTime, "", msg, err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupVersionedBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
+// Test removing bucket tags
+func testRemoveBucketTagging() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "RemoveBucketTagging(bucketName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(os.Getenv(serverEndpoint),
+ &minio.Options{
+ Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""),
+ Transport: createHTTPTransport(),
+ Secure: mustParseBool(os.Getenv(enableHTTPS)),
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("MinIO-go-FunctionalTest", appVersion)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ _, err = c.GetBucketTagging(context.Background(), bucketName)
+ if minio.ToErrorResponse(err).Code != "NoSuchTagSet" {
+ logError(testName, function, args, startTime, "", "Invalid error from server", err)
+ return
+ }
+
+ tag := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ expectedValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+ t, err := tags.MapToBucketTags(map[string]string{
+ tag: expectedValue,
+ })
+ if err != nil {
+ logError(testName, function, args, startTime, "", "tags.MapToBucketTags failed", err)
+ return
+ }
+
+ err = c.SetBucketTagging(context.Background(), bucketName, t)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketTagging failed", err)
+ return
+ }
+
+ tagging, err := c.GetBucketTagging(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetBucketTagging failed", err)
+ return
+ }
+
+ if tagging.ToMap()[tag] != expectedValue {
+ msg := fmt.Sprintf("Tag %s; got value %s; wanted %s", tag, tagging.ToMap()[tag], expectedValue)
+ logError(testName, function, args, startTime, "", msg, err)
+ return
+ }
+
+ err = c.RemoveBucketTagging(context.Background(), bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveBucketTagging failed", err)
+ return
+ }
+
+ _, err = c.GetBucketTagging(context.Background(), bucketName)
+ if minio.ToErrorResponse(err).Code != "NoSuchTagSet" {
+ logError(testName, function, args, startTime, "", "Invalid error from server", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupVersionedBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "CleanupBucket failed", err)
+ return
+ }
+
+ logSuccess(testName, function, args, startTime)
+}
+
// Convert string to bool and always return false if any error
func mustParseBool(str string) bool {
b, err := strconv.ParseBool(str)
@@ -13660,6 +14922,8 @@ func main() {
// execute tests
if isFullMode() {
+ testCorsSetGetDelete()
+ testCors()
testListMultipartUpload()
testGetObjectAttributes()
testGetObjectAttributesErrorCases()
@@ -13676,7 +14940,9 @@ func main() {
testCompose10KSourcesV2()
testUserMetadataCopyingV2()
testPutObjectWithChecksums()
- testPutMultipartObjectWithChecksums()
+ testPutObjectWithTrailingChecksums()
+ testPutMultipartObjectWithChecksums(false)
+ testPutMultipartObjectWithChecksums(true)
testPutObject0ByteV2()
testPutObjectNoLengthV2()
testPutObjectsUnknownV2()
@@ -13731,6 +14997,9 @@ func main() {
testObjectTaggingWithVersioning()
testTrailingChecksums()
testPutObjectWithAutomaticChecksums()
+ testGetBucketTagging()
+ testSetBucketTagging()
+ testRemoveBucketTagging()
// SSE-C tests will only work over TLS connection.
if tls {
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/cors/cors.go b/vendor/github.com/minio/minio-go/v7/pkg/cors/cors.go
new file mode 100644
index 00000000..e71864ee
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/v7/pkg/cors/cors.go
@@ -0,0 +1,91 @@
+/*
+ * MinIO Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2024 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package cors
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/dustin/go-humanize"
+)
+
+const defaultXMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
+
+// Config is the container for a CORS configuration for a bucket.
+type Config struct {
+ XMLNS string `xml:"xmlns,attr,omitempty"`
+ XMLName xml.Name `xml:"CORSConfiguration"`
+ CORSRules []Rule `xml:"CORSRule"`
+}
+
+// Rule is a single rule in a CORS configuration.
+type Rule struct {
+ AllowedHeader []string `xml:"AllowedHeader,omitempty"`
+ AllowedMethod []string `xml:"AllowedMethod,omitempty"`
+ AllowedOrigin []string `xml:"AllowedOrigin,omitempty"`
+ ExposeHeader []string `xml:"ExposeHeader,omitempty"`
+ ID string `xml:"ID,omitempty"`
+ MaxAgeSeconds int `xml:"MaxAgeSeconds,omitempty"`
+}
+
+// NewConfig creates a new CORS configuration with the given rules.
+func NewConfig(rules []Rule) *Config {
+ return &Config{
+ XMLNS: defaultXMLNS,
+ XMLName: xml.Name{
+ Local: "CORSConfiguration",
+ Space: defaultXMLNS,
+ },
+ CORSRules: rules,
+ }
+}
+
+// ParseBucketCorsConfig parses a CORS configuration in XML from an io.Reader.
+func ParseBucketCorsConfig(reader io.Reader) (*Config, error) {
+ var c Config
+
+ // Max size of cors document is 64KiB according to https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html
+ // This limiter is just a safety measure, so it allows up to 128KiB.
+ err := xml.NewDecoder(io.LimitReader(reader, 128*humanize.KiByte)).Decode(&c)
+ if err != nil {
+ return nil, fmt.Errorf("decoding xml: %w", err)
+ }
+ if c.XMLNS == "" {
+ c.XMLNS = defaultXMLNS
+ }
+ for i, rule := range c.CORSRules {
+ for j, method := range rule.AllowedMethod {
+ c.CORSRules[i].AllowedMethod[j] = strings.ToUpper(method)
+ }
+ }
+ return &c, nil
+}
+
+// ToXML marshals the CORS configuration to XML.
+func (c Config) ToXML() ([]byte, error) {
+ if c.XMLNS == "" {
+ c.XMLNS = defaultXMLNS
+ }
+ data, err := xml.Marshal(&c)
+ if err != nil {
+ return nil, fmt.Errorf("marshaling xml: %w", err)
+ }
+ return append([]byte(xml.Header), data...), nil
+}
diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go
index 3f023704..19687e02 100644
--- a/vendor/github.com/minio/minio-go/v7/post-policy.go
+++ b/vendor/github.com/minio/minio-go/v7/post-policy.go
@@ -301,6 +301,25 @@ func (p *PostPolicy) SetUserMetadata(key, value string) error {
return nil
}
+// SetUserMetadataStartsWith - Set a starts-with condition on a user metadata value.
+// Can be retrieved through a HEAD request or an event.
+func (p *PostPolicy) SetUserMetadataStartsWith(key, value string) error {
+ if strings.TrimSpace(key) == "" || key == "" {
+ return errInvalidArgument("Key is empty")
+ }
+ headerName := fmt.Sprintf("x-amz-meta-%s", key)
+ policyCond := policyCondition{
+ matchType: "starts-with",
+ condition: fmt.Sprintf("$%s", headerName),
+ value: value,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData[headerName] = value
+ return nil
+}
+
// SetChecksum sets the checksum of the request.
func (p *PostPolicy) SetChecksum(c Checksum) {
if c.IsSet() {
diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go
index 5ddcad89..d15eb590 100644
--- a/vendor/github.com/minio/minio-go/v7/retry.go
+++ b/vendor/github.com/minio/minio-go/v7/retry.go
@@ -129,9 +129,10 @@ func isHTTPStatusRetryable(httpStatusCode int) (ok bool) {
}
// For now, all http Do() requests are retriable except some well defined errors
-func isRequestErrorRetryable(err error) bool {
+func isRequestErrorRetryable(ctx context.Context, err error) bool {
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
- return false
+ // Retry only when the parent context is still live, i.e. the timeout came from an internal per-call deadline.
+ return ctx.Err() == nil
}
if ue, ok := err.(*url.Error); ok {
e := ue.Unwrap()
diff --git a/vendor/github.com/minio/minio-go/v7/s3-error.go b/vendor/github.com/minio/minio-go/v7/s3-error.go
index f365157e..f7fad19f 100644
--- a/vendor/github.com/minio/minio-go/v7/s3-error.go
+++ b/vendor/github.com/minio/minio-go/v7/s3-error.go
@@ -57,5 +57,6 @@ var s3ErrorResponseMap = map[string]string{
"BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.",
"InvalidDuration": "Duration provided in the request is invalid.",
"XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.",
+ "NoSuchCORSConfiguration": "The specified bucket does not have a CORS configuration.",
// Add new API errors here.
}
diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore
deleted file mode 100644
index daf913b1..00000000
--- a/vendor/github.com/pkg/errors/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
deleted file mode 100644
index 9159de03..00000000
--- a/vendor/github.com/pkg/errors/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: go
-go_import_path: github.com/pkg/errors
-go:
- - 1.11.x
- - 1.12.x
- - 1.13.x
- - tip
-
-script:
- - make check
diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE
deleted file mode 100644
index 835ba3e7..00000000
--- a/vendor/github.com/pkg/errors/LICENSE
+++ /dev/null
@@ -1,23 +0,0 @@
-Copyright (c) 2015, Dave Cheney
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile
deleted file mode 100644
index ce9d7cde..00000000
--- a/vendor/github.com/pkg/errors/Makefile
+++ /dev/null
@@ -1,44 +0,0 @@
-PKGS := github.com/pkg/errors
-SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
-GO := go
-
-check: test vet gofmt misspell unconvert staticcheck ineffassign unparam
-
-test:
- $(GO) test $(PKGS)
-
-vet: | test
- $(GO) vet $(PKGS)
-
-staticcheck:
- $(GO) get honnef.co/go/tools/cmd/staticcheck
- staticcheck -checks all $(PKGS)
-
-misspell:
- $(GO) get github.com/client9/misspell/cmd/misspell
- misspell \
- -locale GB \
- -error \
- *.md *.go
-
-unconvert:
- $(GO) get github.com/mdempsky/unconvert
- unconvert -v $(PKGS)
-
-ineffassign:
- $(GO) get github.com/gordonklaus/ineffassign
- find $(SRCDIRS) -name '*.go' | xargs ineffassign
-
-pedantic: check errcheck
-
-unparam:
- $(GO) get mvdan.cc/unparam
- unparam ./...
-
-errcheck:
- $(GO) get github.com/kisielk/errcheck
- errcheck $(PKGS)
-
-gofmt:
- @echo Checking code is gofmted
- @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
deleted file mode 100644
index 54dfdcb1..00000000
--- a/vendor/github.com/pkg/errors/README.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge)
-
-Package errors provides simple error handling primitives.
-
-`go get github.com/pkg/errors`
-
-The traditional error handling idiom in Go is roughly akin to
-```go
-if err != nil {
- return err
-}
-```
-which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
-
-## Adding context to an error
-
-The errors.Wrap function returns a new error that adds context to the original error. For example
-```go
-_, err := ioutil.ReadAll(r)
-if err != nil {
- return errors.Wrap(err, "read failed")
-}
-```
-## Retrieving the cause of an error
-
-Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
-```go
-type causer interface {
- Cause() error
-}
-```
-`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
-```go
-switch err := errors.Cause(err).(type) {
-case *MyError:
- // handle specifically
-default:
- // unknown error
-}
-```
-
-[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
-
-## Roadmap
-
-With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
-
-- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
-- 1.0. Final release.
-
-## Contributing
-
-Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports.
-
-Before sending a PR, please discuss your change by raising an issue.
-
-## License
-
-BSD-2-Clause
diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml
deleted file mode 100644
index a932eade..00000000
--- a/vendor/github.com/pkg/errors/appveyor.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-version: build-{build}.{branch}
-
-clone_folder: C:\gopath\src\github.com\pkg\errors
-shallow_clone: true # for startup speed
-
-environment:
- GOPATH: C:\gopath
-
-platform:
- - x64
-
-# http://www.appveyor.com/docs/installed-software
-install:
- # some helpful output for debugging builds
- - go version
- - go env
- # pre-installed MinGW at C:\MinGW is 32bit only
- # but MSYS2 at C:\msys64 has mingw64
- - set PATH=C:\msys64\mingw64\bin;%PATH%
- - gcc --version
- - g++ --version
-
-build_script:
- - go install -v ./...
-
-test_script:
- - set PATH=C:\gopath\bin;%PATH%
- - go test -v ./...
-
-#artifacts:
-# - path: '%GOPATH%\bin\*.exe'
-deploy: off
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
deleted file mode 100644
index 161aea25..00000000
--- a/vendor/github.com/pkg/errors/errors.go
+++ /dev/null
@@ -1,288 +0,0 @@
-// Package errors provides simple error handling primitives.
-//
-// The traditional error handling idiom in Go is roughly akin to
-//
-// if err != nil {
-// return err
-// }
-//
-// which when applied recursively up the call stack results in error reports
-// without context or debugging information. The errors package allows
-// programmers to add context to the failure path in their code in a way
-// that does not destroy the original value of the error.
-//
-// Adding context to an error
-//
-// The errors.Wrap function returns a new error that adds context to the
-// original error by recording a stack trace at the point Wrap is called,
-// together with the supplied message. For example
-//
-// _, err := ioutil.ReadAll(r)
-// if err != nil {
-// return errors.Wrap(err, "read failed")
-// }
-//
-// If additional control is required, the errors.WithStack and
-// errors.WithMessage functions destructure errors.Wrap into its component
-// operations: annotating an error with a stack trace and with a message,
-// respectively.
-//
-// Retrieving the cause of an error
-//
-// Using errors.Wrap constructs a stack of errors, adding context to the
-// preceding error. Depending on the nature of the error it may be necessary
-// to reverse the operation of errors.Wrap to retrieve the original error
-// for inspection. Any error value which implements this interface
-//
-// type causer interface {
-// Cause() error
-// }
-//
-// can be inspected by errors.Cause. errors.Cause will recursively retrieve
-// the topmost error that does not implement causer, which is assumed to be
-// the original cause. For example:
-//
-// switch err := errors.Cause(err).(type) {
-// case *MyError:
-// // handle specifically
-// default:
-// // unknown error
-// }
-//
-// Although the causer interface is not exported by this package, it is
-// considered a part of its stable public interface.
-//
-// Formatted printing of errors
-//
-// All error values returned from this package implement fmt.Formatter and can
-// be formatted by the fmt package. The following verbs are supported:
-//
-// %s print the error. If the error has a Cause it will be
-// printed recursively.
-// %v see %s
-// %+v extended format. Each Frame of the error's StackTrace will
-// be printed in detail.
-//
-// Retrieving the stack trace of an error or wrapper
-//
-// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
-// invoked. This information can be retrieved with the following interface:
-//
-// type stackTracer interface {
-// StackTrace() errors.StackTrace
-// }
-//
-// The returned errors.StackTrace type is defined as
-//
-// type StackTrace []Frame
-//
-// The Frame type represents a call site in the stack trace. Frame supports
-// the fmt.Formatter interface that can be used for printing information about
-// the stack trace of this error. For example:
-//
-// if err, ok := err.(stackTracer); ok {
-// for _, f := range err.StackTrace() {
-// fmt.Printf("%+s:%d\n", f, f)
-// }
-// }
-//
-// Although the stackTracer interface is not exported by this package, it is
-// considered a part of its stable public interface.
-//
-// See the documentation for Frame.Format for more details.
-package errors
-
-import (
- "fmt"
- "io"
-)
-
-// New returns an error with the supplied message.
-// New also records the stack trace at the point it was called.
-func New(message string) error {
- return &fundamental{
- msg: message,
- stack: callers(),
- }
-}
-
-// Errorf formats according to a format specifier and returns the string
-// as a value that satisfies error.
-// Errorf also records the stack trace at the point it was called.
-func Errorf(format string, args ...interface{}) error {
- return &fundamental{
- msg: fmt.Sprintf(format, args...),
- stack: callers(),
- }
-}
-
-// fundamental is an error that has a message and a stack, but no caller.
-type fundamental struct {
- msg string
- *stack
-}
-
-func (f *fundamental) Error() string { return f.msg }
-
-func (f *fundamental) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- if s.Flag('+') {
- io.WriteString(s, f.msg)
- f.stack.Format(s, verb)
- return
- }
- fallthrough
- case 's':
- io.WriteString(s, f.msg)
- case 'q':
- fmt.Fprintf(s, "%q", f.msg)
- }
-}
-
-// WithStack annotates err with a stack trace at the point WithStack was called.
-// If err is nil, WithStack returns nil.
-func WithStack(err error) error {
- if err == nil {
- return nil
- }
- return &withStack{
- err,
- callers(),
- }
-}
-
-type withStack struct {
- error
- *stack
-}
-
-func (w *withStack) Cause() error { return w.error }
-
-// Unwrap provides compatibility for Go 1.13 error chains.
-func (w *withStack) Unwrap() error { return w.error }
-
-func (w *withStack) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- if s.Flag('+') {
- fmt.Fprintf(s, "%+v", w.Cause())
- w.stack.Format(s, verb)
- return
- }
- fallthrough
- case 's':
- io.WriteString(s, w.Error())
- case 'q':
- fmt.Fprintf(s, "%q", w.Error())
- }
-}
-
-// Wrap returns an error annotating err with a stack trace
-// at the point Wrap is called, and the supplied message.
-// If err is nil, Wrap returns nil.
-func Wrap(err error, message string) error {
- if err == nil {
- return nil
- }
- err = &withMessage{
- cause: err,
- msg: message,
- }
- return &withStack{
- err,
- callers(),
- }
-}
-
-// Wrapf returns an error annotating err with a stack trace
-// at the point Wrapf is called, and the format specifier.
-// If err is nil, Wrapf returns nil.
-func Wrapf(err error, format string, args ...interface{}) error {
- if err == nil {
- return nil
- }
- err = &withMessage{
- cause: err,
- msg: fmt.Sprintf(format, args...),
- }
- return &withStack{
- err,
- callers(),
- }
-}
-
-// WithMessage annotates err with a new message.
-// If err is nil, WithMessage returns nil.
-func WithMessage(err error, message string) error {
- if err == nil {
- return nil
- }
- return &withMessage{
- cause: err,
- msg: message,
- }
-}
-
-// WithMessagef annotates err with the format specifier.
-// If err is nil, WithMessagef returns nil.
-func WithMessagef(err error, format string, args ...interface{}) error {
- if err == nil {
- return nil
- }
- return &withMessage{
- cause: err,
- msg: fmt.Sprintf(format, args...),
- }
-}
-
-type withMessage struct {
- cause error
- msg string
-}
-
-func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
-func (w *withMessage) Cause() error { return w.cause }
-
-// Unwrap provides compatibility for Go 1.13 error chains.
-func (w *withMessage) Unwrap() error { return w.cause }
-
-func (w *withMessage) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- if s.Flag('+') {
- fmt.Fprintf(s, "%+v\n", w.Cause())
- io.WriteString(s, w.msg)
- return
- }
- fallthrough
- case 's', 'q':
- io.WriteString(s, w.Error())
- }
-}
-
-// Cause returns the underlying cause of the error, if possible.
-// An error value has a cause if it implements the following
-// interface:
-//
-// type causer interface {
-// Cause() error
-// }
-//
-// If the error does not implement Cause, the original error will
-// be returned. If the error is nil, nil will be returned without further
-// investigation.
-func Cause(err error) error {
- type causer interface {
- Cause() error
- }
-
- for err != nil {
- cause, ok := err.(causer)
- if !ok {
- break
- }
- err = cause.Cause()
- }
- return err
-}
diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go
deleted file mode 100644
index be0d10d0..00000000
--- a/vendor/github.com/pkg/errors/go113.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// +build go1.13
-
-package errors
-
-import (
- stderrors "errors"
-)
-
-// Is reports whether any error in err's chain matches target.
-//
-// The chain consists of err itself followed by the sequence of errors obtained by
-// repeatedly calling Unwrap.
-//
-// An error is considered to match a target if it is equal to that target or if
-// it implements a method Is(error) bool such that Is(target) returns true.
-func Is(err, target error) bool { return stderrors.Is(err, target) }
-
-// As finds the first error in err's chain that matches target, and if so, sets
-// target to that error value and returns true.
-//
-// The chain consists of err itself followed by the sequence of errors obtained by
-// repeatedly calling Unwrap.
-//
-// An error matches target if the error's concrete value is assignable to the value
-// pointed to by target, or if the error has a method As(interface{}) bool such that
-// As(target) returns true. In the latter case, the As method is responsible for
-// setting target.
-//
-// As will panic if target is not a non-nil pointer to either a type that implements
-// error, or to any interface type. As returns false if err is nil.
-func As(err error, target interface{}) bool { return stderrors.As(err, target) }
-
-// Unwrap returns the result of calling the Unwrap method on err, if err's
-// type contains an Unwrap method returning error.
-// Otherwise, Unwrap returns nil.
-func Unwrap(err error) error {
- return stderrors.Unwrap(err)
-}
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
deleted file mode 100644
index 779a8348..00000000
--- a/vendor/github.com/pkg/errors/stack.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package errors
-
-import (
- "fmt"
- "io"
- "path"
- "runtime"
- "strconv"
- "strings"
-)
-
-// Frame represents a program counter inside a stack frame.
-// For historical reasons if Frame is interpreted as a uintptr
-// its value represents the program counter + 1.
-type Frame uintptr
-
-// pc returns the program counter for this frame;
-// multiple frames may have the same PC value.
-func (f Frame) pc() uintptr { return uintptr(f) - 1 }
-
-// file returns the full path to the file that contains the
-// function for this Frame's pc.
-func (f Frame) file() string {
- fn := runtime.FuncForPC(f.pc())
- if fn == nil {
- return "unknown"
- }
- file, _ := fn.FileLine(f.pc())
- return file
-}
-
-// line returns the line number of source code of the
-// function for this Frame's pc.
-func (f Frame) line() int {
- fn := runtime.FuncForPC(f.pc())
- if fn == nil {
- return 0
- }
- _, line := fn.FileLine(f.pc())
- return line
-}
-
-// name returns the name of this function, if known.
-func (f Frame) name() string {
- fn := runtime.FuncForPC(f.pc())
- if fn == nil {
- return "unknown"
- }
- return fn.Name()
-}
-
-// Format formats the frame according to the fmt.Formatter interface.
-//
-// %s source file
-// %d source line
-// %n function name
-// %v equivalent to %s:%d
-//
-// Format accepts flags that alter the printing of some verbs, as follows:
-//
-// %+s function name and path of source file relative to the compile time
-// GOPATH separated by \n\t (\n\t)
-// %+v equivalent to %+s:%d
-func (f Frame) Format(s fmt.State, verb rune) {
- switch verb {
- case 's':
- switch {
- case s.Flag('+'):
- io.WriteString(s, f.name())
- io.WriteString(s, "\n\t")
- io.WriteString(s, f.file())
- default:
- io.WriteString(s, path.Base(f.file()))
- }
- case 'd':
- io.WriteString(s, strconv.Itoa(f.line()))
- case 'n':
- io.WriteString(s, funcname(f.name()))
- case 'v':
- f.Format(s, 's')
- io.WriteString(s, ":")
- f.Format(s, 'd')
- }
-}
-
-// MarshalText formats a stacktrace Frame as a text string. The output is the
-// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
-func (f Frame) MarshalText() ([]byte, error) {
- name := f.name()
- if name == "unknown" {
- return []byte(name), nil
- }
- return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
-}
-
-// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
-type StackTrace []Frame
-
-// Format formats the stack of Frames according to the fmt.Formatter interface.
-//
-// %s lists source files for each Frame in the stack
-// %v lists the source file and line number for each Frame in the stack
-//
-// Format accepts flags that alter the printing of some verbs, as follows:
-//
-// %+v Prints filename, function, and line number for each Frame in the stack.
-func (st StackTrace) Format(s fmt.State, verb rune) {
- switch verb {
- case 'v':
- switch {
- case s.Flag('+'):
- for _, f := range st {
- io.WriteString(s, "\n")
- f.Format(s, verb)
- }
- case s.Flag('#'):
- fmt.Fprintf(s, "%#v", []Frame(st))
- default:
- st.formatSlice(s, verb)
- }
- case 's':
- st.formatSlice(s, verb)
- }
-}
-
-// formatSlice will format this StackTrace into the given buffer as a slice of
-// Frame, only valid when called with '%s' or '%v'.
-func (st StackTrace) formatSlice(s fmt.State, verb rune) {
- io.WriteString(s, "[")
- for i, f := range st {
- if i > 0 {
- io.WriteString(s, " ")
- }
- f.Format(s, verb)
- }
- io.WriteString(s, "]")
-}
-
-// stack represents a stack of program counters.
-type stack []uintptr
-
-func (s *stack) Format(st fmt.State, verb rune) {
- switch verb {
- case 'v':
- switch {
- case st.Flag('+'):
- for _, pc := range *s {
- f := Frame(pc)
- fmt.Fprintf(st, "\n%+v", f)
- }
- }
- }
-}
-
-func (s *stack) StackTrace() StackTrace {
- f := make([]Frame, len(*s))
- for i := 0; i < len(f); i++ {
- f[i] = Frame((*s)[i])
- }
- return f
-}
-
-func callers() *stack {
- const depth = 32
- var pcs [depth]uintptr
- n := runtime.Callers(3, pcs[:])
- var st stack = pcs[0:n]
- return &st
-}
-
-// funcname removes the path prefix component of a function's name reported by func.Name().
-func funcname(name string) string {
- i := strings.LastIndex(name, "/")
- name = name[i+1:]
- i = strings.Index(name, ".")
- return name[i+1:]
-}
diff --git a/vendor/github.com/prep/average/.travis.yml b/vendor/github.com/prep/average/.travis.yml
deleted file mode 100644
index 9fc8e3b1..00000000
--- a/vendor/github.com/prep/average/.travis.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-language: go
-
-go:
- - 1.9
- - master
-
-# Skip the install step. Don't `go get` dependencies. Only build with the
-# code in vendor/
-install: true
-
-matrix:
- # It's ok if our code fails on unstable development versions of Go.
- allow_failures:
- - go: master
- # Don't wait for tip tests to finish. Mark the test run green if the
- # tests pass on the stable versions of Go.
- fast_finish: true
-
-notifications:
- email: false
-
-before_script:
- - GO_FILES=$(find . -iname '*.go' -type f | grep -v /vendor/)
-
-script:
- - test -z $(gofmt -s -l $GO_FILES)
- - go tool vet .
- - go test -v -race ./...
diff --git a/vendor/github.com/prep/average/README.md b/vendor/github.com/prep/average/README.md
deleted file mode 100644
index 60dbcc50..00000000
--- a/vendor/github.com/prep/average/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-average
-[![TravisCI](https://travis-ci.org/prep/average.svg?branch=master)](https://travis-ci.org/prep/average.svg?branch=master)
-[![Go Report Card](https://goreportcard.com/badge/github.com/prep/average)](https://goreportcard.com/report/github.com/prep/average)
-[![GoDoc](https://godoc.org/github.com/prep/average?status.svg)](https://godoc.org/github.com/prep/average)
-=======
-This stupidly named Go package contains a single struct that is used to implement counters on a sliding time window.
-
-Usage
------
-```go
-
-import (
- "fmt"
-
- "github.com/prep/average"
-)
-
-func main() {
- // Create a SlidingWindow that has a window of 15 minutes, with a
- // granulity of 1 minute.
- sw := average.MustNew(15 * time.Minute, time.Minute)
- defer sw.Stop()
-
- // Do some work.
- sw.Add(15)
- // Do some more work.
- sw.Add(22)
- // Do even more work.
- sw.Add(22)
-
- fmt.Printf("Average of last 1m: %f\n", sw.Average(time.Minute)
- fmt.Printf("Average of last 5m: %f\n", sw.Average(5 * time.Minute)
- fmt.Printf("Average of last 15m: %f\n\n", sw.Average(15 * time.Minute)
-
- total, numSamples := sw.Total(15 * time.Minute)
- fmt.Printf("Counter has a total of %d over %d samples", total, numSamples)
-}
-```
-
-License
--------
-This software is created for MessageBird B.V. and distributed under the BSD-style license found in the LICENSE file.
diff --git a/vendor/github.com/prep/average/slidingwindow.go b/vendor/github.com/prep/average/slidingwindow.go
deleted file mode 100644
index 793422dd..00000000
--- a/vendor/github.com/prep/average/slidingwindow.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Package average implements sliding time window.
-package average
-
-import (
- "errors"
- "sync"
- "time"
-)
-
-// SlidingWindow provides a sliding time window with a custom size and
-// granularity to store int64 counters. This can be used to determine the total
-// or unweighted mean average of a subset of the window size.
-type SlidingWindow struct {
- window time.Duration
- granularity time.Duration
- samples []int64
- pos int
- size int
- stopOnce sync.Once
- stopC chan struct{}
- sync.RWMutex
-}
-
-// MustNew returns a new SlidingWindow, but panics if an error occurs.
-func MustNew(window, granularity time.Duration) *SlidingWindow {
- sw, err := New(window, granularity)
- if err != nil {
- panic(err.Error())
- }
-
- return sw
-}
-
-// New returns a new SlidingWindow.
-func New(window, granularity time.Duration) (*SlidingWindow, error) {
- if window == 0 {
- return nil, errors.New("window cannot be 0")
- }
- if granularity == 0 {
- return nil, errors.New("granularity cannot be 0")
- }
- if window <= granularity || window%granularity != 0 {
- return nil, errors.New("window size has to be a multiplier of the granularity size")
- }
-
- sw := &SlidingWindow{
- window: window,
- granularity: granularity,
- samples: make([]int64, int(window/granularity)),
- stopC: make(chan struct{}),
- }
-
- go sw.shifter()
- return sw, nil
-}
-
-func (sw *SlidingWindow) shifter() {
- ticker := time.NewTicker(sw.granularity)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- sw.Lock()
- if sw.pos = sw.pos + 1; sw.pos >= len(sw.samples) {
- sw.pos = 0
- }
- sw.samples[sw.pos] = 0
- if sw.size < len(sw.samples) {
- sw.size++
- }
- sw.Unlock()
-
- case <-sw.stopC:
- return
- }
- }
-}
-
-// Add increments the value of the current sample.
-func (sw *SlidingWindow) Add(v int64) {
- sw.Lock()
- sw.samples[sw.pos] += v
- sw.Unlock()
-}
-
-// Average returns the unweighted mean of the specified window.
-func (sw *SlidingWindow) Average(window time.Duration) float64 {
- total, sampleCount := sw.Total(window)
- if sampleCount == 0 {
- return 0
- }
-
- return float64(total) / float64(sampleCount)
-}
-
-// Reset the samples in this sliding time window.
-func (sw *SlidingWindow) Reset() {
- sw.Lock()
- defer sw.Unlock()
-
- sw.pos, sw.size = 0, 0
- for i := range sw.samples {
- sw.samples[i] = 0
- }
-}
-
-// Stop the shifter of this sliding time window. A stopped SlidingWindow cannot
-// be started again.
-func (sw *SlidingWindow) Stop() {
- sw.stopOnce.Do(func() {
- sw.stopC <- struct{}{}
- })
-}
-
-// Total returns the sum of all values over the specified window, as well as
-// the number of samples.
-func (sw *SlidingWindow) Total(window time.Duration) (int64, int) {
- if window > sw.window {
- window = sw.window
- }
-
- sampleCount := int(window / sw.granularity)
- if sampleCount > sw.size {
- sampleCount = sw.size
- }
-
- sw.RLock()
- defer sw.RUnlock()
-
- var total int64
- for i := 1; i <= sampleCount; i++ {
- pos := sw.pos - i
- if pos < 0 {
- pos += len(sw.samples)
- }
-
- total += sw.samples[pos]
- }
-
- return total, sampleCount
-}
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
index dd878a30..b9cc55ab 100644
--- a/vendor/github.com/prometheus/client_golang/NOTICE
+++ b/vendor/github.com/prometheus/client_golang/NOTICE
@@ -16,8 +16,3 @@ Go support for Protocol Buffers - Google's data interchange format
http://github.com/golang/protobuf/
Copyright 2010 The Go Authors
See source code for license details.
-
-Support for streaming Protocol Buffer messages for the Go language (golang).
-https://github.com/matttproud/golang_protobuf_extensions
-Copyright 2013 Matt T. Proud
-Licensed under the Apache License, Version 2.0
diff --git a/vendor/github.com/prep/average/LICENSE b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE
similarity index 96%
rename from vendor/github.com/prep/average/LICENSE
rename to vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE
index 6a66aea5..65d761bc 100644
--- a/vendor/github.com/prep/average/LICENSE
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright (c) 2013 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
new file mode 100644
index 00000000..8547c8df
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
@@ -0,0 +1,145 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// Package header provides functions for parsing HTTP headers.
+package header
+
+import (
+ "net/http"
+ "strings"
+)
+
+// Octet types from RFC 2616.
+var octetTypes [256]octetType
+
+type octetType byte
+
+const (
+ isToken octetType = 1 << iota
+ isSpace
+)
+
+func init() {
+ // OCTET =
+ // CHAR =
+ // CTL =
+ // CR =
+ // LF =
+ // SP =
+ // HT =
+ // <"> =
+ // CRLF = CR LF
+ // LWS = [CRLF] 1*( SP | HT )
+ // TEXT =
+ // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+ // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+ // token = 1*
+ // qdtext = >
+
+ for c := 0; c < 256; c++ {
+ var t octetType
+ isCtl := c <= 31 || c == 127
+ isChar := 0 <= c && c <= 127
+ isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+ if strings.ContainsRune(" \t\r\n", rune(c)) {
+ t |= isSpace
+ }
+ if isChar && !isCtl && !isSeparator {
+ t |= isToken
+ }
+ octetTypes[c] = t
+ }
+}
+
+// AcceptSpec describes an Accept* header.
+type AcceptSpec struct {
+ Value string
+ Q float64
+}
+
+// ParseAccept parses Accept* headers.
+func ParseAccept(header http.Header, key string) (specs []AcceptSpec) {
+loop:
+ for _, s := range header[key] {
+ for {
+ var spec AcceptSpec
+ spec.Value, s = expectTokenSlash(s)
+ if spec.Value == "" {
+ continue loop
+ }
+ spec.Q = 1.0
+ s = skipSpace(s)
+ if strings.HasPrefix(s, ";") {
+ s = skipSpace(s[1:])
+ if !strings.HasPrefix(s, "q=") {
+ continue loop
+ }
+ spec.Q, s = expectQuality(s[2:])
+ if spec.Q < 0.0 {
+ continue loop
+ }
+ }
+ specs = append(specs, spec)
+ s = skipSpace(s)
+ if !strings.HasPrefix(s, ",") {
+ continue loop
+ }
+ s = skipSpace(s[1:])
+ }
+ }
+ return
+}
+
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isSpace == 0 {
+ break
+ }
+ }
+ return s[i:]
+}
+
+func expectTokenSlash(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ b := s[i]
+ if (octetTypes[b]&isToken == 0) && b != '/' {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+func expectQuality(s string) (q float64, rest string) {
+ switch {
+ case len(s) == 0:
+ return -1, ""
+ case s[0] == '0':
+ q = 0
+ case s[0] == '1':
+ q = 1
+ default:
+ return -1, ""
+ }
+ s = s[1:]
+ if !strings.HasPrefix(s, ".") {
+ return q, s
+ }
+ s = s[1:]
+ i := 0
+ n := 0
+ d := 1
+ for ; i < len(s); i++ {
+ b := s[i]
+ if b < '0' || b > '9' {
+ break
+ }
+ n = n*10 + int(b) - '0'
+ d *= 10
+ }
+ return q + float64(n)/float64(d), s[i:]
+}
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
new file mode 100644
index 00000000..2e45780b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
@@ -0,0 +1,36 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+package httputil
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header"
+)
+
+// NegotiateContentEncoding returns the best offered content encoding for the
+// request's Accept-Encoding header. If two offers match with equal weight and
+// then the offer earlier in the list is preferred. If no offers are
+// acceptable, then "" is returned.
+func NegotiateContentEncoding(r *http.Request, offers []string) string {
+ bestOffer := "identity"
+ bestQ := -1.0
+ specs := header.ParseAccept(r.Header, "Accept-Encoding")
+ for _, offer := range offers {
+ for _, spec := range specs {
+ if spec.Q > bestQ &&
+ (spec.Value == "*" || spec.Value == offer) {
+ bestQ = spec.Q
+ bestOffer = offer
+ }
+ }
+ }
+ if bestQ == 0 {
+ bestOffer = ""
+ }
+ return bestOffer
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index ad9a71a5..520cbd7d 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -22,13 +22,13 @@ import (
// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
// From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so
// while eval closure works on runtime.MemStats, the struct from Go 1.17+ is
-// populated using runtime/metrics.
+// populated using runtime/metrics. Those are the defaults we can't alter.
func goRuntimeMemStats() memStatsMetrics {
return memStatsMetrics{
{
desc: NewDesc(
memstatNamespace("alloc_bytes"),
- "Number of bytes allocated and still in use.",
+ "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
@@ -36,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("alloc_bytes_total"),
- "Total number of bytes allocated, even if freed.",
+ "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
@@ -44,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("sys_bytes"),
- "Number of bytes obtained from system.",
+ "Number of bytes obtained from system. Equals to /memory/classes/total:byte.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("lookups_total"),
- "Total number of pointer lookups.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
- valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("mallocs_total"),
- "Total number of mallocs.",
+ // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric.
+ "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
@@ -68,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("frees_total"),
- "Total number of frees.",
+ "Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
@@ -76,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_alloc_bytes"),
- "Number of heap bytes allocated and still in use.",
+ "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
@@ -84,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_sys_bytes"),
- "Number of heap bytes obtained from system.",
+ "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
@@ -92,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_idle_bytes"),
- "Number of heap bytes waiting to be used.",
+ "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
@@ -100,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_inuse_bytes"),
- "Number of heap bytes that are in use.",
+ "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
@@ -108,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_released_bytes"),
- "Number of heap bytes released to OS.",
+ "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
@@ -116,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_objects"),
- "Number of allocated objects.",
+ "Number of currently allocated objects. Equals to /gc/heap/objects:objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
@@ -124,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("stack_inuse_bytes"),
- "Number of bytes in use by the stack allocator.",
+ "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
@@ -132,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("stack_sys_bytes"),
- "Number of bytes obtained from system for stack allocator.",
+ "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
@@ -140,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("mspan_inuse_bytes"),
- "Number of bytes in use by mspan structures.",
+ "Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
@@ -148,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("mspan_sys_bytes"),
- "Number of bytes used for mspan structures obtained from system.",
+ "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
@@ -156,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("mcache_inuse_bytes"),
- "Number of bytes in use by mcache structures.",
+ "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
@@ -164,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("mcache_sys_bytes"),
- "Number of bytes used for mcache structures obtained from system.",
+ "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
@@ -172,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("buck_hash_sys_bytes"),
- "Number of bytes used by the profiling bucket hash table.",
+ "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
@@ -180,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("gc_sys_bytes"),
- "Number of bytes used for garbage collection system metadata.",
+ "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
@@ -188,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("other_sys_bytes"),
- "Number of bytes used for other system allocations.",
+ "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
@@ -196,7 +189,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("next_gc_bytes"),
- "Number of heap bytes when next garbage collection will take place.",
+ "Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
@@ -225,7 +218,7 @@ func newBaseGoCollector() baseGoCollector {
nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
- "A summary of the pause duration of garbage collection cycles.",
+ "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.",
nil, nil),
gcLastTimeDesc: NewDesc(
"go_memstats_last_gc_time_seconds",
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
index 2d8d9f64..51174641 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
@@ -17,6 +17,7 @@
package prometheus
import (
+ "fmt"
"math"
"runtime"
"runtime/metrics"
@@ -153,7 +154,8 @@ func defaultGoCollectorOptions() internal.GoCollectorOptions {
"/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes,
},
RuntimeMetricRules: []internal.GoCollectorRule{
- //{Matcher: regexp.MustCompile("")},
+ // Recommended metrics we want by default from runtime/metrics.
+ {Matcher: internal.GoCollectorDefaultRuntimeMetrics},
},
}
}
@@ -203,6 +205,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
// to fail here. This condition is tested in TestExpectedRuntimeMetrics.
continue
}
+ help := attachOriginalName(d.Description.Description, d.Name)
sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
@@ -214,7 +217,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
m = newBatchHistogram(
NewDesc(
BuildFQName(namespace, subsystem, name),
- d.Description.Description,
+ help,
nil,
nil,
),
@@ -226,7 +229,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
Namespace: namespace,
Subsystem: subsystem,
Name: name,
- Help: d.Description.Description,
+ Help: help,
},
)
} else {
@@ -234,7 +237,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
Namespace: namespace,
Subsystem: subsystem,
Name: name,
- Help: d.Description.Description,
+ Help: help,
})
}
metricSet = append(metricSet, m)
@@ -284,6 +287,10 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
}
}
+func attachOriginalName(desc, origName string) string {
+ return fmt.Sprintf("%s Sourced from %s", desc, origName)
+}
+
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
c.base.Describe(ch)
@@ -376,13 +383,13 @@ func unwrapScalarRMValue(v metrics.Value) float64 {
//
// This should never happen because we always populate our metric
// set from the runtime/metrics package.
- panic("unexpected unsupported metric")
+ panic("unexpected bad kind metric")
default:
// Unsupported metric kind.
//
// This should never happen because we check for this during initialization
// and flag and filter metrics whose kinds we don't understand.
- panic("unexpected unsupported metric kind")
+ panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind()))
}
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index b5c8bcb3..519db348 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -440,7 +440,7 @@ type HistogramOpts struct {
// constant (or any negative float value).
NativeHistogramZeroThreshold float64
- // The remaining fields define a strategy to limit the number of
+ // The next three fields define a strategy to limit the number of
// populated sparse buckets. If NativeHistogramMaxBucketNumber is left
// at zero, the number of buckets is not limited. (Note that this might
// lead to unbounded memory consumption if the values observed by the
@@ -473,6 +473,22 @@ type HistogramOpts struct {
NativeHistogramMinResetDuration time.Duration
NativeHistogramMaxZeroThreshold float64
+ // NativeHistogramMaxExemplars limits the number of exemplars
+ // that are kept in memory for each native histogram. If you leave it at
+ // zero, a default value of 10 is used. If no exemplars should be kept specifically
+ // for native histograms, set it to a negative value. (Scrapers can
+ // still use the exemplars exposed for classic buckets, which are managed
+ // independently.)
+ NativeHistogramMaxExemplars int
+ // NativeHistogramExemplarTTL is only checked once
+ // NativeHistogramMaxExemplars is exceeded. In that case, the
+ // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL.
+ // Otherwise, the older exemplar in the pair of exemplars that are closest
+ // together (on an exponential scale) is removed.
+ // If NativeHistogramExemplarTTL is left at its zero value, a default value of
+ // 5m is used. To always delete the oldest exemplar, set it to a negative value.
+ NativeHistogramExemplarTTL time.Duration
+
// now is for testing purposes, by default it's time.Now.
now func() time.Time
@@ -532,6 +548,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
if opts.afterFunc == nil {
opts.afterFunc = time.AfterFunc
}
+
h := &histogram{
desc: desc,
upperBounds: opts.Buckets,
@@ -556,6 +573,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
} // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
+ h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars)
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
@@ -725,7 +743,8 @@ type histogram struct {
// resetScheduled is protected by mtx. It is true if a reset is
// scheduled for a later time (when nativeHistogramMinResetDuration has
// passed).
- resetScheduled bool
+ resetScheduled bool
+ nativeExemplars nativeExemplars
// now is for testing purposes, by default it's time.Now.
now func() time.Time
@@ -742,6 +761,9 @@ func (h *histogram) Observe(v float64) {
h.observe(v, h.findBucket(v))
}
+// ObserveWithExemplar should not be called in a high-frequency setting
+// for a native histogram with configured exemplars. For this case,
+// the implementation isn't lock-free and might suffer from lock contention.
func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
i := h.findBucket(v)
h.observe(v, i)
@@ -821,6 +843,13 @@ func (h *histogram) Write(out *dto.Metric) error {
Length: proto.Uint32(0),
}}
}
+
+ if h.nativeExemplars.isEnabled() {
+ h.nativeExemplars.Lock()
+ his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
+ h.nativeExemplars.Unlock()
+ }
+
}
addAndResetCounts(hotCounts, coldCounts)
return nil
@@ -1091,8 +1120,10 @@ func (h *histogram) resetCounts(counts *histogramCounts) {
deleteSyncMap(&counts.nativeHistogramBucketsPositive)
}
-// updateExemplar replaces the exemplar for the provided bucket. With empty
-// labels, it's a no-op. It panics if any of the labels is invalid.
+// updateExemplar replaces the exemplar for the provided classic bucket.
+// With empty labels, it's a no-op. It panics if any of the labels is invalid.
+// If histogram is native, the exemplar will be cached into nativeExemplars,
+// which has a limit, and will remove one exemplar when limit is reached.
func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
if l == nil {
return
@@ -1102,6 +1133,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
panic(err)
}
h.exemplars[bucket].Store(e)
+ doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
+ if doSparse {
+ h.nativeExemplars.addExemplar(e)
+ }
}
// HistogramVec is a Collector that bundles a set of Histograms that all share the
@@ -1336,6 +1371,48 @@ func MustNewConstHistogram(
return m
}
+// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp.
+func NewConstHistogramWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ ct time.Time,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ createdTs: timestamppb.New(ct),
+ }, nil
+}
+
+// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where
+// NewConstHistogramWithCreatedTimestamp would have returned an error.
+func MustNewConstHistogramWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ ct time.Time,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
type buckSort []*dto.Bucket
func (s buckSort) Len() int {
@@ -1575,3 +1652,186 @@ func addAndResetCounts(hot, cold *histogramCounts) {
atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
}
+
+type nativeExemplars struct {
+ sync.Mutex
+
+ // Time-to-live for exemplars, it is set to -1 if exemplars are disabled, that is NativeHistogramMaxExemplars is below 0.
+ // The ttl is used on insertion to remove an exemplar that is older than ttl, if present.
+ ttl time.Duration
+
+ exemplars []*dto.Exemplar
+}
+
+func (n *nativeExemplars) isEnabled() bool {
+ return n.ttl != -1
+}
+
+func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
+ if ttl == 0 {
+ ttl = 5 * time.Minute
+ }
+
+ if maxCount == 0 {
+ maxCount = 10
+ }
+
+ if maxCount < 0 {
+ maxCount = 0
+ ttl = -1
+ }
+
+ return nativeExemplars{
+ ttl: ttl,
+ exemplars: make([]*dto.Exemplar, 0, maxCount),
+ }
+}
+
+func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
+ if !n.isEnabled() {
+ return
+ }
+
+ n.Lock()
+ defer n.Unlock()
+
+ // When the number of exemplars has not yet exceeded or
+ // is equal to cap(n.exemplars), then
+ // insert the new exemplar directly.
+ if len(n.exemplars) < cap(n.exemplars) {
+ var nIdx int
+ for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
+ if *e.Value < *n.exemplars[nIdx].Value {
+ break
+ }
+ }
+ n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)
+ return
+ }
+
+ if len(n.exemplars) == 1 {
+ // When the number of exemplars is 1, then
+ // replace the existing exemplar with the new exemplar.
+ n.exemplars[0] = e
+ return
+ }
+ // From this point on, the number of exemplars is greater than 1.
+
+ // When the number of exemplars exceeds the limit, remove one exemplar.
+ var (
+ ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop.
+ otIdx = -1 // Index of the exemplar with the oldest timestamp.
+
+ md = -1.0 // Logarithm of the delta of the closest pair of exemplars.
+
+ // The insertion point of the new exemplar in the exemplars slice after insertion.
+ // This is calculated purely based on the order of the exemplars by value.
+ // nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end.
+ nIdx = -1
+
+ // rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar.
+ // The aim is to keep a good spread of exemplars by value and not let them bunch up too much.
+ // It is calculated in 3 steps:
+ // 1. First we set rIdx to the index of the older exemplar within the closest pair by value.
+ // That is the following will be true (on log scale):
+ // either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have
+ // the closest values to each other from all pairs.
+ // For example, suppose the values are distributed like this:
+ // |-----------x-------------x----------------x----x-----|
+ // ^--rIdx as this is older.
+ // Or like this:
+ // |-----------x-------------x----------------x----x-----|
+ // ^--rIdx as this is older.
+ // 2. If there is an exemplar that expired, then we simple reset rIdx to that index.
+ // 3. We check if by inserting the new exemplar we would create a closer pair at
+ // (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to
+ // keep the spread of exemplars by value; otherwise we keep rIdx as it is.
+ rIdx = -1
+ cLog float64 // Logarithm of the current exemplar.
+ pLog float64 // Logarithm of the previous exemplar.
+ )
+
+ for i, exemplar := range n.exemplars {
+ // Find the exemplar with the oldest timestamp.
+ if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) {
+ ot = exemplar.Timestamp.AsTime()
+ otIdx = i
+ }
+
+ // Find the index at which to insert new the exemplar.
+ if nIdx == -1 && *e.Value <= *exemplar.Value {
+ nIdx = i
+ }
+
+ // Find the two closest exemplars and pick the one the with older timestamp.
+ pLog = cLog
+ cLog = math.Log(exemplar.GetValue())
+ if i == 0 {
+ continue
+ }
+ diff := math.Abs(cLog - pLog)
+ if md == -1 || diff < md {
+ // The closest exemplar pair is at index: i-1, i.
+ // Choose the exemplar with the older timestamp for replacement.
+ md = diff
+ if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
+ rIdx = i
+ } else {
+ rIdx = i - 1
+ }
+ }
+
+ }
+
+ // If all existing exemplar are smaller than new exemplar,
+ // then the exemplar should be inserted at the end.
+ if nIdx == -1 {
+ nIdx = len(n.exemplars)
+ }
+ // Here, we have the following relationships:
+ // n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0)
+ // e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars))
+
+ if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
+ // If the oldest exemplar has expired, then replace it with the new exemplar.
+ rIdx = otIdx
+ } else {
+ // In the previous for loop, when calculating the closest pair of exemplars,
+ // we did not take into account the newly inserted exemplar.
+ // So we need to calculate with the newly inserted exemplar again.
+ elog := math.Log(e.GetValue())
+ if nIdx > 0 {
+ diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue()))
+ if diff < md {
+ // The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx.
+ // v--rIdx
+ // |-----------x-n-----------x----------------x----x-----|
+ // nIdx-1--^ ^--new exemplar value
+ // Do not make the spread worse, replace nIdx-1 and not rIdx.
+ md = diff
+ rIdx = nIdx - 1
+ }
+ }
+ if nIdx < len(n.exemplars) {
+ diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog)
+ if diff < md {
+ // The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx.
+ // v--rIdx
+ // |-----------x-----------n-x----------------x----x-----|
+ // new exemplar value--^ ^--nIdx
+ // Do not make the spread worse, replace nIdx-1 and not rIdx.
+ rIdx = nIdx
+ }
+ }
+ }
+
+ // Adjust the slice according to rIdx and nIdx.
+ switch {
+ case rIdx == nIdx:
+ n.exemplars[nIdx] = e
+ case rIdx < nIdx:
+ n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...)
+ case rIdx > nIdx:
+ n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
index 723b45d6..a4fa6eab 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
@@ -30,3 +30,5 @@ type GoCollectorOptions struct {
RuntimeMetricSumForHist map[string]string
RuntimeMetricRules []GoCollectorRule
}
+
+var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index f018e572..9d9b81ab 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -234,7 +234,7 @@ func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
)
for i, e := range exemplars {
ts := e.Timestamp
- if ts == (time.Time{}) {
+ if ts.IsZero() {
ts = now
}
exs[i], err = newExemplar(e.Value, ts, e.Labels)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
index 8548dd18..62a4e7ad 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -22,14 +22,15 @@ import (
)
type processCollector struct {
- collectFn func(chan<- Metric)
- pidFn func() (int, error)
- reportErrors bool
- cpuTotal *Desc
- openFDs, maxFDs *Desc
- vsize, maxVsize *Desc
- rss *Desc
- startTime *Desc
+ collectFn func(chan<- Metric)
+ pidFn func() (int, error)
+ reportErrors bool
+ cpuTotal *Desc
+ openFDs, maxFDs *Desc
+ vsize, maxVsize *Desc
+ rss *Desc
+ startTime *Desc
+ inBytes, outBytes *Desc
}
// ProcessCollectorOpts defines the behavior of a process metrics collector
@@ -100,6 +101,16 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
"Start time of the process since unix epoch in seconds.",
nil, nil,
),
+ inBytes: NewDesc(
+ ns+"process_network_receive_bytes_total",
+ "Number of bytes received by the process over the network.",
+ nil, nil,
+ ),
+ outBytes: NewDesc(
+ ns+"process_network_transmit_bytes_total",
+ "Number of bytes sent by the process over the network.",
+ nil, nil,
+ ),
}
if opts.PidFn == nil {
@@ -129,6 +140,8 @@ func (c *processCollector) Describe(ch chan<- *Desc) {
ch <- c.maxVsize
ch <- c.rss
ch <- c.startTime
+ ch <- c.inBytes
+ ch <- c.outBytes
}
// Collect returns the current state of all metrics of the collector.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
index 8c1136ce..14d56d2d 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
@@ -63,4 +63,18 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
} else {
c.reportError(ch, nil, err)
}
+
+ if netstat, err := p.Netstat(); err == nil {
+ var inOctets, outOctets float64
+ if netstat.IpExt.InOctets != nil {
+ inOctets = *netstat.IpExt.InOctets
+ }
+ if netstat.IpExt.OutOctets != nil {
+ outOctets = *netstat.IpExt.OutOctets
+ }
+ ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
+ ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)
+ } else {
+ c.reportError(ch, nil, err)
+ }
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
index 9819917b..315eab5f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -76,6 +76,12 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) {
return n, err
}
+// Unwrap lets http.ResponseController get the underlying http.ResponseWriter,
+// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface.
+func (r *responseWriterDelegator) Unwrap() http.ResponseWriter {
+ return r.ResponseWriter
+}
+
type (
closeNotifierDelegator struct{ *responseWriterDelegator }
flusherDelegator struct{ *responseWriterDelegator }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index 09b8d2fb..e598e66e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -38,12 +38,13 @@ import (
"io"
"net/http"
"strconv"
- "strings"
"sync"
"time"
+ "github.com/klauspost/compress/zstd"
"github.com/prometheus/common/expfmt"
+ "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil"
"github.com/prometheus/client_golang/prometheus"
)
@@ -54,6 +55,18 @@ const (
processStartTimeHeader = "Process-Start-Time-Unix"
)
+// Compression represents the content encodings handlers support for the HTTP
+// responses.
+type Compression string
+
+const (
+ Identity Compression = "identity"
+ Gzip Compression = "gzip"
+ Zstd Compression = "zstd"
+)
+
+var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd}
+
var gzipPool = sync.Pool{
New: func() interface{} {
return gzip.NewWriter(nil)
@@ -122,6 +135,18 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
}
}
+ // Select compression formats to offer based on default or user choice.
+ var compressions []string
+ if !opts.DisableCompression {
+ offers := defaultCompressionFormats
+ if len(opts.OfferedCompressions) > 0 {
+ offers = opts.OfferedCompressions
+ }
+ for _, comp := range offers {
+ compressions = append(compressions, string(comp))
+ }
+ }
+
h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
if !opts.ProcessStartTime.IsZero() {
rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
@@ -165,21 +190,23 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
} else {
contentType = expfmt.Negotiate(req.Header)
}
- header := rsp.Header()
- header.Set(contentTypeHeader, string(contentType))
+ rsp.Header().Set(contentTypeHeader, string(contentType))
- w := io.Writer(rsp)
- if !opts.DisableCompression && gzipAccepted(req.Header) {
- header.Set(contentEncodingHeader, "gzip")
- gz := gzipPool.Get().(*gzip.Writer)
- defer gzipPool.Put(gz)
+ w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions)
+ if err != nil {
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error getting writer", err)
+ }
+ w = io.Writer(rsp)
+ encodingHeader = string(Identity)
+ }
- gz.Reset(w)
- defer gz.Close()
+ defer closeWriter()
- w = gz
+ // Set Content-Encoding only when data is compressed
+ if encodingHeader != string(Identity) {
+ rsp.Header().Set(contentEncodingHeader, encodingHeader)
}
-
enc := expfmt.NewEncoder(w, contentType)
// handleError handles the error according to opts.ErrorHandling
@@ -343,9 +370,19 @@ type HandlerOpts struct {
// no effect on the HTTP status code because ErrorHandling is set to
// ContinueOnError.
Registry prometheus.Registerer
- // If DisableCompression is true, the handler will never compress the
- // response, even if requested by the client.
+ // DisableCompression disables the response encoding (compression) and
+ // encoding negotiation. If true, the handler will
+ // never compress the response, even if requested
+ // by the client and the OfferedCompressions field is set.
DisableCompression bool
+ // OfferedCompressions is a set of encodings (compressions) handler will
+ // try to offer when negotiating with the client. This defaults to identity, gzip
+ // and zstd.
+ // NOTE: If handler can't agree with the client on the encodings or
+ // unsupported or empty encodings are set in OfferedCompressions,
+ // handler always fallbacks to no compression (identity), for
+ // compatibility reasons. In such cases ErrorLog will be used if set.
+ OfferedCompressions []Compression
// The number of concurrent HTTP requests is limited to
// MaxRequestsInFlight. Additional requests are responded to with 503
// Service Unavailable and a suitable message in the body. If
@@ -381,19 +418,6 @@ type HandlerOpts struct {
ProcessStartTime time.Time
}
-// gzipAccepted returns whether the client will accept gzip-encoded content.
-func gzipAccepted(header http.Header) bool {
- a := header.Get(acceptEncodingHeader)
- parts := strings.Split(a, ",")
- for _, part := range parts {
- part = strings.TrimSpace(part)
- if part == "gzip" || strings.HasPrefix(part, "gzip;") {
- return true
- }
- }
- return false
-}
-
// httpError removes any content-encoding header and then calls http.Error with
// the provided error and http.StatusInternalServerError. Error contents is
// supposed to be uncompressed plain text. Same as with a plain http.Error, this
@@ -406,3 +430,38 @@ func httpError(rsp http.ResponseWriter, err error) {
http.StatusInternalServerError,
)
}
+
+// negotiateEncodingWriter reads the Accept-Encoding header from a request and
+// selects the right compression based on an allow-list of supported
+// compressions. It returns a writer implementing the compression and an the
+// correct value that the caller can set in the response header.
+func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) {
+ if len(compressions) == 0 {
+ return rw, string(Identity), func() {}, nil
+ }
+
+ // TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented.
+ selected := httputil.NegotiateContentEncoding(r, compressions)
+
+ switch selected {
+ case "zstd":
+ // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented.
+ z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest))
+ if err != nil {
+ return nil, "", func() {}, err
+ }
+
+ z.Reset(rw)
+ return z, selected, func() { _ = z.Close() }, nil
+ case "gzip":
+ gz := gzipPool.Get().(*gzip.Writer)
+ gz.Reset(rw)
+ return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil
+ case "identity":
+ // This means the content is not compressed.
+ return rw, selected, func() {}, nil
+ default:
+ // The content encoding was not implemented yet.
+ return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats)
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 5e2ced25..c6fd2f58 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -314,16 +314,17 @@ func (r *Registry) Register(c Collector) error {
if dimHash != desc.dimHash {
return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
}
- } else {
- // ...then check the new descriptors already seen.
- if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
- if dimHash != desc.dimHash {
- return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
- }
- } else {
- newDimHashesByName[desc.fqName] = desc.dimHash
+ continue
+ }
+
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
}
+ continue
}
+ newDimHashesByName[desc.fqName] = desc.dimHash
}
// A Collector yielding no Desc at all is considered unchecked.
if len(newDescIDs) == 0 {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index 14627044..1ab0e479 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -783,3 +783,45 @@ func MustNewConstSummary(
}
return m
}
+
+// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp.
+func NewConstSummaryWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ ct time.Time,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ createdTs: timestamppb.New(ct),
+ }, nil
+}
+
+// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where
+// NewConstSummaryWithCreatedTimestamp would have returned an error.
+func MustNewConstSummaryWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ ct time.Time,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 955cfd59..2c808eec 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -507,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues(
return metric
}
-// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
+// getOrCreateMetricWithLabels retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 25cfaa21..1448439b 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format {
mediatype, params, err := mime.ParseMediaType(ct)
if err != nil {
- return fmtUnknown
+ return FmtUnknown
}
const textType = "text/plain"
@@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format {
switch mediatype {
case ProtoType:
if p, ok := params["proto"]; ok && p != ProtoProtocol {
- return fmtUnknown
+ return FmtUnknown
}
if e, ok := params["encoding"]; ok && e != "delimited" {
- return fmtUnknown
+ return FmtUnknown
}
- return fmtProtoDelim
+ return FmtProtoDelim
case textType:
if v, ok := params["version"]; ok && v != TextVersion {
- return fmtUnknown
+ return FmtUnknown
}
- return fmtText
+ return FmtText
}
- return fmtUnknown
+ return FmtUnknown
}
// NewDecoder returns a new decoder based on the given input format.
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
index ff5ef7a9..cf0c150c 100644
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -77,18 +77,18 @@ func Negotiate(h http.Header) Format {
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
- return fmtProtoDelim + escapingScheme
+ return FmtProtoDelim + escapingScheme
case "text":
- return fmtProtoText + escapingScheme
+ return FmtProtoText + escapingScheme
case "compact-text":
- return fmtProtoCompact + escapingScheme
+ return FmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
- return fmtText + escapingScheme
+ return FmtText + escapingScheme
}
}
- return fmtText + escapingScheme
+ return FmtText + escapingScheme
}
// NegotiateIncludingOpenMetrics works like Negotiate but includes
@@ -110,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
- return fmtProtoDelim + escapingScheme
+ return FmtProtoDelim + escapingScheme
case "text":
- return fmtProtoText + escapingScheme
+ return FmtProtoText + escapingScheme
case "compact-text":
- return fmtProtoCompact + escapingScheme
+ return FmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
- return fmtText + escapingScheme
+ return FmtText + escapingScheme
}
if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
switch ver {
case OpenMetricsVersion_1_0_0:
- return fmtOpenMetrics_1_0_0 + escapingScheme
+ return FmtOpenMetrics_1_0_0 + escapingScheme
default:
- return fmtOpenMetrics_0_0_1 + escapingScheme
+ return FmtOpenMetrics_0_0_1 + escapingScheme
}
}
}
- return fmtText + escapingScheme
+ return FmtText + escapingScheme
}
// NewEncoder returns a new encoder based on content type negotiation. All
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
index 051b38cd..d942af8e 100644
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -32,24 +32,31 @@ type Format string
// it on the wire, new content-type strings will have to be agreed upon and
// added here.
const (
- TextVersion = "0.0.4"
- ProtoType = `application/vnd.google.protobuf`
- ProtoProtocol = `io.prometheus.client.MetricFamily`
- protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+ TextVersion = "0.0.4"
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
OpenMetricsType = `application/openmetrics-text`
OpenMetricsVersion_0_0_1 = "0.0.1"
OpenMetricsVersion_1_0_0 = "1.0.0"
- // The Content-Type values for the different wire protocols. Note that these
- // values are now unexported. If code was relying on comparisons to these
- // constants, instead use FormatType().
- fmtUnknown Format = ``
- fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
- fmtProtoDelim Format = protoFmt + ` encoding=delimited`
- fmtProtoText Format = protoFmt + ` encoding=text`
- fmtProtoCompact Format = protoFmt + ` encoding=compact-text`
- fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
- fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
+ // The Content-Type values for the different wire protocols. Do not do direct
+ // comparisons to these constants, instead use the comparison functions.
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead.
+ FmtUnknown Format = ``
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead.
+ FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead.
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead.
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
+ FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+ FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+ FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
)
const (
@@ -79,17 +86,17 @@ const (
func NewFormat(t FormatType) Format {
switch t {
case TypeProtoCompact:
- return fmtProtoCompact
+ return FmtProtoCompact
case TypeProtoDelim:
- return fmtProtoDelim
+ return FmtProtoDelim
case TypeProtoText:
- return fmtProtoText
+ return FmtProtoText
case TypeTextPlain:
- return fmtText
+ return FmtText
case TypeOpenMetrics:
- return fmtOpenMetrics_1_0_0
+ return FmtOpenMetrics_1_0_0
default:
- return fmtUnknown
+ return FmtUnknown
}
}
@@ -97,12 +104,35 @@ func NewFormat(t FormatType) Format {
// specified version number.
func NewOpenMetricsFormat(version string) (Format, error) {
if version == OpenMetricsVersion_0_0_1 {
- return fmtOpenMetrics_0_0_1, nil
+ return FmtOpenMetrics_0_0_1, nil
}
if version == OpenMetricsVersion_1_0_0 {
- return fmtOpenMetrics_1_0_0, nil
+ return FmtOpenMetrics_1_0_0, nil
}
- return fmtUnknown, fmt.Errorf("unknown open metrics version string")
+ return FmtUnknown, fmt.Errorf("unknown open metrics version string")
+}
+
+// WithEscapingScheme returns a copy of Format with the specified escaping
+// scheme appended to the end. If an escaping scheme already exists it is
+// removed.
+func (f Format) WithEscapingScheme(s model.EscapingScheme) Format {
+ var terms []string
+ for _, p := range strings.Split(string(f), ";") {
+ toks := strings.Split(p, "=")
+ if len(toks) != 2 {
+ trimmed := strings.TrimSpace(p)
+ if len(trimmed) > 0 {
+ terms = append(terms, trimmed)
+ }
+ continue
+ }
+ key := strings.TrimSpace(toks[0])
+ if key != model.EscapingKey {
+ terms = append(terms, strings.TrimSpace(p))
+ }
+ }
+ terms = append(terms, model.EscapingKey+"="+s.String())
+ return Format(strings.Join(terms, "; "))
}
// FormatType deduces an overall FormatType for the given format.
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
index 353c5e93..11c8ff4b 100644
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs(
if name != "" {
// If the name does not pass the legacy validity check, we must put the
// metric name inside the braces, quoted.
- if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
+ if !model.IsValidLegacyMetricName(name) {
metricInsideBraces = true
err := w.WriteByte(separator)
written++
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
index f9b8265a..4b86434b 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -354,7 +354,7 @@ func writeNameAndLabelPairs(
if name != "" {
// If the name does not pass the legacy validity check, we must put the
// metric name inside the braces.
- if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
+ if !model.IsValidLegacyMetricName(name) {
metricInsideBraces = true
err := w.WriteByte(separator)
written++
@@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) {
// writeName writes a string as-is if it complies with the legacy naming
// scheme, or escapes it in double quotes if not.
func writeName(w enhancedWriter, name string) (int, error) {
- if model.IsValidLegacyMetricName(model.LabelValue(name)) {
+ if model.IsValidLegacyMetricName(name) {
return w.WriteString(name)
}
var written int
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index 26490211..f085a923 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -22,9 +22,9 @@ import (
"math"
"strconv"
"strings"
+ "unicode/utf8"
dto "github.com/prometheus/client_model/go"
-
"google.golang.org/protobuf/proto"
"github.com/prometheus/common/model"
@@ -60,6 +60,7 @@ type TextParser struct {
currentMF *dto.MetricFamily
currentMetric *dto.Metric
currentLabelPair *dto.LabelPair
+ currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line.
// The remaining member variables are only used for summaries/histograms.
currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
@@ -74,6 +75,9 @@ type TextParser struct {
// count and sum of that summary/histogram.
currentIsSummaryCount, currentIsSummarySum bool
currentIsHistogramCount, currentIsHistogramSum bool
+ // These indicate if the metric name from the current line being parsed is inside
+ // braces and if that metric name was found respectively.
+ currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool
}
// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
@@ -137,12 +141,15 @@ func (p *TextParser) reset(in io.Reader) {
}
p.currentQuantile = math.NaN()
p.currentBucket = math.NaN()
+ p.currentMF = nil
}
// startOfLine represents the state where the next byte read from p.buf is the
// start of a line (or whitespace leading up to it).
func (p *TextParser) startOfLine() stateFn {
p.lineCount++
+ p.currentMetricIsInsideBraces = false
+ p.currentMetricInsideBracesIsPresent = false
if p.skipBlankTab(); p.err != nil {
// This is the only place that we expect to see io.EOF,
// which is not an error but the signal that we are done.
@@ -158,6 +165,9 @@ func (p *TextParser) startOfLine() stateFn {
return p.startComment
case '\n':
return p.startOfLine // Empty line, start the next one.
+ case '{':
+ p.currentMetricIsInsideBraces = true
+ return p.readingLabels
}
return p.readingMetricName
}
@@ -275,6 +285,8 @@ func (p *TextParser) startLabelName() stateFn {
return nil // Unexpected end of input.
}
if p.currentByte == '}' {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+ p.currentLabelPairs = nil
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
@@ -287,6 +299,45 @@ func (p *TextParser) startLabelName() stateFn {
p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
return nil
}
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ if p.currentMetricIsInsideBraces {
+ if p.currentMetricInsideBracesIsPresent {
+ p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ switch p.currentByte {
+ case ',':
+ p.setOrCreateCurrentMF()
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ p.currentMetricInsideBracesIsPresent = true
+ return p.startLabelName
+ case '}':
+ p.setOrCreateCurrentMF()
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+ p.currentLabelPairs = nil
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+ p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte))
+ return nil
+ }
+ }
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ p.currentLabelPairs = nil
+ return nil
+ }
p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
@@ -296,23 +347,17 @@ func (p *TextParser) startLabelName() stateFn {
// labels to 'real' labels.
if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
- p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
- }
- if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
- return nil // Unexpected end of input.
- }
- if p.currentByte != '=' {
- p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
- return nil
+ p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
}
// Check for duplicate label names.
labels := make(map[string]struct{})
- for _, l := range p.currentMetric.Label {
+ for _, l := range p.currentLabelPairs {
lName := l.GetName()
if _, exists := labels[lName]; !exists {
labels[lName] = struct{}{}
} else {
p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
+ p.currentLabelPairs = nil
return nil
}
}
@@ -345,6 +390,7 @@ func (p *TextParser) startLabelValue() stateFn {
if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ p.currentLabelPairs = nil
return nil
}
} else {
@@ -371,12 +417,19 @@ func (p *TextParser) startLabelValue() stateFn {
return p.startLabelName
case '}':
+ if p.currentMF == nil {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+ p.currentLabelPairs = nil
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
return p.readingValue
default:
p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
+ p.currentLabelPairs = nil
return nil
}
}
@@ -585,6 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
p.currentToken.WriteByte(p.currentByte)
case 'n':
p.currentToken.WriteByte('\n')
+ case '"':
+ p.currentToken.WriteByte('"')
default:
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
return
@@ -610,13 +665,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
// but not into p.currentToken.
func (p *TextParser) readTokenAsMetricName() {
p.currentToken.Reset()
+ // A UTF-8 metric name must be quoted and may have escaped characters.
+ quoted := false
+ escaped := false
if !isValidMetricNameStart(p.currentByte) {
return
}
- for {
- p.currentToken.WriteByte(p.currentByte)
+ for p.err == nil {
+ if escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ case '"':
+ p.currentToken.WriteByte('"')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '"':
+ quoted = !quoted
+ if !quoted {
+ p.currentByte, p.err = p.buf.ReadByte()
+ return
+ }
+ case '\n':
+ p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
p.currentByte, p.err = p.buf.ReadByte()
- if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') {
return
}
}
@@ -628,13 +715,45 @@ func (p *TextParser) readTokenAsMetricName() {
// but not into p.currentToken.
func (p *TextParser) readTokenAsLabelName() {
p.currentToken.Reset()
+ // A UTF-8 label name must be quoted and may have escaped characters.
+ quoted := false
+ escaped := false
if !isValidLabelNameStart(p.currentByte) {
return
}
- for {
- p.currentToken.WriteByte(p.currentByte)
+ for p.err == nil {
+ if escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ case '"':
+ p.currentToken.WriteByte('"')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '"':
+ quoted = !quoted
+ if !quoted {
+ p.currentByte, p.err = p.buf.ReadByte()
+ return
+ }
+ case '\n':
+ p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
p.currentByte, p.err = p.buf.ReadByte()
- if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') {
return
}
}
@@ -660,6 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() {
p.currentToken.WriteByte('\n')
default:
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ p.currentLabelPairs = nil
return
}
escaped = false
@@ -718,19 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() {
}
func isValidLabelNameStart(b byte) bool {
- return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"'
}
-func isValidLabelNameContinuation(b byte) bool {
- return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+func isValidLabelNameContinuation(b byte, quoted bool) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b)))
}
func isValidMetricNameStart(b byte) bool {
return isValidLabelNameStart(b) || b == ':'
}
-func isValidMetricNameContinuation(b byte) bool {
- return isValidLabelNameContinuation(b) || b == ':'
+func isValidMetricNameContinuation(b byte, quoted bool) bool {
+ return isValidLabelNameContinuation(b, quoted) || b == ':'
}
func isBlankOrTab(b byte) bool {
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
index 3317ce22..73b7aa3e 100644
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -97,26 +97,35 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
// therewith.
type LabelName string
-// IsValid returns true iff name matches the pattern of LabelNameRE for legacy
-// names, and iff it's valid UTF-8 if NameValidationScheme is set to
-// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the
-// check but a much faster hardcoded implementation.
+// IsValid returns true iff the name matches the pattern of LabelNameRE when
+// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if
+// NameValidationScheme is set to UTF8Validation.
func (ln LabelName) IsValid() bool {
if len(ln) == 0 {
return false
}
switch NameValidationScheme {
case LegacyValidation:
- for i, b := range ln {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
- return false
- }
- }
+ return ln.IsValidLegacy()
case UTF8Validation:
return utf8.ValidString(string(ln))
default:
panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
}
+}
+
+// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for
+// legacy names. It does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValidLegacy() bool {
+ if len(ln) == 0 {
+ return false
+ }
+ for i, b := range ln {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
return true
}
diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go
index 481c47b4..abb2c900 100644
--- a/vendor/github.com/prometheus/common/model/labelset_string.go
+++ b/vendor/github.com/prometheus/common/model/labelset_string.go
@@ -11,8 +11,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build go1.21
-
package model
import (
diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go
deleted file mode 100644
index c4212685..00000000
--- a/vendor/github.com/prometheus/common/model/labelset_string_go120.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.21
-
-package model
-
-import (
- "fmt"
- "sort"
- "strings"
-)
-
-// String was optimized using functions not available for go 1.20
-// or lower. We keep the old implementation for compatibility with client_golang.
-// Once client golang drops support for go 1.20 (scheduled for August 2024), this
-// file can be removed.
-func (l LabelSet) String() string {
- labelNames := make([]string, 0, len(l))
- for name := range l {
- labelNames = append(labelNames, string(name))
- }
- sort.Strings(labelNames)
- lstrs := make([]string, 0, len(l))
- for _, name := range labelNames {
- lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)]))
- }
- return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
-}
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index eb865e5a..f50966bc 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -34,10 +34,13 @@ var (
// goroutines are started.
NameValidationScheme = LegacyValidation
- // NameEscapingScheme defines the default way that names will be
- // escaped when presented to systems that do not support UTF-8 names. If the
- // Content-Type "escaping" term is specified, that will override this value.
- NameEscapingScheme = ValueEncodingEscaping
+ // NameEscapingScheme defines the default way that names will be escaped when
+ // presented to systems that do not support UTF-8 names. If the Content-Type
+ // "escaping" term is specified, that will override this value.
+ // NameEscapingScheme should not be set to the NoEscaping value. That string
+ // is used in content negotiation to indicate that a system supports UTF-8 and
+ // has that feature enabled.
+ NameEscapingScheme = UnderscoreEscaping
)
// ValidationScheme is a Go enum for determining how metric and label names will
@@ -161,7 +164,7 @@ func (m Metric) FastFingerprint() Fingerprint {
func IsValidMetricName(n LabelValue) bool {
switch NameValidationScheme {
case LegacyValidation:
- return IsValidLegacyMetricName(n)
+ return IsValidLegacyMetricName(string(n))
case UTF8Validation:
if len(n) == 0 {
return false
@@ -176,7 +179,7 @@ func IsValidMetricName(n LabelValue) bool {
// legacy validation scheme regardless of the value of NameValidationScheme.
// This function, however, does not use MetricNameRE for the check but a much
// faster hardcoded implementation.
-func IsValidLegacyMetricName(n LabelValue) bool {
+func IsValidLegacyMetricName(n string) bool {
if len(n) == 0 {
return false
}
@@ -208,7 +211,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
}
// If the name is nil, copy as-is, don't try to escape.
- if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) {
+ if v.Name == nil || IsValidLegacyMetricName(v.GetName()) {
out.Name = v.Name
} else {
out.Name = proto.String(EscapeName(v.GetName(), scheme))
@@ -230,7 +233,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
for _, l := range m.Label {
if l.GetName() == MetricNameLabel {
- if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+ if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) {
escaped.Label = append(escaped.Label, l)
continue
}
@@ -240,7 +243,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
})
continue
}
- if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) {
+ if l.Name == nil || IsValidLegacyMetricName(l.GetName()) {
escaped.Label = append(escaped.Label, l)
continue
}
@@ -256,10 +259,10 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
func metricNeedsEscaping(m *dto.Metric) bool {
for _, l := range m.Label {
- if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+ if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) {
return true
}
- if !IsValidLegacyMetricName(LabelValue(l.GetName())) {
+ if !IsValidLegacyMetricName(l.GetName()) {
return true
}
}
@@ -283,7 +286,7 @@ func EscapeName(name string, scheme EscapingScheme) string {
case NoEscaping:
return name
case UnderscoreEscaping:
- if IsValidLegacyMetricName(LabelValue(name)) {
+ if IsValidLegacyMetricName(name) {
return name
}
for i, b := range name {
@@ -309,7 +312,7 @@ func EscapeName(name string, scheme EscapingScheme) string {
}
return escaped.String()
case ValueEncodingEscaping:
- if IsValidLegacyMetricName(LabelValue(name)) {
+ if IsValidLegacyMetricName(name) {
return name
}
escaped.WriteString("U__")
@@ -452,6 +455,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) {
case EscapeValues:
return ValueEncodingEscaping, nil
default:
- return NoEscaping, fmt.Errorf("unknown format scheme " + s)
+ return NoEscaping, fmt.Errorf("unknown format scheme %s", s)
}
}
diff --git a/vendor/github.com/rs/xid/.gitignore b/vendor/github.com/rs/xid/.gitignore
new file mode 100644
index 00000000..81be9277
--- /dev/null
+++ b/vendor/github.com/rs/xid/.gitignore
@@ -0,0 +1,3 @@
+/.idea
+/.vscode
+.DS_Store
\ No newline at end of file
diff --git a/vendor/github.com/rs/xid/README.md b/vendor/github.com/rs/xid/README.md
index 974e67d2..1bf45bd1 100644
--- a/vendor/github.com/rs/xid/README.md
+++ b/vendor/github.com/rs/xid/README.md
@@ -4,7 +4,7 @@
Package xid is a globally unique id generator library, ready to safely be used directly in your server code.
-Xid uses the Mongo Object ID algorithm to generate globally unique ids with a different serialization (base64) to make it shorter when transported as a string:
+Xid uses the Mongo Object ID algorithm to generate globally unique ids with a different serialization ([base32hex](https://datatracker.ietf.org/doc/html/rfc4648#page-10)) to make it shorter when transported as a string:
https://docs.mongodb.org/manual/reference/object-id/
- 4-byte value representing the seconds since the Unix epoch,
@@ -13,7 +13,7 @@ https://docs.mongodb.org/manual/reference/object-id/
- 3-byte counter, starting with a random value.
The binary representation of the id is compatible with Mongo 12 bytes Object IDs.
-The string representation is using base32 hex (w/o padding) for better space efficiency
+The string representation is using [base32hex](https://datatracker.ietf.org/doc/html/rfc4648#page-10) (w/o padding) for better space efficiency
when stored in that form (20 bytes). The hex variant of base32 is used to retain the
sortable property of the id.
@@ -71,8 +71,10 @@ References:
- Java port by [0xShamil](https://github.com/0xShamil/): https://github.com/0xShamil/java-xid
- Dart port by [Peter Bwire](https://github.com/pitabwire): https://pub.dev/packages/xid
- PostgreSQL port by [Rasmus Holm](https://github.com/crholm): https://github.com/modfin/pg-xid
-- Swift port by [Uditha Atukorala](https://github.com/uditha-atukorala): https://github.com/uditha-atukorala/swift-xid
-- C++ port by [Uditha Atukorala](https://github.com/uditha-atukorala): https://github.com/uditha-atukorala/libxid
+- Swift port by [Uditha Atukorala](https://github.com/uatuko): https://github.com/uatuko/swift-xid
+- C++ port by [Uditha Atukorala](https://github.com/uatuko): https://github.com/uatuko/libxid
+- Typescript & Javascript port by [Yiwen AI](https://github.com/yiwen-ai): https://github.com/yiwen-ai/xid-ts
+- Gleam port by [Alexandre Del Vecchio](https://github.com/defgenx): https://github.com/defgenx/gxid
## Install
diff --git a/vendor/github.com/rs/xid/hostid_darwin.go b/vendor/github.com/rs/xid/hostid_darwin.go
index 08351ff7..17351563 100644
--- a/vendor/github.com/rs/xid/hostid_darwin.go
+++ b/vendor/github.com/rs/xid/hostid_darwin.go
@@ -2,8 +2,33 @@
package xid
-import "syscall"
+import (
+ "errors"
+ "os/exec"
+ "strings"
+)
func readPlatformMachineID() (string, error) {
- return syscall.Sysctl("kern.uuid")
+ ioreg, err := exec.LookPath("ioreg")
+ if err != nil {
+ return "", err
+ }
+
+ cmd := exec.Command(ioreg, "-rd1", "-c", "IOPlatformExpertDevice")
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ return "", err
+ }
+
+ for _, line := range strings.Split(string(out), "\n") {
+ if strings.Contains(line, "IOPlatformUUID") {
+ parts := strings.SplitAfter(line, `" = "`)
+ if len(parts) == 2 {
+ uuid := strings.TrimRight(parts[1], `"`)
+ return strings.ToLower(uuid), nil
+ }
+ }
+ }
+
+ return "", errors.New("cannot find host id")
}
diff --git a/vendor/github.com/rs/xid/hostid_windows.go b/vendor/github.com/rs/xid/hostid_windows.go
index ec2593ee..a4d98ab0 100644
--- a/vendor/github.com/rs/xid/hostid_windows.go
+++ b/vendor/github.com/rs/xid/hostid_windows.go
@@ -11,11 +11,17 @@ import (
func readPlatformMachineID() (string, error) {
// source: https://github.com/shirou/gopsutil/blob/master/host/host_syscall.go
var h syscall.Handle
- err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, syscall.StringToUTF16Ptr(`SOFTWARE\Microsoft\Cryptography`), 0, syscall.KEY_READ|syscall.KEY_WOW64_64KEY, &h)
+
+ regKeyCryptoPtr, err := syscall.UTF16PtrFromString(`SOFTWARE\Microsoft\Cryptography`)
+ if err != nil {
+ return "", fmt.Errorf(`error reading registry key "SOFTWARE\Microsoft\Cryptography": %w`, err)
+ }
+
+ err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, regKeyCryptoPtr, 0, syscall.KEY_READ|syscall.KEY_WOW64_64KEY, &h)
if err != nil {
return "", err
}
- defer syscall.RegCloseKey(h)
+ defer func() { _ = syscall.RegCloseKey(h) }()
const syscallRegBufLen = 74 // len(`{`) + len(`abcdefgh-1234-456789012-123345456671` * 2) + len(`}`) // 2 == bytes/UTF16
const uuidLen = 36
@@ -23,9 +29,15 @@ func readPlatformMachineID() (string, error) {
var regBuf [syscallRegBufLen]uint16
bufLen := uint32(syscallRegBufLen)
var valType uint32
- err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(`MachineGuid`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen)
+
+ mGuidPtr, err := syscall.UTF16PtrFromString(`MachineGuid`)
if err != nil {
- return "", err
+ return "", fmt.Errorf("error reading machine GUID: %w", err)
+ }
+
+ err = syscall.RegQueryValueEx(h, mGuidPtr, nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen)
+ if err != nil {
+ return "", fmt.Errorf("error parsing ")
}
hostID := syscall.UTF16ToString(regBuf[:])
diff --git a/vendor/github.com/rs/xid/id.go b/vendor/github.com/rs/xid/id.go
index fcd7a041..e88984d9 100644
--- a/vendor/github.com/rs/xid/id.go
+++ b/vendor/github.com/rs/xid/id.go
@@ -54,7 +54,6 @@ import (
"sort"
"sync/atomic"
"time"
- "unsafe"
)
// Code inspired from mgo/bson ObjectId
@@ -172,7 +171,7 @@ func FromString(id string) (ID, error) {
func (id ID) String() string {
text := make([]byte, encodedLen)
encode(text, id[:])
- return *(*string)(unsafe.Pointer(&text))
+ return string(text)
}
// Encode encodes the id using base32 encoding, writing 20 bytes to dst and return it.
@@ -206,23 +205,23 @@ func encode(dst, id []byte) {
dst[19] = encoding[(id[11]<<4)&0x1F]
dst[18] = encoding[(id[11]>>1)&0x1F]
- dst[17] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F]
+ dst[17] = encoding[(id[11]>>6)|(id[10]<<2)&0x1F]
dst[16] = encoding[id[10]>>3]
dst[15] = encoding[id[9]&0x1F]
dst[14] = encoding[(id[9]>>5)|(id[8]<<3)&0x1F]
dst[13] = encoding[(id[8]>>2)&0x1F]
dst[12] = encoding[id[8]>>7|(id[7]<<1)&0x1F]
- dst[11] = encoding[(id[7]>>4)&0x1F|(id[6]<<4)&0x1F]
+ dst[11] = encoding[(id[7]>>4)|(id[6]<<4)&0x1F]
dst[10] = encoding[(id[6]>>1)&0x1F]
- dst[9] = encoding[(id[6]>>6)&0x1F|(id[5]<<2)&0x1F]
+ dst[9] = encoding[(id[6]>>6)|(id[5]<<2)&0x1F]
dst[8] = encoding[id[5]>>3]
dst[7] = encoding[id[4]&0x1F]
dst[6] = encoding[id[4]>>5|(id[3]<<3)&0x1F]
dst[5] = encoding[(id[3]>>2)&0x1F]
dst[4] = encoding[id[3]>>7|(id[2]<<1)&0x1F]
- dst[3] = encoding[(id[2]>>4)&0x1F|(id[1]<<4)&0x1F]
+ dst[3] = encoding[(id[2]>>4)|(id[1]<<4)&0x1F]
dst[2] = encoding[(id[1]>>1)&0x1F]
- dst[1] = encoding[(id[1]>>6)&0x1F|(id[0]<<2)&0x1F]
+ dst[1] = encoding[(id[1]>>6)|(id[0]<<2)&0x1F]
dst[0] = encoding[id[0]>>3]
}
diff --git a/vendor/github.com/tklauser/numcpus/.cirrus.yml b/vendor/github.com/tklauser/numcpus/.cirrus.yml
index 33e6595c..b3091efd 100644
--- a/vendor/github.com/tklauser/numcpus/.cirrus.yml
+++ b/vendor/github.com/tklauser/numcpus/.cirrus.yml
@@ -1,10 +1,10 @@
env:
CIRRUS_CLONE_DEPTH: 1
- GO_VERSION: go1.22.2
+ GO_VERSION: go1.23.0
freebsd_13_task:
freebsd_instance:
- image_family: freebsd-13-2
+ image_family: freebsd-13-3
install_script: |
pkg install -y go
GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest
diff --git a/vendor/github.com/tklauser/numcpus/numcpus.go b/vendor/github.com/tklauser/numcpus/numcpus.go
index af59983e..de206f06 100644
--- a/vendor/github.com/tklauser/numcpus/numcpus.go
+++ b/vendor/github.com/tklauser/numcpus/numcpus.go
@@ -73,3 +73,26 @@ func GetPossible() (int, error) {
func GetPresent() (int, error) {
return getPresent()
}
+
+// ListOffline returns the list of offline CPUs. See [GetOffline] for details on
+// when a CPU is considered offline.
+func ListOffline() ([]int, error) {
+ return listOffline()
+}
+
+// ListOnline returns the list of CPUs that are online and being scheduled.
+func ListOnline() ([]int, error) {
+ return listOnline()
+}
+
+// ListPossible returns the list of possible CPUs. See [GetPossible] for
+// details on when a CPU is considered possible.
+func ListPossible() ([]int, error) {
+ return listPossible()
+}
+
+// ListPresent returns the list of present CPUs. See [GetPresent] for
+// details on when a CPU is considered present.
+func ListPresent() ([]int, error) {
+ return listPresent()
+}
diff --git a/vendor/github.com/tklauser/numcpus/numcpus_linux.go b/vendor/github.com/tklauser/numcpus/numcpus_linux.go
index 7e75cb06..7b991da4 100644
--- a/vendor/github.com/tklauser/numcpus/numcpus_linux.go
+++ b/vendor/github.com/tklauser/numcpus/numcpus_linux.go
@@ -15,6 +15,7 @@
package numcpus
import (
+ "fmt"
"os"
"path/filepath"
"strconv"
@@ -23,7 +24,14 @@ import (
"golang.org/x/sys/unix"
)
-const sysfsCPUBasePath = "/sys/devices/system/cpu"
+const (
+ sysfsCPUBasePath = "/sys/devices/system/cpu"
+
+ offline = "offline"
+ online = "online"
+ possible = "possible"
+ present = "present"
+)
func getFromCPUAffinity() (int, error) {
var cpuSet unix.CPUSet
@@ -33,19 +41,26 @@ func getFromCPUAffinity() (int, error) {
return cpuSet.Count(), nil
}
-func readCPURange(file string) (int, error) {
+func readCPURangeWith[T any](file string, f func(cpus string) (T, error)) (T, error) {
+ var zero T
buf, err := os.ReadFile(filepath.Join(sysfsCPUBasePath, file))
if err != nil {
- return 0, err
+ return zero, err
}
- return parseCPURange(strings.Trim(string(buf), "\n "))
+ return f(strings.Trim(string(buf), "\n "))
}
-func parseCPURange(cpus string) (int, error) {
+func countCPURange(cpus string) (int, error) {
+ // Treat empty file as valid. This might be the case if there are no offline CPUs in which
+ // case /sys/devices/system/cpu/offline is empty.
+ if cpus == "" {
+ return 0, nil
+ }
+
n := int(0)
for _, cpuRange := range strings.Split(cpus, ",") {
- if len(cpuRange) == 0 {
- continue
+ if cpuRange == "" {
+ return 0, fmt.Errorf("empty CPU range in CPU string %q", cpus)
}
from, to, found := strings.Cut(cpuRange, "-")
first, err := strconv.ParseUint(from, 10, 32)
@@ -60,11 +75,49 @@ func parseCPURange(cpus string) (int, error) {
if err != nil {
return 0, err
}
+ if last < first {
+ return 0, fmt.Errorf("last CPU in range (%d) less than first (%d)", last, first)
+ }
n += int(last - first + 1)
}
return n, nil
}
+func listCPURange(cpus string) ([]int, error) {
+ // See comment in countCPURange.
+ if cpus == "" {
+ return []int{}, nil
+ }
+
+ list := []int{}
+ for _, cpuRange := range strings.Split(cpus, ",") {
+ if cpuRange == "" {
+ return nil, fmt.Errorf("empty CPU range in CPU string %q", cpus)
+ }
+ from, to, found := strings.Cut(cpuRange, "-")
+ first, err := strconv.ParseUint(from, 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ if !found {
+ // range containing a single element
+ list = append(list, int(first))
+ continue
+ }
+ last, err := strconv.ParseUint(to, 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ if last < first {
+ return nil, fmt.Errorf("last CPU in range (%d) less than first (%d)", last, first)
+ }
+ for cpu := int(first); cpu <= int(last); cpu++ {
+ list = append(list, cpu)
+ }
+ }
+ return list, nil
+}
+
func getConfigured() (int, error) {
d, err := os.Open(sysfsCPUBasePath)
if err != nil {
@@ -100,20 +153,36 @@ func getKernelMax() (int, error) {
}
func getOffline() (int, error) {
- return readCPURange("offline")
+ return readCPURangeWith(offline, countCPURange)
}
func getOnline() (int, error) {
if n, err := getFromCPUAffinity(); err == nil {
return n, nil
}
- return readCPURange("online")
+ return readCPURangeWith(online, countCPURange)
}
func getPossible() (int, error) {
- return readCPURange("possible")
+ return readCPURangeWith(possible, countCPURange)
}
func getPresent() (int, error) {
- return readCPURange("present")
+ return readCPURangeWith(present, countCPURange)
+}
+
+func listOffline() ([]int, error) {
+ return readCPURangeWith(offline, listCPURange)
+}
+
+func listOnline() ([]int, error) {
+ return readCPURangeWith(online, listCPURange)
+}
+
+func listPossible() ([]int, error) {
+ return readCPURangeWith(possible, listCPURange)
+}
+
+func listPresent() ([]int, error) {
+ return readCPURangeWith(present, listCPURange)
}
diff --git a/vendor/github.com/tklauser/numcpus/numcpus_list_unsupported.go b/vendor/github.com/tklauser/numcpus/numcpus_list_unsupported.go
new file mode 100644
index 00000000..af4efeac
--- /dev/null
+++ b/vendor/github.com/tklauser/numcpus/numcpus_list_unsupported.go
@@ -0,0 +1,33 @@
+// Copyright 2024 Tobias Klauser
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !linux
+
+package numcpus
+
+func listOffline() ([]int, error) {
+ return nil, ErrNotSupported
+}
+
+func listOnline() ([]int, error) {
+ return nil, ErrNotSupported
+}
+
+func listPossible() ([]int, error) {
+ return nil, ErrNotSupported
+}
+
+func listPresent() ([]int, error) {
+ return nil, ErrNotSupported
+}
diff --git a/vendor/github.com/urfave/cli/v2/godoc-current.txt b/vendor/github.com/urfave/cli/v2/godoc-current.txt
index 4b620fee..3e29faab 100644
--- a/vendor/github.com/urfave/cli/v2/godoc-current.txt
+++ b/vendor/github.com/urfave/cli/v2/godoc-current.txt
@@ -35,7 +35,7 @@ var AppHelpTemplate = `NAME:
{{template "helpNameTemplate" .}}
USAGE:
- {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
+ {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}}{{if .ArgsUsage}} {{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
VERSION:
{{.Version}}{{end}}{{end}}{{if .Description}}
@@ -136,7 +136,7 @@ var SubcommandHelpTemplate = `NAME:
{{template "helpNameTemplate" .}}
USAGE:
- {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Description}}
+ {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}}{{if .ArgsUsage}} {{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}{{if .Description}}
DESCRIPTION:
{{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}}
diff --git a/vendor/github.com/urfave/cli/v2/help.go b/vendor/github.com/urfave/cli/v2/help.go
index 640e2904..874be941 100644
--- a/vendor/github.com/urfave/cli/v2/help.go
+++ b/vendor/github.com/urfave/cli/v2/help.go
@@ -248,7 +248,6 @@ func ShowCommandHelpAndExit(c *Context, command string, code int) {
// ShowCommandHelp prints help for the given command
func ShowCommandHelp(ctx *Context, command string) error {
-
commands := ctx.App.Commands
if ctx.Command.Subcommands != nil {
commands = ctx.Command.Subcommands
@@ -337,7 +336,6 @@ func ShowCommandCompletions(ctx *Context, command string) {
DefaultCompleteWithFlags(c)(ctx)
}
}
-
}
// printHelpCustom is the default implementation of HelpPrinterCustom.
@@ -345,7 +343,6 @@ func ShowCommandCompletions(ctx *Context, command string) {
// The customFuncs map will be combined with a default template.FuncMap to
// allow using arbitrary functions in template rendering.
func printHelpCustom(out io.Writer, templ string, data interface{}, customFuncs map[string]interface{}) {
-
const maxLineLength = 10000
funcMap := template.FuncMap{
@@ -450,6 +447,15 @@ func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) {
return false, arguments
}
+ for _, arg := range arguments {
+ // If arguments include "--", shell completion is disabled
+ // because after "--" only positional arguments are accepted.
+ // https://unix.stackexchange.com/a/11382
+ if arg == "--" {
+ return false, arguments
+ }
+ }
+
return true, arguments[:pos]
}
@@ -499,7 +505,6 @@ func wrap(input string, offset int, wrapAt int) string {
ss = append(ss, wrapped)
} else {
ss = append(ss, padding+wrapped)
-
}
}
diff --git a/vendor/github.com/urfave/cli/v2/template.go b/vendor/github.com/urfave/cli/v2/template.go
index 5748f4c2..8abc5ba4 100644
--- a/vendor/github.com/urfave/cli/v2/template.go
+++ b/vendor/github.com/urfave/cli/v2/template.go
@@ -1,7 +1,7 @@
package cli
var helpNameTemplate = `{{$v := offset .HelpName 6}}{{wrap .HelpName 3}}{{if .Usage}} - {{wrap .Usage $v}}{{end}}`
-var usageTemplate = `{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}}{{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}`
+var usageTemplate = `{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}}{{if .ArgsUsage}} {{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}`
var descriptionTemplate = `{{wrap .Description 3}}`
var authorsTemplate = `{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}:
{{range $index, $author := .Authors}}{{if $index}}
@@ -35,7 +35,7 @@ var AppHelpTemplate = `NAME:
{{template "helpNameTemplate" .}}
USAGE:
- {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
+ {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}}{{if .ArgsUsage}} {{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
VERSION:
{{.Version}}{{end}}{{end}}{{if .Description}}
@@ -83,7 +83,7 @@ var SubcommandHelpTemplate = `NAME:
{{template "helpNameTemplate" .}}
USAGE:
- {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Description}}
+ {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}}{{if .ArgsUsage}} {{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}{{if .Description}}
DESCRIPTION:
{{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}}
diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/document.go b/vendor/github.com/vektah/gqlparser/v2/ast/document.go
index a3a9e98d..7881f349 100644
--- a/vendor/github.com/vektah/gqlparser/v2/ast/document.go
+++ b/vendor/github.com/vektah/gqlparser/v2/ast/document.go
@@ -26,9 +26,10 @@ func (d *SchemaDocument) Merge(other *SchemaDocument) {
}
type Schema struct {
- Query *Definition
- Mutation *Definition
- Subscription *Definition
+ Query *Definition
+ Mutation *Definition
+ Subscription *Definition
+ SchemaDirectives DirectiveList
Types map[string]*Definition
Directives map[string]*DirectiveDefinition
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go
index 24d4f3db..daa86448 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"fmt"
@@ -11,8 +11,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("FieldsOnCorrectType", func(observers *Events, addError AddErrFunc) {
+var FieldsOnCorrectTypeRule = Rule{
+ Name: "FieldsOnCorrectType",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnField(func(walker *Walker, field *ast.Field) {
if field.ObjectDefinition == nil || field.Definition != nil {
return
@@ -27,14 +28,18 @@ func init() {
}
addError(
- Message(message),
+ Message("%s", message),
At(field.Position),
)
})
- })
+ },
+}
+
+func init() {
+ AddRule(FieldsOnCorrectTypeRule.Name, FieldsOnCorrectTypeRule.RuleFunc)
}
-// Go through all of the implementations of type, as well as the interfaces
+// Go through all the implementations of type, as well as the interfaces
// that they implement. If any of those types include the provided field,
// suggest them, sorted by how often the type is referenced, starting
// with Interfaces.
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go
index 81ef861b..ccbffbf6 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"fmt"
@@ -9,8 +9,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("FragmentsOnCompositeTypes", func(observers *Events, addError AddErrFunc) {
+var FragmentsOnCompositeTypesRule = Rule{
+ Name: "FragmentsOnCompositeTypes",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnInlineFragment(func(walker *Walker, inlineFragment *ast.InlineFragment) {
fragmentType := walker.Schema.Types[inlineFragment.TypeCondition]
if fragmentType == nil || fragmentType.IsCompositeType() {
@@ -20,7 +21,7 @@ func init() {
message := fmt.Sprintf(`Fragment cannot condition on non composite type "%s".`, inlineFragment.TypeCondition)
addError(
- Message(message),
+ Message("%s", message),
At(inlineFragment.Position),
)
})
@@ -33,9 +34,13 @@ func init() {
message := fmt.Sprintf(`Fragment "%s" cannot condition on non composite type "%s".`, fragment.Name, fragment.TypeCondition)
addError(
- Message(message),
+ Message("%s", message),
At(fragment.Position),
)
})
- })
+ },
+}
+
+func init() {
+ AddRule(FragmentsOnCompositeTypesRule.Name, FragmentsOnCompositeTypesRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go
index c187dabf..2659aeb5 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("KnownArgumentNames", func(observers *Events, addError AddErrFunc) {
+var KnownArgumentNamesRule = Rule{
+ Name: "KnownArgumentNames",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
// A GraphQL field is only valid if all supplied arguments are defined by that field.
observers.OnField(func(walker *Walker, field *ast.Field) {
if field.Definition == nil || field.ObjectDefinition == nil {
@@ -55,5 +56,9 @@ func init() {
)
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(KnownArgumentNamesRule.Name, KnownArgumentNamesRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go
index f7bae811..b68fa5e8 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("KnownDirectives", func(observers *Events, addError AddErrFunc) {
+var KnownDirectivesRule = Rule{
+ Name: "KnownDirectives",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
type mayNotBeUsedDirective struct {
Name string
Line int
@@ -45,5 +46,9 @@ func init() {
seen[tmp] = true
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(KnownDirectivesRule.Name, KnownDirectivesRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go
index 3afd9c1c..77a82f67 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("KnownFragmentNames", func(observers *Events, addError AddErrFunc) {
+var KnownFragmentNamesRule = Rule{
+ Name: "KnownFragmentNames",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnFragmentSpread(func(walker *Walker, fragmentSpread *ast.FragmentSpread) {
if fragmentSpread.Definition == nil {
addError(
@@ -17,5 +18,9 @@ func init() {
)
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(KnownFragmentNamesRule.Name, KnownFragmentNamesRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go
index 60bc0d52..76962c3d 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"fmt"
@@ -9,8 +9,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("KnownRootType", func(observers *Events, addError AddErrFunc) {
+var KnownRootTypeRule = Rule{
+ Name: "KnownRootType",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
// A query's root must be a valid type. Surprisingly, this isn't
// checked anywhere else!
observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) {
@@ -33,5 +34,9 @@ func init() {
At(operation.Position))
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(KnownRootTypeRule.Name, KnownRootTypeRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go
index 902939d3..717019fb 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("KnownTypeNames", func(observers *Events, addError AddErrFunc) {
+var KnownTypeNamesRule = Rule{
+ Name: "KnownTypeNames",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnVariable(func(walker *Walker, variable *ast.VariableDefinition) {
typeName := variable.Type.Name()
typdef := walker.Schema.Types[typeName]
@@ -57,5 +58,9 @@ func init() {
At(fragment.Position),
)
})
- })
+ },
+}
+
+func init() {
+ AddRule(KnownTypeNamesRule.Name, KnownTypeNamesRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go
index fe8bb203..ba71f141 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("LoneAnonymousOperation", func(observers *Events, addError AddErrFunc) {
+var LoneAnonymousOperationRule = Rule{
+ Name: "LoneAnonymousOperation",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) {
if operation.Name == "" && len(walker.Document.Operations) > 1 {
addError(
@@ -17,5 +18,9 @@ func init() {
)
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(LoneAnonymousOperationRule.Name, LoneAnonymousOperationRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go
index a953174f..c80def7c 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"fmt"
@@ -10,8 +10,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("NoFragmentCycles", func(observers *Events, addError AddErrFunc) {
+var NoFragmentCyclesRule = Rule{
+ Name: "NoFragmentCycles",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
visitedFrags := make(map[string]bool)
observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) {
@@ -67,7 +68,11 @@ func init() {
recursive(fragment)
})
- })
+ },
+}
+
+func init() {
+ AddRule(NoFragmentCyclesRule.Name, NoFragmentCyclesRule.RuleFunc)
}
func getFragmentSpreads(node ast.SelectionSet) []*ast.FragmentSpread {
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go
index 46c18d12..84ed5fa7 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("NoUndefinedVariables", func(observers *Events, addError AddErrFunc) {
+var NoUndefinedVariablesRule = Rule{
+ Name: "NoUndefinedVariables",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnValue(func(walker *Walker, value *ast.Value) {
if walker.CurrentOperation == nil || value.Kind != ast.Variable || value.VariableDefinition != nil {
return
@@ -26,5 +27,9 @@ func init() {
)
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(NoUndefinedVariablesRule.Name, NoUndefinedVariablesRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go
index 59d9c15c..e95bbe69 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("NoUnusedFragments", func(observers *Events, addError AddErrFunc) {
+var NoUnusedFragmentsRule = Rule{
+ Name: "NoUnusedFragments",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
inFragmentDefinition := false
fragmentNameUsed := make(map[string]bool)
@@ -27,5 +28,9 @@ func init() {
)
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(NoUnusedFragmentsRule.Name, NoUnusedFragmentsRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go
index d3088109..425df205 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("NoUnusedVariables", func(observers *Events, addError AddErrFunc) {
+var NoUnusedVariablesRule = Rule{
+ Name: "NoUnusedVariables",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) {
for _, varDef := range operation.VariableDefinitions {
if varDef.Used {
@@ -28,5 +29,9 @@ func init() {
}
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(NoUnusedVariablesRule.Name, NoUnusedVariablesRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go
index eaa2035e..d2c66aac 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"bytes"
@@ -11,8 +11,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("OverlappingFieldsCanBeMerged", func(observers *Events, addError AddErrFunc) {
+var OverlappingFieldsCanBeMergedRule = Rule{
+ Name: "OverlappingFieldsCanBeMerged",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
/**
* Algorithm:
*
@@ -41,7 +42,7 @@ func init() {
*
* D) When comparing "between" a set of fields and a referenced fragment, first
* a comparison is made between each field in the original set of fields and
- * each field in the the referenced set of fields.
+ * each field in the referenced set of fields.
*
* E) Also, if any fragment is referenced in the referenced selection set,
* then a comparison is made "between" the original set of fields and the
@@ -104,7 +105,11 @@ func init() {
conflict.addFieldsConflictMessage(addError)
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(OverlappingFieldsCanBeMergedRule.Name, OverlappingFieldsCanBeMergedRule.RuleFunc)
}
type pairSet struct {
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go
index 244e5f20..01255e9b 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("PossibleFragmentSpreads", func(observers *Events, addError AddErrFunc) {
+var PossibleFragmentSpreadsRule = Rule{
+ Name: "PossibleFragmentSpreads",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
validate := func(walker *Walker, parentDef *ast.Definition, fragmentName string, emitError func()) {
if parentDef == nil {
return
@@ -65,5 +66,9 @@ func init() {
)
})
})
- })
+ },
+}
+
+func init() {
+ AddRule(PossibleFragmentSpreadsRule.Name, PossibleFragmentSpreadsRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go
index ab79163b..37428d44 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go
@@ -1,14 +1,14 @@
-package validator
+package rules
import (
- "github.com/vektah/gqlparser/v2/ast"
-
//nolint:revive // Validator rules each use dot imports for convenience.
+ "github.com/vektah/gqlparser/v2/ast"
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("ProvidedRequiredArguments", func(observers *Events, addError AddErrFunc) {
+var ProvidedRequiredArgumentsRule = Rule{
+ Name: "ProvidedRequiredArguments",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnField(func(walker *Walker, field *ast.Field) {
if field.Definition == nil {
return
@@ -60,5 +60,9 @@ func init() {
)
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(ProvidedRequiredArgumentsRule.Name, ProvidedRequiredArgumentsRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go
index 605ab9e8..5628ce2c 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("ScalarLeafs", func(observers *Events, addError AddErrFunc) {
+var ScalarLeafsRule = Rule{
+ Name: "ScalarLeafs",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnField(func(walker *Walker, field *ast.Field) {
if field.Definition == nil {
return
@@ -34,5 +35,9 @@ func init() {
)
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(ScalarLeafsRule.Name, ScalarLeafsRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go
index 7d4c6843..94a4b304 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"strconv"
@@ -10,8 +10,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("SingleFieldSubscriptions", func(observers *Events, addError AddErrFunc) {
+var SingleFieldSubscriptionsRule = Rule{
+ Name: "SingleFieldSubscriptions",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) {
if walker.Schema.Subscription == nil || operation.Operation != ast.Subscription {
return
@@ -40,7 +41,11 @@ func init() {
}
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(SingleFieldSubscriptionsRule.Name, SingleFieldSubscriptionsRule.RuleFunc)
}
type topField struct {
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go
index e977d638..d0c52909 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("UniqueArgumentNames", func(observers *Events, addError AddErrFunc) {
+var UniqueArgumentNamesRule = Rule{
+ Name: "UniqueArgumentNames",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnField(func(walker *Walker, field *ast.Field) {
checkUniqueArgs(field.Arguments, addError)
})
@@ -16,7 +17,11 @@ func init() {
observers.OnDirective(func(walker *Walker, directive *ast.Directive) {
checkUniqueArgs(directive.Arguments, addError)
})
- })
+ },
+}
+
+func init() {
+ AddRule(UniqueArgumentNamesRule.Name, UniqueArgumentNamesRule.RuleFunc)
}
func checkUniqueArgs(args ast.ArgumentList, addError AddErrFunc) {
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go
index 47971ee1..4cab38bd 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("UniqueDirectivesPerLocation", func(observers *Events, addError AddErrFunc) {
+var UniqueDirectivesPerLocationRule = Rule{
+ Name: "UniqueDirectivesPerLocation",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnDirectiveList(func(walker *Walker, directives []*ast.Directive) {
seen := map[string]bool{}
@@ -22,5 +23,9 @@ func init() {
seen[dir.Name] = true
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(UniqueDirectivesPerLocationRule.Name, UniqueDirectivesPerLocationRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go
index 2c44a437..3d2c7289 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("UniqueFragmentNames", func(observers *Events, addError AddErrFunc) {
+var UniqueFragmentNamesRule = Rule{
+ Name: "UniqueFragmentNames",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
seenFragments := map[string]bool{}
observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) {
@@ -20,5 +21,9 @@ func init() {
}
seenFragments[fragment.Name] = true
})
- })
+ },
+}
+
+func init() {
+ AddRule(UniqueFragmentNamesRule.Name, UniqueFragmentNamesRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go
index c5fce8ff..3ccbbfe0 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("UniqueInputFieldNames", func(observers *Events, addError AddErrFunc) {
+var UniqueInputFieldNamesRule = Rule{
+ Name: "UniqueInputFieldNames",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnValue(func(walker *Walker, value *ast.Value) {
if value.Kind != ast.ObjectValue {
return
@@ -25,5 +26,9 @@ func init() {
seen[field.Name] = true
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(UniqueInputFieldNamesRule.Name, UniqueInputFieldNamesRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go
index 49ffbe47..401b0ad3 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("UniqueOperationNames", func(observers *Events, addError AddErrFunc) {
+var UniqueOperationNamesRule = Rule{
+ Name: "UniqueOperationNames",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
seen := map[string]bool{}
observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) {
@@ -20,5 +21,9 @@ func init() {
}
seen[operation.Name] = true
})
- })
+ },
+}
+
+func init() {
+ AddRule(UniqueOperationNamesRule.Name, UniqueOperationNamesRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go
index c93948c1..f0e4a200 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("UniqueVariableNames", func(observers *Events, addError AddErrFunc) {
+var UniqueVariableNamesRule = Rule{
+ Name: "UniqueVariableNames",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) {
seen := map[string]int{}
for _, def := range operation.VariableDefinitions {
@@ -22,5 +23,9 @@ func init() {
seen[def.Variable]++
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(UniqueVariableNamesRule.Name, UniqueVariableNamesRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go
index 914e428e..7784fe0c 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"errors"
@@ -11,8 +11,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("ValuesOfCorrectType", func(observers *Events, addError AddErrFunc) {
+var ValuesOfCorrectTypeRule = Rule{
+ Name: "ValuesOfCorrectType",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnValue(func(walker *Walker, value *ast.Value) {
if value.Definition == nil || value.ExpectedType == nil {
return
@@ -134,7 +135,11 @@ func init() {
panic(fmt.Errorf("unhandled %T", value))
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(ValuesOfCorrectTypeRule.Name, ValuesOfCorrectTypeRule.RuleFunc)
}
func unexpectedTypeMessage(addError AddErrFunc, v *ast.Value) {
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go
index d16ee021..59b2e6f7 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("VariablesAreInputTypes", func(observers *Events, addError AddErrFunc) {
+var VariablesAreInputTypesRule = Rule{
+ Name: "VariablesAreInputTypes",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) {
for _, def := range operation.VariableDefinitions {
if def.Definition == nil {
@@ -26,5 +27,9 @@ func init() {
}
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(VariablesAreInputTypesRule.Name, VariablesAreInputTypesRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go
index e3fd6fbb..75b0f7f7 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go
@@ -1,4 +1,4 @@
-package validator
+package rules
import (
"github.com/vektah/gqlparser/v2/ast"
@@ -7,8 +7,9 @@ import (
. "github.com/vektah/gqlparser/v2/validator"
)
-func init() {
- AddRule("VariablesInAllowedPosition", func(observers *Events, addError AddErrFunc) {
+var VariablesInAllowedPositionRule = Rule{
+ Name: "VariablesInAllowedPosition",
+ RuleFunc: func(observers *Events, addError AddErrFunc) {
observers.OnValue(func(walker *Walker, value *ast.Value) {
if value.Kind != ast.Variable || value.ExpectedType == nil || value.VariableDefinition == nil || walker.CurrentOperation == nil {
return
@@ -36,5 +37,9 @@ func init() {
)
}
})
- })
+ },
+}
+
+func init() {
+ AddRule(VariablesInAllowedPositionRule.Name, VariablesInAllowedPositionRule.RuleFunc)
}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/schema.go b/vendor/github.com/vektah/gqlparser/v2/validator/schema.go
index d8590284..9f9ddde4 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/schema.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/schema.go
@@ -122,6 +122,10 @@ func ValidateSchemaDocument(sd *SchemaDocument) (*Schema, error) {
schema.Subscription = def
}
}
+ if err := validateDirectives(&schema, sd.Schema[0].Directives, LocationSchema, nil); err != nil {
+ return nil, err
+ }
+ schema.SchemaDirectives = append(schema.SchemaDirectives, sd.Schema[0].Directives...)
}
for _, ext := range sd.SchemaExtension {
@@ -139,6 +143,10 @@ func ValidateSchemaDocument(sd *SchemaDocument) (*Schema, error) {
schema.Subscription = def
}
}
+ if err := validateDirectives(&schema, ext.Directives, LocationSchema, nil); err != nil {
+ return nil, err
+ }
+ schema.SchemaDirectives = append(schema.SchemaDirectives, ext.Directives...)
}
if err := validateTypeDefinitions(&schema); err != nil {
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/validator.go b/vendor/github.com/vektah/gqlparser/v2/validator/validator.go
index b4f37ce2..36564b23 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/validator.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/validator.go
@@ -8,22 +8,26 @@ import (
type AddErrFunc func(options ...ErrorOption)
-type ruleFunc func(observers *Events, addError AddErrFunc)
+type RuleFunc func(observers *Events, addError AddErrFunc)
-type rule struct {
- name string
- rule ruleFunc
+type Rule struct {
+ Name string
+ RuleFunc RuleFunc
}
-var rules []rule
+var specifiedRules []Rule
-// addRule to rule set.
+// AddRule adds rule to the rule set.
// f is called once each time `Validate` is executed.
-func AddRule(name string, f ruleFunc) {
- rules = append(rules, rule{name: name, rule: f})
+func AddRule(name string, ruleFunc RuleFunc) {
+ specifiedRules = append(specifiedRules, Rule{Name: name, RuleFunc: ruleFunc})
}
-func Validate(schema *Schema, doc *QueryDocument) gqlerror.List {
+func Validate(schema *Schema, doc *QueryDocument, rules ...Rule) gqlerror.List {
+ if rules == nil {
+ rules = specifiedRules
+ }
+
var errs gqlerror.List
if schema == nil {
errs = append(errs, gqlerror.Errorf("cannot validate as Schema is nil"))
@@ -37,9 +41,9 @@ func Validate(schema *Schema, doc *QueryDocument) gqlerror.List {
observers := &Events{}
for i := range rules {
rule := rules[i]
- rule.rule(observers, func(options ...ErrorOption) {
+ rule.RuleFunc(observers, func(options ...ErrorOption) {
err := &gqlerror.Error{
- Rule: rule.name,
+ Rule: rule.Name,
}
for _, o := range options {
o(err)
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/vars.go b/vendor/github.com/vektah/gqlparser/v2/validator/vars.go
index c386b6b9..f2934caf 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/vars.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/vars.go
@@ -180,7 +180,7 @@ func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflec
return val, gqlerror.ErrorPathf(v.path, "cannot use %s as %s", kind.String(), typ.NamedType)
case ast.InputObject:
if val.Kind() != reflect.Map {
- return val, gqlerror.ErrorPathf(v.path, "must be a %s", def.Name)
+ return val, gqlerror.ErrorPathf(v.path, "must be a %s, not a %s", def.Name, val.Kind())
}
// check for unknown fields
diff --git a/vendor/github.com/zeebo/blake3/.gitignore b/vendor/github.com/zeebo/blake3/.gitignore
index c6bfdf2c..80c08326 100644
--- a/vendor/github.com/zeebo/blake3/.gitignore
+++ b/vendor/github.com/zeebo/blake3/.gitignore
@@ -4,3 +4,4 @@
*.out
/upstream
+/go.work
diff --git a/vendor/github.com/zeebo/blake3/Makefile b/vendor/github.com/zeebo/blake3/Makefile
index b96623be..a88ed17c 100644
--- a/vendor/github.com/zeebo/blake3/Makefile
+++ b/vendor/github.com/zeebo/blake3/Makefile
@@ -21,14 +21,11 @@ test:
.PHONY: vet
vet:
- GOOS=linux GOARCH=386 GO386=softfloat go vet ./...
- GOOS=windows GOARCH=386 GO386=softfloat go vet ./...
- GOOS=linux GOARCH=amd64 go vet ./...
- GOOS=windows GOARCH=amd64 go vet ./...
- GOOS=darwin GOARCH=amd64 go vet ./...
- GOOS=linux GOARCH=arm go vet ./...
- GOOS=linux GOARCH=arm64 go vet ./...
- GOOS=windows GOARCH=arm64 go vet ./...
- GOOS=darwin GOARCH=arm64 go vet ./...
- GOOS=js GOARCH=wasm go vet ./...
- GOOS=linux GOARCH=mips go vet ./...
\ No newline at end of file
+ go tool dist list \
+ | sed -e 's#/# #g' \
+ | while read goos goarch; \
+ do \
+ echo $$goos $$goarch; \
+ GOOS=$$goos GOARCH=$$goarch CGO_ENABLED=1 GO386=softfloat go vet ./...; \
+ GOOS=$$goos GOARCH=$$goarch CGO_ENABLED=1 GO386=softfloat go vet -tags=purego ./...; \
+ done
diff --git a/vendor/github.com/zeebo/blake3/digest.go b/vendor/github.com/zeebo/blake3/digest.go
index 4c511fbd..2578c2b8 100644
--- a/vendor/github.com/zeebo/blake3/digest.go
+++ b/vendor/github.com/zeebo/blake3/digest.go
@@ -22,7 +22,7 @@ type Digest struct {
bufn int
}
-// Read reads data frm the hasher into out. It always fills the entire buffer and
+// Read reads data from the hasher into out. It always fills the entire buffer and
// never errors. The stream will wrap around when reading past 2^64 bytes.
func (d *Digest) Read(p []byte) (n int, err error) {
n = len(p)
diff --git a/vendor/github.com/zeebo/blake3/internal/consts/cpu.go b/vendor/github.com/zeebo/blake3/internal/consts/cpu.go
index 20d67f18..0146899a 100644
--- a/vendor/github.com/zeebo/blake3/internal/consts/cpu.go
+++ b/vendor/github.com/zeebo/blake3/internal/consts/cpu.go
@@ -1,3 +1,5 @@
+//go:build !purego
+
package consts
import (
diff --git a/vendor/github.com/zeebo/blake3/internal/consts/cpu_purego.go b/vendor/github.com/zeebo/blake3/internal/consts/cpu_purego.go
new file mode 100644
index 00000000..e80e9591
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/consts/cpu_purego.go
@@ -0,0 +1,8 @@
+//go:build purego
+
+package consts
+
+const (
+ HasAVX2 = false
+ HasSSE41 = false
+)
diff --git a/vendor/go.etcd.io/bbolt/.go-version b/vendor/go.etcd.io/bbolt/.go-version
index f124bfa1..013173af 100644
--- a/vendor/go.etcd.io/bbolt/.go-version
+++ b/vendor/go.etcd.io/bbolt/.go-version
@@ -1 +1 @@
-1.21.9
+1.22.6
diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile
index 18154c63..21407797 100644
--- a/vendor/go.etcd.io/bbolt/Makefile
+++ b/vendor/go.etcd.io/bbolt/Makefile
@@ -41,6 +41,15 @@ coverage:
TEST_FREELIST_TYPE=array go test -v -timeout 30m \
-coverprofile cover-freelist-array.out -covermode atomic
+BOLT_CMD=bbolt
+
+build:
+ go build -o bin/${BOLT_CMD} ./cmd/${BOLT_CMD}
+
+.PHONY: clean
+clean: # Clean binaries
+ rm -f ./bin/${BOLT_CMD}
+
.PHONY: gofail-enable
gofail-enable: install-gofail
gofail enable .
@@ -61,3 +70,7 @@ test-failpoint:
@echo "[failpoint] array freelist test"
TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint
+.PHONY: test-robustness # Running robustness tests requires root permission
+test-robustness:
+ go test -v ${TESTFLAGS} ./tests/dmflakey -test.root
+ go test -v ${TESTFLAGS} ./tests/robustness -test.root
diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go
index 4175bdf3..822798e4 100644
--- a/vendor/go.etcd.io/bbolt/db.go
+++ b/vendor/go.etcd.io/bbolt/db.go
@@ -524,7 +524,7 @@ func (db *DB) munmap() error {
// gofail: var unmapError string
// return errors.New(unmapError)
if err := munmap(db); err != nil {
- return fmt.Errorf("unmap error: " + err.Error())
+ return fmt.Errorf("unmap error: %v", err.Error())
}
return nil
@@ -571,7 +571,7 @@ func (db *DB) munlock(fileSize int) error {
// gofail: var munlockError string
// return errors.New(munlockError)
if err := munlock(db, fileSize); err != nil {
- return fmt.Errorf("munlock error: " + err.Error())
+ return fmt.Errorf("munlock error: %v", err.Error())
}
return nil
}
@@ -580,7 +580,7 @@ func (db *DB) mlock(fileSize int) error {
// gofail: var mlockError string
// return errors.New(mlockError)
if err := mlock(db, fileSize); err != nil {
- return fmt.Errorf("mlock error: " + err.Error())
+ return fmt.Errorf("mlock error: %v", err.Error())
}
return nil
}
@@ -1159,6 +1159,8 @@ func (db *DB) grow(sz int) error {
// https://github.com/boltdb/bolt/issues/284
if !db.NoGrowSync && !db.readOnly {
if runtime.GOOS != "windows" {
+ // gofail: var resizeFileError string
+ // return errors.New(resizeFileError)
if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("file resize error: %s", err)
}
diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go
index 61d43f81..dffc7bc7 100644
--- a/vendor/go.etcd.io/bbolt/freelist.go
+++ b/vendor/go.etcd.io/bbolt/freelist.go
@@ -252,6 +252,14 @@ func (f *freelist) rollback(txid txid) {
}
// Remove pages from pending list and mark as free if allocated by txid.
delete(f.pending, txid)
+
+ // Remove pgids which are allocated by this txid
+ for pgid, tid := range f.allocs {
+ if tid == txid {
+ delete(f.allocs, pgid)
+ }
+ }
+
f.mergeSpans(m)
}
diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go
index 2fac8c0a..766395de 100644
--- a/vendor/go.etcd.io/bbolt/tx.go
+++ b/vendor/go.etcd.io/bbolt/tx.go
@@ -1,6 +1,7 @@
package bbolt
import (
+ "errors"
"fmt"
"io"
"os"
@@ -185,6 +186,10 @@ func (tx *Tx) Commit() error {
// If the high water mark has moved up then attempt to grow the database.
if tx.meta.pgid > opgid {
+ _ = errors.New("")
+ // gofail: var lackOfDiskSpace string
+ // tx.rollback()
+ // return errors.New(lackOfDiskSpace)
if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
tx.rollback()
return err
@@ -470,6 +475,7 @@ func (tx *Tx) write() error {
// Ignore file sync if flag is set on DB.
if !tx.db.NoSync || IgnoreNoSync {
+ // gofail: var beforeSyncDataPages struct{}
if err := fdatasync(tx.db); err != nil {
return err
}
@@ -507,6 +513,7 @@ func (tx *Tx) writeMeta() error {
return err
}
if !tx.db.NoSync || IgnoreNoSync {
+ // gofail: var beforeSyncMetaPage struct{}
if err := fdatasync(tx.db); err != nil {
return err
}
diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go
index 3b974754..f9057fd2 100644
--- a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go
+++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go
@@ -25,15 +25,18 @@ package runtime
import (
"errors"
- "math"
cg "go.uber.org/automaxprocs/internal/cgroups"
)
// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
-// to a valid GOMAXPROCS value.
-func CPUQuotaToGOMAXPROCS(minValue int) (int, CPUQuotaStatus, error) {
- cgroups, err := newQueryer()
+// to a valid GOMAXPROCS value. The quota is converted from float to int using round.
+// If round == nil, DefaultRoundFunc is used.
+func CPUQuotaToGOMAXPROCS(minValue int, round func(v float64) int) (int, CPUQuotaStatus, error) {
+ if round == nil {
+ round = DefaultRoundFunc
+ }
+ cgroups, err := _newQueryer()
if err != nil {
return -1, CPUQuotaUndefined, err
}
@@ -43,7 +46,7 @@ func CPUQuotaToGOMAXPROCS(minValue int) (int, CPUQuotaStatus, error) {
return -1, CPUQuotaUndefined, err
}
- maxProcs := int(math.Floor(quota))
+ maxProcs := round(quota)
if minValue > 0 && maxProcs < minValue {
return minValue, CPUQuotaMinUsed, nil
}
@@ -57,6 +60,7 @@ type queryer interface {
var (
_newCgroups2 = cg.NewCGroups2ForCurrentProcess
_newCgroups = cg.NewCGroupsForCurrentProcess
+ _newQueryer = newQueryer
)
func newQueryer() (queryer, error) {
diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go
index 69225544..e7470150 100644
--- a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go
+++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go
@@ -26,6 +26,6 @@ package runtime
// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
// to a valid GOMAXPROCS value. This is Linux-specific and not supported in the
// current OS.
-func CPUQuotaToGOMAXPROCS(_ int) (int, CPUQuotaStatus, error) {
+func CPUQuotaToGOMAXPROCS(_ int, _ func(v float64) int) (int, CPUQuotaStatus, error) {
return -1, CPUQuotaUndefined, nil
}
diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go
index df6eacf0..f8a2834a 100644
--- a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go
+++ b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go
@@ -20,6 +20,8 @@
package runtime
+import "math"
+
// CPUQuotaStatus presents the status of how CPU quota is used
type CPUQuotaStatus int
@@ -31,3 +33,8 @@ const (
// CPUQuotaMinUsed is returned when CPU quota is smaller than the min value
CPUQuotaMinUsed
)
+
+// DefaultRoundFunc is the default function to convert CPU quota from float to int. It rounds the value down (floor).
+func DefaultRoundFunc(v float64) int {
+ return int(math.Floor(v))
+}
diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go
index 98176d64..e561fe60 100644
--- a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go
+++ b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go
@@ -37,9 +37,10 @@ func currentMaxProcs() int {
}
type config struct {
- printf func(string, ...interface{})
- procs func(int) (int, iruntime.CPUQuotaStatus, error)
- minGOMAXPROCS int
+ printf func(string, ...interface{})
+ procs func(int, func(v float64) int) (int, iruntime.CPUQuotaStatus, error)
+ minGOMAXPROCS int
+ roundQuotaFunc func(v float64) int
}
func (c *config) log(fmt string, args ...interface{}) {
@@ -71,6 +72,13 @@ func Min(n int) Option {
})
}
+// RoundQuotaFunc sets the function that will be used to covert the CPU quota from float to int.
+func RoundQuotaFunc(rf func(v float64) int) Option {
+ return optionFunc(func(cfg *config) {
+ cfg.roundQuotaFunc = rf
+ })
+}
+
type optionFunc func(*config)
func (of optionFunc) apply(cfg *config) { of(cfg) }
@@ -82,8 +90,9 @@ func (of optionFunc) apply(cfg *config) { of(cfg) }
// configured CPU quota.
func Set(opts ...Option) (func(), error) {
cfg := &config{
- procs: iruntime.CPUQuotaToGOMAXPROCS,
- minGOMAXPROCS: 1,
+ procs: iruntime.CPUQuotaToGOMAXPROCS,
+ roundQuotaFunc: iruntime.DefaultRoundFunc,
+ minGOMAXPROCS: 1,
}
for _, o := range opts {
o.apply(cfg)
@@ -102,7 +111,7 @@ func Set(opts ...Option) (func(), error) {
return undoNoop, nil
}
- maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS)
+ maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc)
if err != nil {
return undoNoop, err
}
diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/version.go b/vendor/go.uber.org/automaxprocs/maxprocs/version.go
index 108a9553..cc7fc5ae 100644
--- a/vendor/go.uber.org/automaxprocs/maxprocs/version.go
+++ b/vendor/go.uber.org/automaxprocs/maxprocs/version.go
@@ -21,4 +21,4 @@
package maxprocs
// Version is the current package version.
-const Version = "1.5.2"
+const Version = "1.6.0"
diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE
index 6a66aea5..2a7cf70d 100644
--- a/vendor/golang.org/x/crypto/LICENSE
+++ b/vendor/golang.org/x/crypto/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s
index 6713acca..c3895478 100644
--- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s
+++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s
@@ -1,243 +1,2791 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+// Code generated by command: go run blamka_amd64.go -out ../blamka_amd64.s -pkg argon2. DO NOT EDIT.
//go:build amd64 && gc && !purego
#include "textflag.h"
-DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
-DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
-
-DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
-DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
-
-#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
- MOVO v4, t1; \
- MOVO v5, v4; \
- MOVO t1, v5; \
- MOVO v6, t1; \
- PUNPCKLQDQ v6, t2; \
- PUNPCKHQDQ v7, v6; \
- PUNPCKHQDQ t2, v6; \
- PUNPCKLQDQ v7, t2; \
- MOVO t1, v7; \
- MOVO v2, t1; \
- PUNPCKHQDQ t2, v7; \
- PUNPCKLQDQ v3, t2; \
- PUNPCKHQDQ t2, v2; \
- PUNPCKLQDQ t1, t2; \
- PUNPCKHQDQ t2, v3
-
-#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
- MOVO v4, t1; \
- MOVO v5, v4; \
- MOVO t1, v5; \
- MOVO v2, t1; \
- PUNPCKLQDQ v2, t2; \
- PUNPCKHQDQ v3, v2; \
- PUNPCKHQDQ t2, v2; \
- PUNPCKLQDQ v3, t2; \
- MOVO t1, v3; \
- MOVO v6, t1; \
- PUNPCKHQDQ t2, v3; \
- PUNPCKLQDQ v7, t2; \
- PUNPCKHQDQ t2, v6; \
- PUNPCKLQDQ t1, t2; \
- PUNPCKHQDQ t2, v7
-
-#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \
- MOVO v0, t0; \
- PMULULQ v2, t0; \
- PADDQ v2, v0; \
- PADDQ t0, v0; \
- PADDQ t0, v0; \
- PXOR v0, v6; \
- PSHUFD $0xB1, v6, v6; \
- MOVO v4, t0; \
- PMULULQ v6, t0; \
- PADDQ v6, v4; \
- PADDQ t0, v4; \
- PADDQ t0, v4; \
- PXOR v4, v2; \
- PSHUFB c40, v2; \
- MOVO v0, t0; \
- PMULULQ v2, t0; \
- PADDQ v2, v0; \
- PADDQ t0, v0; \
- PADDQ t0, v0; \
- PXOR v0, v6; \
- PSHUFB c48, v6; \
- MOVO v4, t0; \
- PMULULQ v6, t0; \
- PADDQ v6, v4; \
- PADDQ t0, v4; \
- PADDQ t0, v4; \
- PXOR v4, v2; \
- MOVO v2, t0; \
- PADDQ v2, t0; \
- PSRLQ $63, v2; \
- PXOR t0, v2; \
- MOVO v1, t0; \
- PMULULQ v3, t0; \
- PADDQ v3, v1; \
- PADDQ t0, v1; \
- PADDQ t0, v1; \
- PXOR v1, v7; \
- PSHUFD $0xB1, v7, v7; \
- MOVO v5, t0; \
- PMULULQ v7, t0; \
- PADDQ v7, v5; \
- PADDQ t0, v5; \
- PADDQ t0, v5; \
- PXOR v5, v3; \
- PSHUFB c40, v3; \
- MOVO v1, t0; \
- PMULULQ v3, t0; \
- PADDQ v3, v1; \
- PADDQ t0, v1; \
- PADDQ t0, v1; \
- PXOR v1, v7; \
- PSHUFB c48, v7; \
- MOVO v5, t0; \
- PMULULQ v7, t0; \
- PADDQ v7, v5; \
- PADDQ t0, v5; \
- PADDQ t0, v5; \
- PXOR v5, v3; \
- MOVO v3, t0; \
- PADDQ v3, t0; \
- PSRLQ $63, v3; \
- PXOR t0, v3
-
-#define LOAD_MSG_0(block, off) \
- MOVOU 8*(off+0)(block), X0; \
- MOVOU 8*(off+2)(block), X1; \
- MOVOU 8*(off+4)(block), X2; \
- MOVOU 8*(off+6)(block), X3; \
- MOVOU 8*(off+8)(block), X4; \
- MOVOU 8*(off+10)(block), X5; \
- MOVOU 8*(off+12)(block), X6; \
- MOVOU 8*(off+14)(block), X7
-
-#define STORE_MSG_0(block, off) \
- MOVOU X0, 8*(off+0)(block); \
- MOVOU X1, 8*(off+2)(block); \
- MOVOU X2, 8*(off+4)(block); \
- MOVOU X3, 8*(off+6)(block); \
- MOVOU X4, 8*(off+8)(block); \
- MOVOU X5, 8*(off+10)(block); \
- MOVOU X6, 8*(off+12)(block); \
- MOVOU X7, 8*(off+14)(block)
-
-#define LOAD_MSG_1(block, off) \
- MOVOU 8*off+0*8(block), X0; \
- MOVOU 8*off+16*8(block), X1; \
- MOVOU 8*off+32*8(block), X2; \
- MOVOU 8*off+48*8(block), X3; \
- MOVOU 8*off+64*8(block), X4; \
- MOVOU 8*off+80*8(block), X5; \
- MOVOU 8*off+96*8(block), X6; \
- MOVOU 8*off+112*8(block), X7
-
-#define STORE_MSG_1(block, off) \
- MOVOU X0, 8*off+0*8(block); \
- MOVOU X1, 8*off+16*8(block); \
- MOVOU X2, 8*off+32*8(block); \
- MOVOU X3, 8*off+48*8(block); \
- MOVOU X4, 8*off+64*8(block); \
- MOVOU X5, 8*off+80*8(block); \
- MOVOU X6, 8*off+96*8(block); \
- MOVOU X7, 8*off+112*8(block)
-
-#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \
- LOAD_MSG_0(block, off); \
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
- SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \
- STORE_MSG_0(block, off)
-
-#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \
- LOAD_MSG_1(block, off); \
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
- SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \
- STORE_MSG_1(block, off)
-
// func blamkaSSE4(b *block)
-TEXT ·blamkaSSE4(SB), 4, $0-8
- MOVQ b+0(FP), AX
-
- MOVOU ·c40<>(SB), X10
- MOVOU ·c48<>(SB), X11
+// Requires: SSE2, SSSE3
+TEXT ·blamkaSSE4(SB), NOSPLIT, $0-8
+ MOVQ b+0(FP), AX
+ MOVOU ·c40<>+0(SB), X10
+ MOVOU ·c48<>+0(SB), X11
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU 32(AX), X2
+ MOVOU 48(AX), X3
+ MOVOU 64(AX), X4
+ MOVOU 80(AX), X5
+ MOVOU 96(AX), X6
+ MOVOU 112(AX), X7
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVOU X0, (AX)
+ MOVOU X1, 16(AX)
+ MOVOU X2, 32(AX)
+ MOVOU X3, 48(AX)
+ MOVOU X4, 64(AX)
+ MOVOU X5, 80(AX)
+ MOVOU X6, 96(AX)
+ MOVOU X7, 112(AX)
+ MOVOU 128(AX), X0
+ MOVOU 144(AX), X1
+ MOVOU 160(AX), X2
+ MOVOU 176(AX), X3
+ MOVOU 192(AX), X4
+ MOVOU 208(AX), X5
+ MOVOU 224(AX), X6
+ MOVOU 240(AX), X7
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVOU X0, 128(AX)
+ MOVOU X1, 144(AX)
+ MOVOU X2, 160(AX)
+ MOVOU X3, 176(AX)
+ MOVOU X4, 192(AX)
+ MOVOU X5, 208(AX)
+ MOVOU X6, 224(AX)
+ MOVOU X7, 240(AX)
+ MOVOU 256(AX), X0
+ MOVOU 272(AX), X1
+ MOVOU 288(AX), X2
+ MOVOU 304(AX), X3
+ MOVOU 320(AX), X4
+ MOVOU 336(AX), X5
+ MOVOU 352(AX), X6
+ MOVOU 368(AX), X7
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVOU X0, 256(AX)
+ MOVOU X1, 272(AX)
+ MOVOU X2, 288(AX)
+ MOVOU X3, 304(AX)
+ MOVOU X4, 320(AX)
+ MOVOU X5, 336(AX)
+ MOVOU X6, 352(AX)
+ MOVOU X7, 368(AX)
+ MOVOU 384(AX), X0
+ MOVOU 400(AX), X1
+ MOVOU 416(AX), X2
+ MOVOU 432(AX), X3
+ MOVOU 448(AX), X4
+ MOVOU 464(AX), X5
+ MOVOU 480(AX), X6
+ MOVOU 496(AX), X7
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVOU X0, 384(AX)
+ MOVOU X1, 400(AX)
+ MOVOU X2, 416(AX)
+ MOVOU X3, 432(AX)
+ MOVOU X4, 448(AX)
+ MOVOU X5, 464(AX)
+ MOVOU X6, 480(AX)
+ MOVOU X7, 496(AX)
+ MOVOU 512(AX), X0
+ MOVOU 528(AX), X1
+ MOVOU 544(AX), X2
+ MOVOU 560(AX), X3
+ MOVOU 576(AX), X4
+ MOVOU 592(AX), X5
+ MOVOU 608(AX), X6
+ MOVOU 624(AX), X7
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVOU X0, 512(AX)
+ MOVOU X1, 528(AX)
+ MOVOU X2, 544(AX)
+ MOVOU X3, 560(AX)
+ MOVOU X4, 576(AX)
+ MOVOU X5, 592(AX)
+ MOVOU X6, 608(AX)
+ MOVOU X7, 624(AX)
+ MOVOU 640(AX), X0
+ MOVOU 656(AX), X1
+ MOVOU 672(AX), X2
+ MOVOU 688(AX), X3
+ MOVOU 704(AX), X4
+ MOVOU 720(AX), X5
+ MOVOU 736(AX), X6
+ MOVOU 752(AX), X7
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVOU X0, 640(AX)
+ MOVOU X1, 656(AX)
+ MOVOU X2, 672(AX)
+ MOVOU X3, 688(AX)
+ MOVOU X4, 704(AX)
+ MOVOU X5, 720(AX)
+ MOVOU X6, 736(AX)
+ MOVOU X7, 752(AX)
+ MOVOU 768(AX), X0
+ MOVOU 784(AX), X1
+ MOVOU 800(AX), X2
+ MOVOU 816(AX), X3
+ MOVOU 832(AX), X4
+ MOVOU 848(AX), X5
+ MOVOU 864(AX), X6
+ MOVOU 880(AX), X7
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVOU X0, 768(AX)
+ MOVOU X1, 784(AX)
+ MOVOU X2, 800(AX)
+ MOVOU X3, 816(AX)
+ MOVOU X4, 832(AX)
+ MOVOU X5, 848(AX)
+ MOVOU X6, 864(AX)
+ MOVOU X7, 880(AX)
+ MOVOU 896(AX), X0
+ MOVOU 912(AX), X1
+ MOVOU 928(AX), X2
+ MOVOU 944(AX), X3
+ MOVOU 960(AX), X4
+ MOVOU 976(AX), X5
+ MOVOU 992(AX), X6
+ MOVOU 1008(AX), X7
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVOU X0, 896(AX)
+ MOVOU X1, 912(AX)
+ MOVOU X2, 928(AX)
+ MOVOU X3, 944(AX)
+ MOVOU X4, 960(AX)
+ MOVOU X5, 976(AX)
+ MOVOU X6, 992(AX)
+ MOVOU X7, 1008(AX)
+ MOVOU (AX), X0
+ MOVOU 128(AX), X1
+ MOVOU 256(AX), X2
+ MOVOU 384(AX), X3
+ MOVOU 512(AX), X4
+ MOVOU 640(AX), X5
+ MOVOU 768(AX), X6
+ MOVOU 896(AX), X7
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVOU X0, (AX)
+ MOVOU X1, 128(AX)
+ MOVOU X2, 256(AX)
+ MOVOU X3, 384(AX)
+ MOVOU X4, 512(AX)
+ MOVOU X5, 640(AX)
+ MOVOU X6, 768(AX)
+ MOVOU X7, 896(AX)
+ MOVOU 16(AX), X0
+ MOVOU 144(AX), X1
+ MOVOU 272(AX), X2
+ MOVOU 400(AX), X3
+ MOVOU 528(AX), X4
+ MOVOU 656(AX), X5
+ MOVOU 784(AX), X6
+ MOVOU 912(AX), X7
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVOU X0, 16(AX)
+ MOVOU X1, 144(AX)
+ MOVOU X2, 272(AX)
+ MOVOU X3, 400(AX)
+ MOVOU X4, 528(AX)
+ MOVOU X5, 656(AX)
+ MOVOU X6, 784(AX)
+ MOVOU X7, 912(AX)
+ MOVOU 32(AX), X0
+ MOVOU 160(AX), X1
+ MOVOU 288(AX), X2
+ MOVOU 416(AX), X3
+ MOVOU 544(AX), X4
+ MOVOU 672(AX), X5
+ MOVOU 800(AX), X6
+ MOVOU 928(AX), X7
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVOU X0, 32(AX)
+ MOVOU X1, 160(AX)
+ MOVOU X2, 288(AX)
+ MOVOU X3, 416(AX)
+ MOVOU X4, 544(AX)
+ MOVOU X5, 672(AX)
+ MOVOU X6, 800(AX)
+ MOVOU X7, 928(AX)
+ MOVOU 48(AX), X0
+ MOVOU 176(AX), X1
+ MOVOU 304(AX), X2
+ MOVOU 432(AX), X3
+ MOVOU 560(AX), X4
+ MOVOU 688(AX), X5
+ MOVOU 816(AX), X6
+ MOVOU 944(AX), X7
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVOU X0, 48(AX)
+ MOVOU X1, 176(AX)
+ MOVOU X2, 304(AX)
+ MOVOU X3, 432(AX)
+ MOVOU X4, 560(AX)
+ MOVOU X5, 688(AX)
+ MOVOU X6, 816(AX)
+ MOVOU X7, 944(AX)
+ MOVOU 64(AX), X0
+ MOVOU 192(AX), X1
+ MOVOU 320(AX), X2
+ MOVOU 448(AX), X3
+ MOVOU 576(AX), X4
+ MOVOU 704(AX), X5
+ MOVOU 832(AX), X6
+ MOVOU 960(AX), X7
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVOU X0, 64(AX)
+ MOVOU X1, 192(AX)
+ MOVOU X2, 320(AX)
+ MOVOU X3, 448(AX)
+ MOVOU X4, 576(AX)
+ MOVOU X5, 704(AX)
+ MOVOU X6, 832(AX)
+ MOVOU X7, 960(AX)
+ MOVOU 80(AX), X0
+ MOVOU 208(AX), X1
+ MOVOU 336(AX), X2
+ MOVOU 464(AX), X3
+ MOVOU 592(AX), X4
+ MOVOU 720(AX), X5
+ MOVOU 848(AX), X6
+ MOVOU 976(AX), X7
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVOU X0, 80(AX)
+ MOVOU X1, 208(AX)
+ MOVOU X2, 336(AX)
+ MOVOU X3, 464(AX)
+ MOVOU X4, 592(AX)
+ MOVOU X5, 720(AX)
+ MOVOU X6, 848(AX)
+ MOVOU X7, 976(AX)
+ MOVOU 96(AX), X0
+ MOVOU 224(AX), X1
+ MOVOU 352(AX), X2
+ MOVOU 480(AX), X3
+ MOVOU 608(AX), X4
+ MOVOU 736(AX), X5
+ MOVOU 864(AX), X6
+ MOVOU 992(AX), X7
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVOU X0, 96(AX)
+ MOVOU X1, 224(AX)
+ MOVOU X2, 352(AX)
+ MOVOU X3, 480(AX)
+ MOVOU X4, 608(AX)
+ MOVOU X5, 736(AX)
+ MOVOU X6, 864(AX)
+ MOVOU X7, 992(AX)
+ MOVOU 112(AX), X0
+ MOVOU 240(AX), X1
+ MOVOU 368(AX), X2
+ MOVOU 496(AX), X3
+ MOVOU 624(AX), X4
+ MOVOU 752(AX), X5
+ MOVOU 880(AX), X6
+ MOVOU 1008(AX), X7
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFD $0xb1, X6, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ PSHUFB X10, X2
+ MOVO X0, X8
+ PMULULQ X2, X8
+ PADDQ X2, X0
+ PADDQ X8, X0
+ PADDQ X8, X0
+ PXOR X0, X6
+ PSHUFB X11, X6
+ MOVO X4, X8
+ PMULULQ X6, X8
+ PADDQ X6, X4
+ PADDQ X8, X4
+ PADDQ X8, X4
+ PXOR X4, X2
+ MOVO X2, X8
+ PADDQ X2, X8
+ PSRLQ $0x3f, X2
+ PXOR X8, X2
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFD $0xb1, X7, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ PSHUFB X10, X3
+ MOVO X1, X8
+ PMULULQ X3, X8
+ PADDQ X3, X1
+ PADDQ X8, X1
+ PADDQ X8, X1
+ PXOR X1, X7
+ PSHUFB X11, X7
+ MOVO X5, X8
+ PMULULQ X7, X8
+ PADDQ X7, X5
+ PADDQ X8, X5
+ PADDQ X8, X5
+ PXOR X5, X3
+ MOVO X3, X8
+ PADDQ X3, X8
+ PSRLQ $0x3f, X3
+ PXOR X8, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVOU X0, 112(AX)
+ MOVOU X1, 240(AX)
+ MOVOU X2, 368(AX)
+ MOVOU X3, 496(AX)
+ MOVOU X4, 624(AX)
+ MOVOU X5, 752(AX)
+ MOVOU X6, 880(AX)
+ MOVOU X7, 1008(AX)
+ RET
- BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11)
- BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11)
- BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11)
- BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11)
- BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11)
- BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11)
- BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11)
- BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11)
+DATA ·c40<>+0(SB)/8, $0x0201000706050403
+DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b
+GLOBL ·c40<>(SB), RODATA|NOPTR, $16
- BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11)
- BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11)
- BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11)
- BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11)
- BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11)
- BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11)
- BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11)
- BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11)
- RET
+DATA ·c48<>+0(SB)/8, $0x0100070605040302
+DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a
+GLOBL ·c48<>(SB), RODATA|NOPTR, $16
-// func mixBlocksSSE2(out, a, b, c *block)
-TEXT ·mixBlocksSSE2(SB), 4, $0-32
+// func mixBlocksSSE2(out *block, a *block, b *block, c *block)
+// Requires: SSE2
+TEXT ·mixBlocksSSE2(SB), NOSPLIT, $0-32
MOVQ out+0(FP), DX
MOVQ a+8(FP), AX
MOVQ b+16(FP), BX
MOVQ c+24(FP), CX
- MOVQ $128, DI
+ MOVQ $0x00000080, DI
loop:
- MOVOU 0(AX), X0
- MOVOU 0(BX), X1
- MOVOU 0(CX), X2
+ MOVOU (AX), X0
+ MOVOU (BX), X1
+ MOVOU (CX), X2
PXOR X1, X0
PXOR X2, X0
- MOVOU X0, 0(DX)
- ADDQ $16, AX
- ADDQ $16, BX
- ADDQ $16, CX
- ADDQ $16, DX
- SUBQ $2, DI
+ MOVOU X0, (DX)
+ ADDQ $0x10, AX
+ ADDQ $0x10, BX
+ ADDQ $0x10, CX
+ ADDQ $0x10, DX
+ SUBQ $0x02, DI
JA loop
RET
-// func xorBlocksSSE2(out, a, b, c *block)
-TEXT ·xorBlocksSSE2(SB), 4, $0-32
+// func xorBlocksSSE2(out *block, a *block, b *block, c *block)
+// Requires: SSE2
+TEXT ·xorBlocksSSE2(SB), NOSPLIT, $0-32
MOVQ out+0(FP), DX
MOVQ a+8(FP), AX
MOVQ b+16(FP), BX
MOVQ c+24(FP), CX
- MOVQ $128, DI
+ MOVQ $0x00000080, DI
loop:
- MOVOU 0(AX), X0
- MOVOU 0(BX), X1
- MOVOU 0(CX), X2
- MOVOU 0(DX), X3
+ MOVOU (AX), X0
+ MOVOU (BX), X1
+ MOVOU (CX), X2
+ MOVOU (DX), X3
PXOR X1, X0
PXOR X2, X0
PXOR X3, X0
- MOVOU X0, 0(DX)
- ADDQ $16, AX
- ADDQ $16, BX
- ADDQ $16, CX
- ADDQ $16, DX
- SUBQ $2, DI
+ MOVOU X0, (DX)
+ ADDQ $0x10, AX
+ ADDQ $0x10, BX
+ ADDQ $0x10, CX
+ ADDQ $0x10, DX
+ SUBQ $0x02, DI
JA loop
RET
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
index 9ae8206c..f75162e0 100644
--- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
+++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
@@ -1,722 +1,4517 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+// Code generated by command: go run blake2bAVX2_amd64_asm.go -out ../../blake2bAVX2_amd64.s -pkg blake2b. DO NOT EDIT.
//go:build amd64 && gc && !purego
#include "textflag.h"
-DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
-DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
-DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b
-DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1
-GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32
-
-DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1
-DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
-DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b
-DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179
-GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32
-
-DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403
-DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403
-DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32
-
-DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302
-DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302
-DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32
-
-DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
-DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
-GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
-DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
-GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1
-DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
-GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
-DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
-GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403
-DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302
-DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16
-
-#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39
-#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93
-#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e
-#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93
-#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39
-
-#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \
- VPADDQ m0, Y0, Y0; \
- VPADDQ Y1, Y0, Y0; \
- VPXOR Y0, Y3, Y3; \
- VPSHUFD $-79, Y3, Y3; \
- VPADDQ Y3, Y2, Y2; \
- VPXOR Y2, Y1, Y1; \
- VPSHUFB c40, Y1, Y1; \
- VPADDQ m1, Y0, Y0; \
- VPADDQ Y1, Y0, Y0; \
- VPXOR Y0, Y3, Y3; \
- VPSHUFB c48, Y3, Y3; \
- VPADDQ Y3, Y2, Y2; \
- VPXOR Y2, Y1, Y1; \
- VPADDQ Y1, Y1, t; \
- VPSRLQ $63, Y1, Y1; \
- VPXOR t, Y1, Y1; \
- VPERMQ_0x39_Y1_Y1; \
- VPERMQ_0x4E_Y2_Y2; \
- VPERMQ_0x93_Y3_Y3; \
- VPADDQ m2, Y0, Y0; \
- VPADDQ Y1, Y0, Y0; \
- VPXOR Y0, Y3, Y3; \
- VPSHUFD $-79, Y3, Y3; \
- VPADDQ Y3, Y2, Y2; \
- VPXOR Y2, Y1, Y1; \
- VPSHUFB c40, Y1, Y1; \
- VPADDQ m3, Y0, Y0; \
- VPADDQ Y1, Y0, Y0; \
- VPXOR Y0, Y3, Y3; \
- VPSHUFB c48, Y3, Y3; \
- VPADDQ Y3, Y2, Y2; \
- VPXOR Y2, Y1, Y1; \
- VPADDQ Y1, Y1, t; \
- VPSRLQ $63, Y1, Y1; \
- VPXOR t, Y1, Y1; \
- VPERMQ_0x39_Y3_Y3; \
- VPERMQ_0x4E_Y2_Y2; \
- VPERMQ_0x93_Y1_Y1
-
-#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E
-#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26
-#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E
-#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36
-#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E
-
-#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n
-#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n
-#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n
-#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n
-#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n
-
-#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01
-#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01
-#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01
-#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01
-#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01
-
-#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01
-#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01
-#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01
-#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01
-#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01
-
-#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8
-#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01
-
-// load msg: Y12 = (i0, i1, i2, i3)
-// i0, i1, i2, i3 must not be 0
-#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \
- VMOVQ_SI_X12(i0*8); \
- VMOVQ_SI_X11(i2*8); \
- VPINSRQ_1_SI_X12(i1*8); \
- VPINSRQ_1_SI_X11(i3*8); \
- VINSERTI128 $1, X11, Y12, Y12
-
-// load msg: Y13 = (i0, i1, i2, i3)
-// i0, i1, i2, i3 must not be 0
-#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \
- VMOVQ_SI_X13(i0*8); \
- VMOVQ_SI_X11(i2*8); \
- VPINSRQ_1_SI_X13(i1*8); \
- VPINSRQ_1_SI_X11(i3*8); \
- VINSERTI128 $1, X11, Y13, Y13
-
-// load msg: Y14 = (i0, i1, i2, i3)
-// i0, i1, i2, i3 must not be 0
-#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \
- VMOVQ_SI_X14(i0*8); \
- VMOVQ_SI_X11(i2*8); \
- VPINSRQ_1_SI_X14(i1*8); \
- VPINSRQ_1_SI_X11(i3*8); \
- VINSERTI128 $1, X11, Y14, Y14
-
-// load msg: Y15 = (i0, i1, i2, i3)
-// i0, i1, i2, i3 must not be 0
-#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \
- VMOVQ_SI_X15(i0*8); \
- VMOVQ_SI_X11(i2*8); \
- VPINSRQ_1_SI_X15(i1*8); \
- VPINSRQ_1_SI_X11(i3*8); \
- VINSERTI128 $1, X11, Y15, Y15
-
-#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \
- VMOVQ_SI_X12_0; \
- VMOVQ_SI_X11(4*8); \
- VPINSRQ_1_SI_X12(2*8); \
- VPINSRQ_1_SI_X11(6*8); \
- VINSERTI128 $1, X11, Y12, Y12; \
- LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \
- LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \
- LOAD_MSG_AVX2_Y15(9, 11, 13, 15)
-
-#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \
- LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \
- LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \
- VMOVQ_SI_X11(11*8); \
- VPSHUFD $0x4E, 0*8(SI), X14; \
- VPINSRQ_1_SI_X11(5*8); \
- VINSERTI128 $1, X11, Y14, Y14; \
- LOAD_MSG_AVX2_Y15(12, 2, 7, 3)
-
-#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \
- VMOVQ_SI_X11(5*8); \
- VMOVDQU 11*8(SI), X12; \
- VPINSRQ_1_SI_X11(15*8); \
- VINSERTI128 $1, X11, Y12, Y12; \
- VMOVQ_SI_X13(8*8); \
- VMOVQ_SI_X11(2*8); \
- VPINSRQ_1_SI_X13_0; \
- VPINSRQ_1_SI_X11(13*8); \
- VINSERTI128 $1, X11, Y13, Y13; \
- LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \
- LOAD_MSG_AVX2_Y15(14, 6, 1, 4)
-
-#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \
- LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \
- LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \
- LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \
- VMOVQ_SI_X15(6*8); \
- VMOVQ_SI_X11_0; \
- VPINSRQ_1_SI_X15(10*8); \
- VPINSRQ_1_SI_X11(8*8); \
- VINSERTI128 $1, X11, Y15, Y15
-
-#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \
- LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \
- VMOVQ_SI_X13_0; \
- VMOVQ_SI_X11(4*8); \
- VPINSRQ_1_SI_X13(7*8); \
- VPINSRQ_1_SI_X11(15*8); \
- VINSERTI128 $1, X11, Y13, Y13; \
- LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \
- LOAD_MSG_AVX2_Y15(1, 12, 8, 13)
-
-#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \
- VMOVQ_SI_X12(2*8); \
- VMOVQ_SI_X11_0; \
- VPINSRQ_1_SI_X12(6*8); \
- VPINSRQ_1_SI_X11(8*8); \
- VINSERTI128 $1, X11, Y12, Y12; \
- LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \
- LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \
- LOAD_MSG_AVX2_Y15(13, 5, 14, 9)
-
-#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \
- LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \
- LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \
- VMOVQ_SI_X14_0; \
- VPSHUFD $0x4E, 8*8(SI), X11; \
- VPINSRQ_1_SI_X14(6*8); \
- VINSERTI128 $1, X11, Y14, Y14; \
- LOAD_MSG_AVX2_Y15(7, 3, 2, 11)
-
-#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \
- LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \
- LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \
- LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \
- VMOVQ_SI_X15_0; \
- VMOVQ_SI_X11(6*8); \
- VPINSRQ_1_SI_X15(4*8); \
- VPINSRQ_1_SI_X11(10*8); \
- VINSERTI128 $1, X11, Y15, Y15
-
-#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \
- VMOVQ_SI_X12(6*8); \
- VMOVQ_SI_X11(11*8); \
- VPINSRQ_1_SI_X12(14*8); \
- VPINSRQ_1_SI_X11_0; \
- VINSERTI128 $1, X11, Y12, Y12; \
- LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \
- VMOVQ_SI_X11(1*8); \
- VMOVDQU 12*8(SI), X14; \
- VPINSRQ_1_SI_X11(10*8); \
- VINSERTI128 $1, X11, Y14, Y14; \
- VMOVQ_SI_X15(2*8); \
- VMOVDQU 4*8(SI), X11; \
- VPINSRQ_1_SI_X15(7*8); \
- VINSERTI128 $1, X11, Y15, Y15
-
-#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \
- LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \
- VMOVQ_SI_X13(2*8); \
- VPSHUFD $0x4E, 5*8(SI), X11; \
- VPINSRQ_1_SI_X13(4*8); \
- VINSERTI128 $1, X11, Y13, Y13; \
- LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \
- VMOVQ_SI_X15(11*8); \
- VMOVQ_SI_X11(12*8); \
- VPINSRQ_1_SI_X15(14*8); \
- VPINSRQ_1_SI_X11_0; \
- VINSERTI128 $1, X11, Y15, Y15
-
// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment
- MOVQ h+0(FP), AX
- MOVQ c+8(FP), BX
- MOVQ flag+16(FP), CX
- MOVQ blocks_base+24(FP), SI
- MOVQ blocks_len+32(FP), DI
-
- MOVQ SP, DX
- ADDQ $31, DX
- ANDQ $~31, DX
-
- MOVQ CX, 16(DX)
- XORQ CX, CX
- MOVQ CX, 24(DX)
-
- VMOVDQU ·AVX2_c40<>(SB), Y4
- VMOVDQU ·AVX2_c48<>(SB), Y5
-
- VMOVDQU 0(AX), Y8
+// Requires: AVX, AVX2
+TEXT ·hashBlocksAVX2(SB), NOSPLIT, $320-48
+ MOVQ h+0(FP), AX
+ MOVQ c+8(FP), BX
+ MOVQ flag+16(FP), CX
+ MOVQ blocks_base+24(FP), SI
+ MOVQ blocks_len+32(FP), DI
+ MOVQ SP, DX
+ ADDQ $+31, DX
+ ANDQ $-32, DX
+ MOVQ CX, 16(DX)
+ XORQ CX, CX
+ MOVQ CX, 24(DX)
+ VMOVDQU ·AVX2_c40<>+0(SB), Y4
+ VMOVDQU ·AVX2_c48<>+0(SB), Y5
+ VMOVDQU (AX), Y8
VMOVDQU 32(AX), Y9
- VMOVDQU ·AVX2_iv0<>(SB), Y6
- VMOVDQU ·AVX2_iv1<>(SB), Y7
-
- MOVQ 0(BX), R8
- MOVQ 8(BX), R9
- MOVQ R9, 8(DX)
+ VMOVDQU ·AVX2_iv0<>+0(SB), Y6
+ VMOVDQU ·AVX2_iv1<>+0(SB), Y7
+ MOVQ (BX), R8
+ MOVQ 8(BX), R9
+ MOVQ R9, 8(DX)
loop:
- ADDQ $128, R8
- MOVQ R8, 0(DX)
- CMPQ R8, $128
+ ADDQ $0x80, R8
+ MOVQ R8, (DX)
+ CMPQ R8, $0x80
JGE noinc
INCQ R9
MOVQ R9, 8(DX)
noinc:
- VMOVDQA Y8, Y0
- VMOVDQA Y9, Y1
- VMOVDQA Y6, Y2
- VPXOR 0(DX), Y7, Y3
-
- LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15()
- VMOVDQA Y12, 32(DX)
- VMOVDQA Y13, 64(DX)
- VMOVDQA Y14, 96(DX)
- VMOVDQA Y15, 128(DX)
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3()
- VMOVDQA Y12, 160(DX)
- VMOVDQA Y13, 192(DX)
- VMOVDQA Y14, 224(DX)
- VMOVDQA Y15, 256(DX)
-
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
- LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0()
- ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
-
- ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5)
- ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5)
-
- VPXOR Y0, Y8, Y8
- VPXOR Y1, Y9, Y9
- VPXOR Y2, Y8, Y8
- VPXOR Y3, Y9, Y9
-
- LEAQ 128(SI), SI
- SUBQ $128, DI
- JNE loop
-
- MOVQ R8, 0(BX)
- MOVQ R9, 8(BX)
-
- VMOVDQU Y8, 0(AX)
- VMOVDQU Y9, 32(AX)
+ VMOVDQA Y8, Y0
+ VMOVDQA Y9, Y1
+ VMOVDQA Y6, Y2
+ VPXOR (DX), Y7, Y3
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x26
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x20
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x10
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x30
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y12, Y12
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x08
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x28
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x18
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x38
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y13, Y13
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x40
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x60
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x50
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x70
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y14, Y14
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x48
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x68
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x58
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x78
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y15, Y15
+ VMOVDQA Y12, 32(DX)
+ VMOVDQA Y13, 64(DX)
+ VMOVDQA Y14, 96(DX)
+ VMOVDQA Y15, 128(DX)
+ VPADDQ Y12, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y13, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x93
+ VPADDQ Y14, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y15, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x93
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x70
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x48
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x20
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x68
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y12, Y12
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x50
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x78
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x40
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x30
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y13, Y13
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x58
+ VPSHUFD $0x4e, (SI), X14
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x28
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y14, Y14
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x60
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x38
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x10
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x18
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y15, Y15
+ VMOVDQA Y12, 160(DX)
+ VMOVDQA Y13, 192(DX)
+ VMOVDQA Y14, 224(DX)
+ VMOVDQA Y15, 256(DX)
+ VPADDQ Y12, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y13, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x93
+ VPADDQ Y14, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y15, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x93
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x28
+ VMOVDQU 88(SI), X12
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x78
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y12, Y12
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x40
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x10
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x2e
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x68
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y13, Y13
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x50
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x38
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x18
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x48
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y14, Y14
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x70
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x08
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x30
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x20
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y15, Y15
+ VPADDQ Y12, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y13, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x93
+ VPADDQ Y14, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y15, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x93
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x38
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x68
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x18
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x58
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y12, Y12
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x48
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x60
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x08
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x70
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y13, Y13
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x10
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x20
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x28
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x78
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y14, Y14
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x30
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x1e
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x50
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x40
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y15, Y15
+ VPADDQ Y12, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y13, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x93
+ VPADDQ Y14, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y15, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x93
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x48
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x10
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x28
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x50
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y12, Y12
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x2e
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x20
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x38
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x78
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y13, Y13
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x70
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x30
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x58
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x18
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y14, Y14
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x08
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x40
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x60
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x68
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y15, Y15
+ VPADDQ Y12, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y13, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x93
+ VPADDQ Y14, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y15, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x93
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x10
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x1e
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x30
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x40
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y12, Y12
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x60
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x58
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x50
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x18
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y13, Y13
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x20
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x78
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x38
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x08
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y14, Y14
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x68
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x70
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x28
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x48
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y15, Y15
+ VPADDQ Y12, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y13, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x93
+ VPADDQ Y14, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y15, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x93
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x60
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x70
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x08
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x20
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y12, Y12
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x28
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x68
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x78
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x50
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y13, Y13
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x36
+ VPSHUFD $0x4e, 64(SI), X11
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x30
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y14, Y14
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x38
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x10
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x18
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x58
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y15, Y15
+ VPADDQ Y12, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y13, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x93
+ VPADDQ Y14, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y15, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x93
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x68
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x60
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x38
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x18
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y12, Y12
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x58
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x08
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x70
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x48
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y13, Y13
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x28
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x40
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x78
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x10
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y14, Y14
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x3e
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x30
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x20
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x50
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y15, Y15
+ VPADDQ Y12, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y13, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x93
+ VPADDQ Y14, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y15, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x93
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x30
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x58
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x70
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x1e
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y12, Y12
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x78
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x18
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x48
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x40
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y13, Y13
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x08
+ VMOVDQU 96(SI), X14
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x50
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y14, Y14
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x10
+ VMOVDQU 32(SI), X11
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x38
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y15, Y15
+ VPADDQ Y12, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y13, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x93
+ VPADDQ Y14, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y15, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x93
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x50
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x38
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x40
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x08
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y12, Y12
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x10
+ VPSHUFD $0x4e, 40(SI), X11
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x20
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y13, Y13
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x78
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x18
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x48
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x5e
+ BYTE $0x68
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y14, Y14
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x58
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x5e
+ BYTE $0x60
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x70
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0xa1
+ BYTE $0x22
+ BYTE $0x1e
+ BYTE $0x01
+ VINSERTI128 $0x01, X11, Y15, Y15
+ VPADDQ Y12, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y13, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x93
+ VPADDQ Y14, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ Y15, Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x93
+ VPADDQ 32(DX), Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ 64(DX), Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x93
+ VPADDQ 96(DX), Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ 128(DX), Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x93
+ VPADDQ 160(DX), Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ 192(DX), Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x93
+ VPADDQ 224(DX), Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFD $-79, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPSHUFB Y4, Y1, Y1
+ VPADDQ 256(DX), Y0, Y0
+ VPADDQ Y1, Y0, Y0
+ VPXOR Y0, Y3, Y3
+ VPSHUFB Y5, Y3, Y3
+ VPADDQ Y3, Y2, Y2
+ VPXOR Y2, Y1, Y1
+ VPADDQ Y1, Y1, Y10
+ VPSRLQ $0x3f, Y1, Y1
+ VPXOR Y10, Y1, Y1
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xdb
+ BYTE $0x39
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xd2
+ BYTE $0x4e
+ BYTE $0xc4
+ BYTE $0xe3
+ BYTE $0xfd
+ BYTE $0x00
+ BYTE $0xc9
+ BYTE $0x93
+ VPXOR Y0, Y8, Y8
+ VPXOR Y1, Y9, Y9
+ VPXOR Y2, Y8, Y8
+ VPXOR Y3, Y9, Y9
+ LEAQ 128(SI), SI
+ SUBQ $0x80, DI
+ JNE loop
+ MOVQ R8, (BX)
+ MOVQ R9, 8(BX)
+ VMOVDQU Y8, (AX)
+ VMOVDQU Y9, 32(AX)
VZEROUPPER
-
RET
-#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA
-#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB
-#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF
-#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD
-#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE
-
-#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7
-#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF
-#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7
-#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF
-#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7
-#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7
-#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF
-#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF
-
-#define SHUFFLE_AVX() \
- VMOVDQA X6, X13; \
- VMOVDQA X2, X14; \
- VMOVDQA X4, X6; \
- VPUNPCKLQDQ_X13_X13_X15; \
- VMOVDQA X5, X4; \
- VMOVDQA X6, X5; \
- VPUNPCKHQDQ_X15_X7_X6; \
- VPUNPCKLQDQ_X7_X7_X15; \
- VPUNPCKHQDQ_X15_X13_X7; \
- VPUNPCKLQDQ_X3_X3_X15; \
- VPUNPCKHQDQ_X15_X2_X2; \
- VPUNPCKLQDQ_X14_X14_X15; \
- VPUNPCKHQDQ_X15_X3_X3; \
-
-#define SHUFFLE_AVX_INV() \
- VMOVDQA X2, X13; \
- VMOVDQA X4, X14; \
- VPUNPCKLQDQ_X2_X2_X15; \
- VMOVDQA X5, X4; \
- VPUNPCKHQDQ_X15_X3_X2; \
- VMOVDQA X14, X5; \
- VPUNPCKLQDQ_X3_X3_X15; \
- VMOVDQA X6, X14; \
- VPUNPCKHQDQ_X15_X13_X3; \
- VPUNPCKLQDQ_X7_X7_X15; \
- VPUNPCKHQDQ_X15_X6_X6; \
- VPUNPCKLQDQ_X14_X14_X15; \
- VPUNPCKHQDQ_X15_X7_X7; \
-
-#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
- VPADDQ m0, v0, v0; \
- VPADDQ v2, v0, v0; \
- VPADDQ m1, v1, v1; \
- VPADDQ v3, v1, v1; \
- VPXOR v0, v6, v6; \
- VPXOR v1, v7, v7; \
- VPSHUFD $-79, v6, v6; \
- VPSHUFD $-79, v7, v7; \
- VPADDQ v6, v4, v4; \
- VPADDQ v7, v5, v5; \
- VPXOR v4, v2, v2; \
- VPXOR v5, v3, v3; \
- VPSHUFB c40, v2, v2; \
- VPSHUFB c40, v3, v3; \
- VPADDQ m2, v0, v0; \
- VPADDQ v2, v0, v0; \
- VPADDQ m3, v1, v1; \
- VPADDQ v3, v1, v1; \
- VPXOR v0, v6, v6; \
- VPXOR v1, v7, v7; \
- VPSHUFB c48, v6, v6; \
- VPSHUFB c48, v7, v7; \
- VPADDQ v6, v4, v4; \
- VPADDQ v7, v5, v5; \
- VPXOR v4, v2, v2; \
- VPXOR v5, v3, v3; \
- VPADDQ v2, v2, t0; \
- VPSRLQ $63, v2, v2; \
- VPXOR t0, v2, v2; \
- VPADDQ v3, v3, t0; \
- VPSRLQ $63, v3, v3; \
- VPXOR t0, v3, v3
-
-// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7)
-// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0
-#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \
- VMOVQ_SI_X12(i0*8); \
- VMOVQ_SI_X13(i2*8); \
- VMOVQ_SI_X14(i4*8); \
- VMOVQ_SI_X15(i6*8); \
- VPINSRQ_1_SI_X12(i1*8); \
- VPINSRQ_1_SI_X13(i3*8); \
- VPINSRQ_1_SI_X14(i5*8); \
- VPINSRQ_1_SI_X15(i7*8)
-
-// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7)
-#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \
- VMOVQ_SI_X12_0; \
- VMOVQ_SI_X13(4*8); \
- VMOVQ_SI_X14(1*8); \
- VMOVQ_SI_X15(5*8); \
- VPINSRQ_1_SI_X12(2*8); \
- VPINSRQ_1_SI_X13(6*8); \
- VPINSRQ_1_SI_X14(3*8); \
- VPINSRQ_1_SI_X15(7*8)
-
-// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3)
-#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \
- VPSHUFD $0x4E, 0*8(SI), X12; \
- VMOVQ_SI_X13(11*8); \
- VMOVQ_SI_X14(12*8); \
- VMOVQ_SI_X15(7*8); \
- VPINSRQ_1_SI_X13(5*8); \
- VPINSRQ_1_SI_X14(2*8); \
- VPINSRQ_1_SI_X15(3*8)
-
-// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13)
-#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \
- VMOVDQU 11*8(SI), X12; \
- VMOVQ_SI_X13(5*8); \
- VMOVQ_SI_X14(8*8); \
- VMOVQ_SI_X15(2*8); \
- VPINSRQ_1_SI_X13(15*8); \
- VPINSRQ_1_SI_X14_0; \
- VPINSRQ_1_SI_X15(13*8)
-
-// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8)
-#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \
- VMOVQ_SI_X12(2*8); \
- VMOVQ_SI_X13(4*8); \
- VMOVQ_SI_X14(6*8); \
- VMOVQ_SI_X15_0; \
- VPINSRQ_1_SI_X12(5*8); \
- VPINSRQ_1_SI_X13(15*8); \
- VPINSRQ_1_SI_X14(10*8); \
- VPINSRQ_1_SI_X15(8*8)
+DATA ·AVX2_c40<>+0(SB)/8, $0x0201000706050403
+DATA ·AVX2_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b
+DATA ·AVX2_c40<>+16(SB)/8, $0x0201000706050403
+DATA ·AVX2_c40<>+24(SB)/8, $0x0a09080f0e0d0c0b
+GLOBL ·AVX2_c40<>(SB), RODATA|NOPTR, $32
-// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15)
-#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \
- VMOVQ_SI_X12(9*8); \
- VMOVQ_SI_X13(2*8); \
- VMOVQ_SI_X14_0; \
- VMOVQ_SI_X15(4*8); \
- VPINSRQ_1_SI_X12(5*8); \
- VPINSRQ_1_SI_X13(10*8); \
- VPINSRQ_1_SI_X14(7*8); \
- VPINSRQ_1_SI_X15(15*8)
+DATA ·AVX2_c48<>+0(SB)/8, $0x0100070605040302
+DATA ·AVX2_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a
+DATA ·AVX2_c48<>+16(SB)/8, $0x0100070605040302
+DATA ·AVX2_c48<>+24(SB)/8, $0x09080f0e0d0c0b0a
+GLOBL ·AVX2_c48<>(SB), RODATA|NOPTR, $32
-// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3)
-#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \
- VMOVQ_SI_X12(2*8); \
- VMOVQ_SI_X13_0; \
- VMOVQ_SI_X14(12*8); \
- VMOVQ_SI_X15(11*8); \
- VPINSRQ_1_SI_X12(6*8); \
- VPINSRQ_1_SI_X13(8*8); \
- VPINSRQ_1_SI_X14(10*8); \
- VPINSRQ_1_SI_X15(3*8)
+DATA ·AVX2_iv0<>+0(SB)/8, $0x6a09e667f3bcc908
+DATA ·AVX2_iv0<>+8(SB)/8, $0xbb67ae8584caa73b
+DATA ·AVX2_iv0<>+16(SB)/8, $0x3c6ef372fe94f82b
+DATA ·AVX2_iv0<>+24(SB)/8, $0xa54ff53a5f1d36f1
+GLOBL ·AVX2_iv0<>(SB), RODATA|NOPTR, $32
-// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11)
-#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \
- MOVQ 0*8(SI), X12; \
- VPSHUFD $0x4E, 8*8(SI), X13; \
- MOVQ 7*8(SI), X14; \
- MOVQ 2*8(SI), X15; \
- VPINSRQ_1_SI_X12(6*8); \
- VPINSRQ_1_SI_X14(3*8); \
- VPINSRQ_1_SI_X15(11*8)
-
-// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8)
-#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \
- MOVQ 6*8(SI), X12; \
- MOVQ 11*8(SI), X13; \
- MOVQ 15*8(SI), X14; \
- MOVQ 3*8(SI), X15; \
- VPINSRQ_1_SI_X12(14*8); \
- VPINSRQ_1_SI_X13_0; \
- VPINSRQ_1_SI_X14(9*8); \
- VPINSRQ_1_SI_X15(8*8)
-
-// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10)
-#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \
- MOVQ 5*8(SI), X12; \
- MOVQ 8*8(SI), X13; \
- MOVQ 0*8(SI), X14; \
- MOVQ 6*8(SI), X15; \
- VPINSRQ_1_SI_X12(15*8); \
- VPINSRQ_1_SI_X13(2*8); \
- VPINSRQ_1_SI_X14(4*8); \
- VPINSRQ_1_SI_X15(10*8)
-
-// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5)
-#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \
- VMOVDQU 12*8(SI), X12; \
- MOVQ 1*8(SI), X13; \
- MOVQ 2*8(SI), X14; \
- VPINSRQ_1_SI_X13(10*8); \
- VPINSRQ_1_SI_X14(7*8); \
- VMOVDQU 4*8(SI), X15
-
-// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0)
-#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \
- MOVQ 15*8(SI), X12; \
- MOVQ 3*8(SI), X13; \
- MOVQ 11*8(SI), X14; \
- MOVQ 12*8(SI), X15; \
- VPINSRQ_1_SI_X12(9*8); \
- VPINSRQ_1_SI_X13(13*8); \
- VPINSRQ_1_SI_X14(14*8); \
- VPINSRQ_1_SI_X15_0
+DATA ·AVX2_iv1<>+0(SB)/8, $0x510e527fade682d1
+DATA ·AVX2_iv1<>+8(SB)/8, $0x9b05688c2b3e6c1f
+DATA ·AVX2_iv1<>+16(SB)/8, $0x1f83d9abfb41bd6b
+DATA ·AVX2_iv1<>+24(SB)/8, $0x5be0cd19137e2179
+GLOBL ·AVX2_iv1<>(SB), RODATA|NOPTR, $32
// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
- MOVQ h+0(FP), AX
- MOVQ c+8(FP), BX
- MOVQ flag+16(FP), CX
- MOVQ blocks_base+24(FP), SI
- MOVQ blocks_len+32(FP), DI
-
- MOVQ SP, R10
- ADDQ $15, R10
- ANDQ $~15, R10
-
- VMOVDQU ·AVX_c40<>(SB), X0
- VMOVDQU ·AVX_c48<>(SB), X1
+// Requires: AVX, SSE2
+TEXT ·hashBlocksAVX(SB), NOSPLIT, $288-48
+ MOVQ h+0(FP), AX
+ MOVQ c+8(FP), BX
+ MOVQ flag+16(FP), CX
+ MOVQ blocks_base+24(FP), SI
+ MOVQ blocks_len+32(FP), DI
+ MOVQ SP, R10
+ ADDQ $0x0f, R10
+ ANDQ $-16, R10
+ VMOVDQU ·AVX_c40<>+0(SB), X0
+ VMOVDQU ·AVX_c48<>+0(SB), X1
VMOVDQA X0, X8
VMOVDQA X1, X9
-
- VMOVDQU ·AVX_iv3<>(SB), X0
- VMOVDQA X0, 0(R10)
- XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0)
-
- VMOVDQU 0(AX), X10
+ VMOVDQU ·AVX_iv3<>+0(SB), X0
+ VMOVDQA X0, (R10)
+ XORQ CX, (R10)
+ VMOVDQU (AX), X10
VMOVDQU 16(AX), X11
VMOVDQU 32(AX), X2
VMOVDQU 48(AX), X3
-
- MOVQ 0(BX), R8
- MOVQ 8(BX), R9
+ MOVQ (BX), R8
+ MOVQ 8(BX), R9
loop:
- ADDQ $128, R8
- CMPQ R8, $128
+ ADDQ $0x80, R8
+ CMPQ R8, $0x80
JGE noinc
INCQ R9
noinc:
- VMOVQ_R8_X15
- VPINSRQ_1_R9_X15
-
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0xf9
+ BYTE $0x6e
+ BYTE $0xf8
+ BYTE $0xc4
+ BYTE $0x43
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0xf9
+ BYTE $0x01
VMOVDQA X10, X0
VMOVDQA X11, X1
- VMOVDQU ·AVX_iv0<>(SB), X4
- VMOVDQU ·AVX_iv1<>(SB), X5
- VMOVDQU ·AVX_iv2<>(SB), X6
-
+ VMOVDQU ·AVX_iv0<>+0(SB), X4
+ VMOVDQU ·AVX_iv1<>+0(SB), X5
+ VMOVDQU ·AVX_iv2<>+0(SB), X6
VPXOR X15, X6, X6
- VMOVDQA 0(R10), X7
-
- LOAD_MSG_AVX_0_2_4_6_1_3_5_7()
+ VMOVDQA (R10), X7
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x26
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x20
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x08
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x28
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x10
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x30
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x18
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x38
+ BYTE $0x01
VMOVDQA X12, 16(R10)
VMOVDQA X13, 32(R10)
VMOVDQA X14, 48(R10)
VMOVDQA X15, 64(R10)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15)
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X6, X13
+ VMOVDQA X2, X14
+ VMOVDQA X4, X6
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x11
+ BYTE $0x6c
+ BYTE $0xfd
+ VMOVDQA X5, X4
+ VMOVDQA X6, X5
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x69
+ BYTE $0x6d
+ BYTE $0xd7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xdf
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x40
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x60
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x48
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x68
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x50
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x70
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x58
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x78
+ BYTE $0x01
VMOVDQA X12, 80(R10)
VMOVDQA X13, 96(R10)
VMOVDQA X14, 112(R10)
VMOVDQA X15, 128(R10)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6)
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X2, X13
+ VMOVDQA X4, X14
+ BYTE $0xc5
+ BYTE $0x69
+ BYTE $0x6c
+ BYTE $0xfa
+ VMOVDQA X5, X4
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xd7
+ VMOVDQA X14, X5
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ VMOVDQA X6, X14
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xdf
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x49
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x70
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x48
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x50
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x78
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x20
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x68
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x40
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x30
+ BYTE $0x01
VMOVDQA X12, 144(R10)
VMOVDQA X13, 160(R10)
VMOVDQA X14, 176(R10)
VMOVDQA X15, 192(R10)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_1_0_11_5_12_2_7_3()
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X6, X13
+ VMOVDQA X2, X14
+ VMOVDQA X4, X6
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x11
+ BYTE $0x6c
+ BYTE $0xfd
+ VMOVDQA X5, X4
+ VMOVDQA X6, X5
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x69
+ BYTE $0x6d
+ BYTE $0xd7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xdf
+ VPSHUFD $0x4e, (SI), X12
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x58
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x60
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x38
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x28
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x10
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x18
+ BYTE $0x01
VMOVDQA X12, 208(R10)
VMOVDQA X13, 224(R10)
VMOVDQA X14, 240(R10)
VMOVDQA X15, 256(R10)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX_11_12_5_15_8_0_2_13()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_2_5_4_15_6_10_0_8()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX_9_5_2_10_0_7_4_15()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX_2_6_0_8_12_10_11_3()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_0_6_9_8_7_3_2_11()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_5_15_8_2_0_4_6_10()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX_6_14_11_0_15_9_3_8()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_12_13_1_10_2_7_4_5()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5)
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX()
- LOAD_MSG_AVX_15_9_3_13_11_14_12_0()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9)
- SHUFFLE_AVX()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9)
- SHUFFLE_AVX_INV()
-
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9)
- SHUFFLE_AVX()
- HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9)
- SHUFFLE_AVX_INV()
-
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X2, X13
+ VMOVDQA X4, X14
+ BYTE $0xc5
+ BYTE $0x69
+ BYTE $0x6c
+ BYTE $0xfa
+ VMOVDQA X5, X4
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xd7
+ VMOVDQA X14, X5
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ VMOVDQA X6, X14
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xdf
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x49
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xff
+ VMOVDQU 88(SI), X12
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x28
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x40
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x10
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x78
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x36
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x68
+ BYTE $0x01
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X6, X13
+ VMOVDQA X2, X14
+ VMOVDQA X4, X6
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x11
+ BYTE $0x6c
+ BYTE $0xfd
+ VMOVDQA X5, X4
+ VMOVDQA X6, X5
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x69
+ BYTE $0x6d
+ BYTE $0xd7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xdf
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x50
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x38
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x70
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x08
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x18
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x48
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x30
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x20
+ BYTE $0x01
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X2, X13
+ VMOVDQA X4, X14
+ BYTE $0xc5
+ BYTE $0x69
+ BYTE $0x6c
+ BYTE $0xfa
+ VMOVDQA X5, X4
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xd7
+ VMOVDQA X14, X5
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ VMOVDQA X6, X14
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xdf
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x49
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x38
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x68
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x48
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x60
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x18
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x58
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x08
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x70
+ BYTE $0x01
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X6, X13
+ VMOVDQA X2, X14
+ VMOVDQA X4, X6
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x11
+ BYTE $0x6c
+ BYTE $0xfd
+ VMOVDQA X5, X4
+ VMOVDQA X6, X5
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x69
+ BYTE $0x6d
+ BYTE $0xd7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xdf
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x10
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x20
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x30
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x3e
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x28
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x78
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x50
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x40
+ BYTE $0x01
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X2, X13
+ VMOVDQA X4, X14
+ BYTE $0xc5
+ BYTE $0x69
+ BYTE $0x6c
+ BYTE $0xfa
+ VMOVDQA X5, X4
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xd7
+ VMOVDQA X14, X5
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ VMOVDQA X6, X14
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xdf
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x49
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x48
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x10
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x36
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x20
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x28
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x50
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x38
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x78
+ BYTE $0x01
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X6, X13
+ VMOVDQA X2, X14
+ VMOVDQA X4, X6
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x11
+ BYTE $0x6c
+ BYTE $0xfd
+ VMOVDQA X5, X4
+ VMOVDQA X6, X5
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x69
+ BYTE $0x6d
+ BYTE $0xd7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xdf
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x70
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x30
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x08
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x40
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x58
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x18
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x60
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x68
+ BYTE $0x01
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X2, X13
+ VMOVDQA X4, X14
+ BYTE $0xc5
+ BYTE $0x69
+ BYTE $0x6c
+ BYTE $0xfa
+ VMOVDQA X5, X4
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xd7
+ VMOVDQA X14, X5
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ VMOVDQA X6, X14
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xdf
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x49
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x10
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x2e
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x60
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x58
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x30
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x40
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x50
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x18
+ BYTE $0x01
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X6, X13
+ VMOVDQA X2, X14
+ VMOVDQA X4, X6
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x11
+ BYTE $0x6c
+ BYTE $0xfd
+ VMOVDQA X5, X4
+ VMOVDQA X6, X5
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x69
+ BYTE $0x6d
+ BYTE $0xd7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xdf
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x20
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x78
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x68
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x70
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x38
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x08
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x28
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x48
+ BYTE $0x01
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X2, X13
+ VMOVDQA X4, X14
+ BYTE $0xc5
+ BYTE $0x69
+ BYTE $0x6c
+ BYTE $0xfa
+ VMOVDQA X5, X4
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xd7
+ VMOVDQA X14, X5
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ VMOVDQA X6, X14
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xdf
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x49
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x60
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x70
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x28
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x68
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x08
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x20
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x78
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x50
+ BYTE $0x01
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X6, X13
+ VMOVDQA X2, X14
+ VMOVDQA X4, X6
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x11
+ BYTE $0x6c
+ BYTE $0xfd
+ VMOVDQA X5, X4
+ VMOVDQA X6, X5
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x69
+ BYTE $0x6d
+ BYTE $0xd7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xdf
+ MOVQ (SI), X12
+ VPSHUFD $0x4e, 64(SI), X13
+ MOVQ 56(SI), X14
+ MOVQ 16(SI), X15
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x30
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x18
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x58
+ BYTE $0x01
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X2, X13
+ VMOVDQA X4, X14
+ BYTE $0xc5
+ BYTE $0x69
+ BYTE $0x6c
+ BYTE $0xfa
+ VMOVDQA X5, X4
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xd7
+ VMOVDQA X14, X5
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ VMOVDQA X6, X14
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xdf
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x49
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x68
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x60
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x58
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x08
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x38
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x18
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x70
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x48
+ BYTE $0x01
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X6, X13
+ VMOVDQA X2, X14
+ VMOVDQA X4, X6
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x11
+ BYTE $0x6c
+ BYTE $0xfd
+ VMOVDQA X5, X4
+ VMOVDQA X6, X5
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x69
+ BYTE $0x6d
+ BYTE $0xd7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xdf
+ MOVQ 40(SI), X12
+ MOVQ 64(SI), X13
+ MOVQ (SI), X14
+ MOVQ 48(SI), X15
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x78
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x10
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x20
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x50
+ BYTE $0x01
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X2, X13
+ VMOVDQA X4, X14
+ BYTE $0xc5
+ BYTE $0x69
+ BYTE $0x6c
+ BYTE $0xfa
+ VMOVDQA X5, X4
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xd7
+ VMOVDQA X14, X5
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ VMOVDQA X6, X14
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xdf
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x49
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xff
+ MOVQ 48(SI), X12
+ MOVQ 88(SI), X13
+ MOVQ 120(SI), X14
+ MOVQ 24(SI), X15
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x70
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x2e
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x48
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x40
+ BYTE $0x01
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X6, X13
+ VMOVDQA X2, X14
+ VMOVDQA X4, X6
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x11
+ BYTE $0x6c
+ BYTE $0xfd
+ VMOVDQA X5, X4
+ VMOVDQA X6, X5
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x69
+ BYTE $0x6d
+ BYTE $0xd7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xdf
+ VMOVDQU 96(SI), X12
+ MOVQ 8(SI), X13
+ MOVQ 16(SI), X14
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x50
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x38
+ BYTE $0x01
+ VMOVDQU 32(SI), X15
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X2, X13
+ VMOVDQA X4, X14
+ BYTE $0xc5
+ BYTE $0x69
+ BYTE $0x6c
+ BYTE $0xfa
+ VMOVDQA X5, X4
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xd7
+ VMOVDQA X14, X5
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ VMOVDQA X6, X14
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xdf
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x49
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x66
+ BYTE $0x50
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x6e
+ BYTE $0x38
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x76
+ BYTE $0x10
+ BYTE $0xc5
+ BYTE $0x7a
+ BYTE $0x7e
+ BYTE $0x7e
+ BYTE $0x30
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x40
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x08
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x20
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x7e
+ BYTE $0x28
+ BYTE $0x01
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X6, X13
+ VMOVDQA X2, X14
+ VMOVDQA X4, X6
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x11
+ BYTE $0x6c
+ BYTE $0xfd
+ VMOVDQA X5, X4
+ VMOVDQA X6, X5
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x69
+ BYTE $0x6d
+ BYTE $0xd7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xdf
+ MOVQ 120(SI), X12
+ MOVQ 24(SI), X13
+ MOVQ 88(SI), X14
+ MOVQ 96(SI), X15
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x99
+ BYTE $0x22
+ BYTE $0x66
+ BYTE $0x48
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x91
+ BYTE $0x22
+ BYTE $0x6e
+ BYTE $0x68
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x89
+ BYTE $0x22
+ BYTE $0x76
+ BYTE $0x70
+ BYTE $0x01
+ BYTE $0xc4
+ BYTE $0x63
+ BYTE $0x81
+ BYTE $0x22
+ BYTE $0x3e
+ BYTE $0x01
+ VPADDQ X12, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X13, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ X14, X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ X15, X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X2, X13
+ VMOVDQA X4, X14
+ BYTE $0xc5
+ BYTE $0x69
+ BYTE $0x6c
+ BYTE $0xfa
+ VMOVDQA X5, X4
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xd7
+ VMOVDQA X14, X5
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ VMOVDQA X6, X14
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xdf
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x49
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xff
+ VPADDQ 16(R10), X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ 32(R10), X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ 48(R10), X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ 64(R10), X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X6, X13
+ VMOVDQA X2, X14
+ VMOVDQA X4, X6
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x11
+ BYTE $0x6c
+ BYTE $0xfd
+ VMOVDQA X5, X4
+ VMOVDQA X6, X5
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x69
+ BYTE $0x6d
+ BYTE $0xd7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xdf
+ VPADDQ 80(R10), X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ 96(R10), X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ 112(R10), X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ 128(R10), X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X2, X13
+ VMOVDQA X4, X14
+ BYTE $0xc5
+ BYTE $0x69
+ BYTE $0x6c
+ BYTE $0xfa
+ VMOVDQA X5, X4
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xd7
+ VMOVDQA X14, X5
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ VMOVDQA X6, X14
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xdf
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x49
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xff
+ VPADDQ 144(R10), X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ 160(R10), X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ 176(R10), X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ 192(R10), X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X6, X13
+ VMOVDQA X2, X14
+ VMOVDQA X4, X6
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x11
+ BYTE $0x6c
+ BYTE $0xfd
+ VMOVDQA X5, X4
+ VMOVDQA X6, X5
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xff
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x69
+ BYTE $0x6d
+ BYTE $0xd7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xdf
+ VPADDQ 208(R10), X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ 224(R10), X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFD $-79, X6, X6
+ VPSHUFD $-79, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPSHUFB X8, X2, X2
+ VPSHUFB X8, X3, X3
+ VPADDQ 240(R10), X0, X0
+ VPADDQ X2, X0, X0
+ VPADDQ 256(R10), X1, X1
+ VPADDQ X3, X1, X1
+ VPXOR X0, X6, X6
+ VPXOR X1, X7, X7
+ VPSHUFB X9, X6, X6
+ VPSHUFB X9, X7, X7
+ VPADDQ X6, X4, X4
+ VPADDQ X7, X5, X5
+ VPXOR X4, X2, X2
+ VPXOR X5, X3, X3
+ VPADDQ X2, X2, X15
+ VPSRLQ $0x3f, X2, X2
+ VPXOR X15, X2, X2
+ VPADDQ X3, X3, X15
+ VPSRLQ $0x3f, X3, X3
+ VPXOR X15, X3, X3
+ VMOVDQA X2, X13
+ VMOVDQA X4, X14
+ BYTE $0xc5
+ BYTE $0x69
+ BYTE $0x6c
+ BYTE $0xfa
+ VMOVDQA X5, X4
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x61
+ BYTE $0x6d
+ BYTE $0xd7
+ VMOVDQA X14, X5
+ BYTE $0xc5
+ BYTE $0x61
+ BYTE $0x6c
+ BYTE $0xfb
+ VMOVDQA X6, X14
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x11
+ BYTE $0x6d
+ BYTE $0xdf
+ BYTE $0xc5
+ BYTE $0x41
+ BYTE $0x6c
+ BYTE $0xff
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x49
+ BYTE $0x6d
+ BYTE $0xf7
+ BYTE $0xc4
+ BYTE $0x41
+ BYTE $0x09
+ BYTE $0x6c
+ BYTE $0xfe
+ BYTE $0xc4
+ BYTE $0xc1
+ BYTE $0x41
+ BYTE $0x6d
+ BYTE $0xff
VMOVDQU 32(AX), X14
VMOVDQU 48(AX), X15
VPXOR X0, X10, X10
@@ -729,16 +4524,36 @@ noinc:
VPXOR X7, X15, X3
VMOVDQU X2, 32(AX)
VMOVDQU X3, 48(AX)
+ LEAQ 128(SI), SI
+ SUBQ $0x80, DI
+ JNE loop
+ VMOVDQU X10, (AX)
+ VMOVDQU X11, 16(AX)
+ MOVQ R8, (BX)
+ MOVQ R9, 8(BX)
+ VZEROUPPER
+ RET
- LEAQ 128(SI), SI
- SUBQ $128, DI
- JNE loop
+DATA ·AVX_c40<>+0(SB)/8, $0x0201000706050403
+DATA ·AVX_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b
+GLOBL ·AVX_c40<>(SB), RODATA|NOPTR, $16
- VMOVDQU X10, 0(AX)
- VMOVDQU X11, 16(AX)
+DATA ·AVX_c48<>+0(SB)/8, $0x0100070605040302
+DATA ·AVX_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a
+GLOBL ·AVX_c48<>(SB), RODATA|NOPTR, $16
- MOVQ R8, 0(BX)
- MOVQ R9, 8(BX)
- VZEROUPPER
+DATA ·AVX_iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b
+DATA ·AVX_iv3<>+8(SB)/8, $0x5be0cd19137e2179
+GLOBL ·AVX_iv3<>(SB), RODATA|NOPTR, $16
- RET
+DATA ·AVX_iv0<>+0(SB)/8, $0x6a09e667f3bcc908
+DATA ·AVX_iv0<>+8(SB)/8, $0xbb67ae8584caa73b
+GLOBL ·AVX_iv0<>(SB), RODATA|NOPTR, $16
+
+DATA ·AVX_iv1<>+0(SB)/8, $0x3c6ef372fe94f82b
+DATA ·AVX_iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1
+GLOBL ·AVX_iv1<>(SB), RODATA|NOPTR, $16
+
+DATA ·AVX_iv2<>+0(SB)/8, $0x510e527fade682d1
+DATA ·AVX_iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f
+GLOBL ·AVX_iv2<>(SB), RODATA|NOPTR, $16
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
index adfac00c..9a0ce212 100644
--- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
+++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
@@ -1,278 +1,1441 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+// Code generated by command: go run blake2b_amd64_asm.go -out ../../blake2b_amd64.s -pkg blake2b. DO NOT EDIT.
//go:build amd64 && gc && !purego
#include "textflag.h"
-DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
-DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
-GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
-DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
-GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1
-DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
-GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
-DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
-GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16
-
-DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
-DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
-
-DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
-DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
-
-#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
- MOVO v4, t1; \
- MOVO v5, v4; \
- MOVO t1, v5; \
- MOVO v6, t1; \
- PUNPCKLQDQ v6, t2; \
- PUNPCKHQDQ v7, v6; \
- PUNPCKHQDQ t2, v6; \
- PUNPCKLQDQ v7, t2; \
- MOVO t1, v7; \
- MOVO v2, t1; \
- PUNPCKHQDQ t2, v7; \
- PUNPCKLQDQ v3, t2; \
- PUNPCKHQDQ t2, v2; \
- PUNPCKLQDQ t1, t2; \
- PUNPCKHQDQ t2, v3
-
-#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
- MOVO v4, t1; \
- MOVO v5, v4; \
- MOVO t1, v5; \
- MOVO v2, t1; \
- PUNPCKLQDQ v2, t2; \
- PUNPCKHQDQ v3, v2; \
- PUNPCKHQDQ t2, v2; \
- PUNPCKLQDQ v3, t2; \
- MOVO t1, v3; \
- MOVO v6, t1; \
- PUNPCKHQDQ t2, v3; \
- PUNPCKLQDQ v7, t2; \
- PUNPCKHQDQ t2, v6; \
- PUNPCKLQDQ t1, t2; \
- PUNPCKHQDQ t2, v7
-
-#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
- PADDQ m0, v0; \
- PADDQ m1, v1; \
- PADDQ v2, v0; \
- PADDQ v3, v1; \
- PXOR v0, v6; \
- PXOR v1, v7; \
- PSHUFD $0xB1, v6, v6; \
- PSHUFD $0xB1, v7, v7; \
- PADDQ v6, v4; \
- PADDQ v7, v5; \
- PXOR v4, v2; \
- PXOR v5, v3; \
- PSHUFB c40, v2; \
- PSHUFB c40, v3; \
- PADDQ m2, v0; \
- PADDQ m3, v1; \
- PADDQ v2, v0; \
- PADDQ v3, v1; \
- PXOR v0, v6; \
- PXOR v1, v7; \
- PSHUFB c48, v6; \
- PSHUFB c48, v7; \
- PADDQ v6, v4; \
- PADDQ v7, v5; \
- PXOR v4, v2; \
- PXOR v5, v3; \
- MOVOU v2, t0; \
- PADDQ v2, t0; \
- PSRLQ $63, v2; \
- PXOR t0, v2; \
- MOVOU v3, t0; \
- PADDQ v3, t0; \
- PSRLQ $63, v3; \
- PXOR t0, v3
-
-#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \
- MOVQ i0*8(src), m0; \
- PINSRQ $1, i1*8(src), m0; \
- MOVQ i2*8(src), m1; \
- PINSRQ $1, i3*8(src), m1; \
- MOVQ i4*8(src), m2; \
- PINSRQ $1, i5*8(src), m2; \
- MOVQ i6*8(src), m3; \
- PINSRQ $1, i7*8(src), m3
-
// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
- MOVQ h+0(FP), AX
- MOVQ c+8(FP), BX
- MOVQ flag+16(FP), CX
- MOVQ blocks_base+24(FP), SI
- MOVQ blocks_len+32(FP), DI
-
- MOVQ SP, R10
- ADDQ $15, R10
- ANDQ $~15, R10
-
- MOVOU ·iv3<>(SB), X0
- MOVO X0, 0(R10)
- XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0)
-
- MOVOU ·c40<>(SB), X13
- MOVOU ·c48<>(SB), X14
-
- MOVOU 0(AX), X12
+// Requires: SSE2, SSE4.1, SSSE3
+TEXT ·hashBlocksSSE4(SB), NOSPLIT, $288-48
+ MOVQ h+0(FP), AX
+ MOVQ c+8(FP), BX
+ MOVQ flag+16(FP), CX
+ MOVQ blocks_base+24(FP), SI
+ MOVQ blocks_len+32(FP), DI
+ MOVQ SP, R10
+ ADDQ $0x0f, R10
+ ANDQ $-16, R10
+ MOVOU ·iv3<>+0(SB), X0
+ MOVO X0, (R10)
+ XORQ CX, (R10)
+ MOVOU ·c40<>+0(SB), X13
+ MOVOU ·c48<>+0(SB), X14
+ MOVOU (AX), X12
MOVOU 16(AX), X15
-
- MOVQ 0(BX), R8
- MOVQ 8(BX), R9
+ MOVQ (BX), R8
+ MOVQ 8(BX), R9
loop:
- ADDQ $128, R8
- CMPQ R8, $128
+ ADDQ $0x80, R8
+ CMPQ R8, $0x80
JGE noinc
INCQ R9
noinc:
- MOVQ R8, X8
- PINSRQ $1, R9, X8
-
- MOVO X12, X0
- MOVO X15, X1
- MOVOU 32(AX), X2
- MOVOU 48(AX), X3
- MOVOU ·iv0<>(SB), X4
- MOVOU ·iv1<>(SB), X5
- MOVOU ·iv2<>(SB), X6
-
- PXOR X8, X6
- MOVO 0(R10), X7
-
- LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7)
- MOVO X8, 16(R10)
- MOVO X9, 32(R10)
- MOVO X10, 48(R10)
- MOVO X11, 64(R10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15)
- MOVO X8, 80(R10)
- MOVO X9, 96(R10)
- MOVO X10, 112(R10)
- MOVO X11, 128(R10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6)
- MOVO X8, 144(R10)
- MOVO X9, 160(R10)
- MOVO X10, 176(R10)
- MOVO X11, 192(R10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3)
- MOVO X8, 208(R10)
- MOVO X9, 224(R10)
- MOVO X10, 240(R10)
- MOVO X11, 256(R10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
-
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
+ MOVQ R8, X8
+ PINSRQ $0x01, R9, X8
+ MOVO X12, X0
+ MOVO X15, X1
+ MOVOU 32(AX), X2
+ MOVOU 48(AX), X3
+ MOVOU ·iv0<>+0(SB), X4
+ MOVOU ·iv1<>+0(SB), X5
+ MOVOU ·iv2<>+0(SB), X6
+ PXOR X8, X6
+ MOVO (R10), X7
+ MOVQ (SI), X8
+ PINSRQ $0x01, 16(SI), X8
+ MOVQ 32(SI), X9
+ PINSRQ $0x01, 48(SI), X9
+ MOVQ 8(SI), X10
+ PINSRQ $0x01, 24(SI), X10
+ MOVQ 40(SI), X11
+ PINSRQ $0x01, 56(SI), X11
+ MOVO X8, 16(R10)
+ MOVO X9, 32(R10)
+ MOVO X10, 48(R10)
+ MOVO X11, 64(R10)
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVQ 64(SI), X8
+ PINSRQ $0x01, 80(SI), X8
+ MOVQ 96(SI), X9
+ PINSRQ $0x01, 112(SI), X9
+ MOVQ 72(SI), X10
+ PINSRQ $0x01, 88(SI), X10
+ MOVQ 104(SI), X11
+ PINSRQ $0x01, 120(SI), X11
+ MOVO X8, 80(R10)
+ MOVO X9, 96(R10)
+ MOVO X10, 112(R10)
+ MOVO X11, 128(R10)
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVQ 112(SI), X8
+ PINSRQ $0x01, 32(SI), X8
+ MOVQ 72(SI), X9
+ PINSRQ $0x01, 104(SI), X9
+ MOVQ 80(SI), X10
+ PINSRQ $0x01, 64(SI), X10
+ MOVQ 120(SI), X11
+ PINSRQ $0x01, 48(SI), X11
+ MOVO X8, 144(R10)
+ MOVO X9, 160(R10)
+ MOVO X10, 176(R10)
+ MOVO X11, 192(R10)
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVQ 8(SI), X8
+ PINSRQ $0x01, (SI), X8
+ MOVQ 88(SI), X9
+ PINSRQ $0x01, 40(SI), X9
+ MOVQ 96(SI), X10
+ PINSRQ $0x01, 16(SI), X10
+ MOVQ 56(SI), X11
+ PINSRQ $0x01, 24(SI), X11
+ MOVO X8, 208(R10)
+ MOVO X9, 224(R10)
+ MOVO X10, 240(R10)
+ MOVO X11, 256(R10)
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVQ 88(SI), X8
+ PINSRQ $0x01, 96(SI), X8
+ MOVQ 40(SI), X9
+ PINSRQ $0x01, 120(SI), X9
+ MOVQ 64(SI), X10
+ PINSRQ $0x01, (SI), X10
+ MOVQ 16(SI), X11
+ PINSRQ $0x01, 104(SI), X11
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVQ 80(SI), X8
+ PINSRQ $0x01, 24(SI), X8
+ MOVQ 56(SI), X9
+ PINSRQ $0x01, 72(SI), X9
+ MOVQ 112(SI), X10
+ PINSRQ $0x01, 48(SI), X10
+ MOVQ 8(SI), X11
+ PINSRQ $0x01, 32(SI), X11
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVQ 56(SI), X8
+ PINSRQ $0x01, 24(SI), X8
+ MOVQ 104(SI), X9
+ PINSRQ $0x01, 88(SI), X9
+ MOVQ 72(SI), X10
+ PINSRQ $0x01, 8(SI), X10
+ MOVQ 96(SI), X11
+ PINSRQ $0x01, 112(SI), X11
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVQ 16(SI), X8
+ PINSRQ $0x01, 40(SI), X8
+ MOVQ 32(SI), X9
+ PINSRQ $0x01, 120(SI), X9
+ MOVQ 48(SI), X10
+ PINSRQ $0x01, 80(SI), X10
+ MOVQ (SI), X11
+ PINSRQ $0x01, 64(SI), X11
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVQ 72(SI), X8
+ PINSRQ $0x01, 40(SI), X8
+ MOVQ 16(SI), X9
+ PINSRQ $0x01, 80(SI), X9
+ MOVQ (SI), X10
+ PINSRQ $0x01, 56(SI), X10
+ MOVQ 32(SI), X11
+ PINSRQ $0x01, 120(SI), X11
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVQ 112(SI), X8
+ PINSRQ $0x01, 88(SI), X8
+ MOVQ 48(SI), X9
+ PINSRQ $0x01, 24(SI), X9
+ MOVQ 8(SI), X10
+ PINSRQ $0x01, 96(SI), X10
+ MOVQ 64(SI), X11
+ PINSRQ $0x01, 104(SI), X11
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVQ 16(SI), X8
+ PINSRQ $0x01, 48(SI), X8
+ MOVQ (SI), X9
+ PINSRQ $0x01, 64(SI), X9
+ MOVQ 96(SI), X10
+ PINSRQ $0x01, 80(SI), X10
+ MOVQ 88(SI), X11
+ PINSRQ $0x01, 24(SI), X11
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVQ 32(SI), X8
+ PINSRQ $0x01, 56(SI), X8
+ MOVQ 120(SI), X9
+ PINSRQ $0x01, 8(SI), X9
+ MOVQ 104(SI), X10
+ PINSRQ $0x01, 40(SI), X10
+ MOVQ 112(SI), X11
+ PINSRQ $0x01, 72(SI), X11
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVQ 96(SI), X8
+ PINSRQ $0x01, 8(SI), X8
+ MOVQ 112(SI), X9
+ PINSRQ $0x01, 32(SI), X9
+ MOVQ 40(SI), X10
+ PINSRQ $0x01, 120(SI), X10
+ MOVQ 104(SI), X11
+ PINSRQ $0x01, 80(SI), X11
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVQ (SI), X8
+ PINSRQ $0x01, 48(SI), X8
+ MOVQ 72(SI), X9
+ PINSRQ $0x01, 64(SI), X9
+ MOVQ 56(SI), X10
+ PINSRQ $0x01, 24(SI), X10
+ MOVQ 16(SI), X11
+ PINSRQ $0x01, 88(SI), X11
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVQ 104(SI), X8
+ PINSRQ $0x01, 56(SI), X8
+ MOVQ 96(SI), X9
+ PINSRQ $0x01, 24(SI), X9
+ MOVQ 88(SI), X10
+ PINSRQ $0x01, 112(SI), X10
+ MOVQ 8(SI), X11
+ PINSRQ $0x01, 72(SI), X11
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVQ 40(SI), X8
+ PINSRQ $0x01, 120(SI), X8
+ MOVQ 64(SI), X9
+ PINSRQ $0x01, 16(SI), X9
+ MOVQ (SI), X10
+ PINSRQ $0x01, 32(SI), X10
+ MOVQ 48(SI), X11
+ PINSRQ $0x01, 80(SI), X11
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVQ 48(SI), X8
+ PINSRQ $0x01, 112(SI), X8
+ MOVQ 88(SI), X9
+ PINSRQ $0x01, (SI), X9
+ MOVQ 120(SI), X10
+ PINSRQ $0x01, 72(SI), X10
+ MOVQ 24(SI), X11
+ PINSRQ $0x01, 64(SI), X11
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVQ 96(SI), X8
+ PINSRQ $0x01, 104(SI), X8
+ MOVQ 8(SI), X9
+ PINSRQ $0x01, 80(SI), X9
+ MOVQ 16(SI), X10
+ PINSRQ $0x01, 56(SI), X10
+ MOVQ 32(SI), X11
+ PINSRQ $0x01, 40(SI), X11
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVQ 80(SI), X8
+ PINSRQ $0x01, 64(SI), X8
+ MOVQ 56(SI), X9
+ PINSRQ $0x01, 8(SI), X9
+ MOVQ 16(SI), X10
+ PINSRQ $0x01, 32(SI), X10
+ MOVQ 48(SI), X11
+ PINSRQ $0x01, 40(SI), X11
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ MOVQ 120(SI), X8
+ PINSRQ $0x01, 72(SI), X8
+ MOVQ 24(SI), X9
+ PINSRQ $0x01, 104(SI), X9
+ MOVQ 88(SI), X10
+ PINSRQ $0x01, 112(SI), X10
+ MOVQ 96(SI), X11
+ PINSRQ $0x01, (SI), X11
+ PADDQ X8, X0
+ PADDQ X9, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ X10, X0
+ PADDQ X11, X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ PADDQ 16(R10), X0
+ PADDQ 32(R10), X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ 48(R10), X0
+ PADDQ 64(R10), X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ PADDQ 80(R10), X0
+ PADDQ 96(R10), X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ 112(R10), X0
+ PADDQ 128(R10), X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ PADDQ 144(R10), X0
+ PADDQ 160(R10), X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ 176(R10), X0
+ PADDQ 192(R10), X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X6, X8
+ PUNPCKLQDQ X6, X9
+ PUNPCKHQDQ X7, X6
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X7, X9
+ MOVO X8, X7
+ MOVO X2, X8
+ PUNPCKHQDQ X9, X7
+ PUNPCKLQDQ X3, X9
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X3
+ PADDQ 208(R10), X0
+ PADDQ 224(R10), X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFD $0xb1, X6, X6
+ PSHUFD $0xb1, X7, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ PSHUFB X13, X2
+ PSHUFB X13, X3
+ PADDQ 240(R10), X0
+ PADDQ 256(R10), X1
+ PADDQ X2, X0
+ PADDQ X3, X1
+ PXOR X0, X6
+ PXOR X1, X7
+ PSHUFB X14, X6
+ PSHUFB X14, X7
+ PADDQ X6, X4
+ PADDQ X7, X5
+ PXOR X4, X2
+ PXOR X5, X3
+ MOVOU X2, X11
+ PADDQ X2, X11
+ PSRLQ $0x3f, X2
+ PXOR X11, X2
+ MOVOU X3, X11
+ PADDQ X3, X11
+ PSRLQ $0x3f, X3
+ PXOR X11, X3
+ MOVO X4, X8
+ MOVO X5, X4
+ MOVO X8, X5
+ MOVO X2, X8
+ PUNPCKLQDQ X2, X9
+ PUNPCKHQDQ X3, X2
+ PUNPCKHQDQ X9, X2
+ PUNPCKLQDQ X3, X9
+ MOVO X8, X3
+ MOVO X6, X8
+ PUNPCKHQDQ X9, X3
+ PUNPCKLQDQ X7, X9
+ PUNPCKHQDQ X9, X6
+ PUNPCKLQDQ X8, X9
+ PUNPCKHQDQ X9, X7
+ MOVOU 32(AX), X10
+ MOVOU 48(AX), X11
+ PXOR X0, X12
+ PXOR X1, X15
+ PXOR X2, X10
+ PXOR X3, X11
+ PXOR X4, X12
+ PXOR X5, X15
+ PXOR X6, X10
+ PXOR X7, X11
+ MOVOU X10, 32(AX)
+ MOVOU X11, 48(AX)
+ LEAQ 128(SI), SI
+ SUBQ $0x80, DI
+ JNE loop
+ MOVOU X12, (AX)
+ MOVOU X15, 16(AX)
+ MOVQ R8, (BX)
+ MOVQ R9, 8(BX)
+ RET
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14)
- SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
- HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14)
- SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
+DATA ·iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b
+DATA ·iv3<>+8(SB)/8, $0x5be0cd19137e2179
+GLOBL ·iv3<>(SB), RODATA|NOPTR, $16
- MOVOU 32(AX), X10
- MOVOU 48(AX), X11
- PXOR X0, X12
- PXOR X1, X15
- PXOR X2, X10
- PXOR X3, X11
- PXOR X4, X12
- PXOR X5, X15
- PXOR X6, X10
- PXOR X7, X11
- MOVOU X10, 32(AX)
- MOVOU X11, 48(AX)
+DATA ·c40<>+0(SB)/8, $0x0201000706050403
+DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b
+GLOBL ·c40<>(SB), RODATA|NOPTR, $16
- LEAQ 128(SI), SI
- SUBQ $128, DI
- JNE loop
+DATA ·c48<>+0(SB)/8, $0x0100070605040302
+DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a
+GLOBL ·c48<>(SB), RODATA|NOPTR, $16
- MOVOU X12, 0(AX)
- MOVOU X15, 16(AX)
+DATA ·iv0<>+0(SB)/8, $0x6a09e667f3bcc908
+DATA ·iv0<>+8(SB)/8, $0xbb67ae8584caa73b
+GLOBL ·iv0<>(SB), RODATA|NOPTR, $16
- MOVQ R8, 0(BX)
- MOVQ R9, 8(BX)
+DATA ·iv1<>+0(SB)/8, $0x3c6ef372fe94f82b
+DATA ·iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1
+GLOBL ·iv1<>(SB), RODATA|NOPTR, $16
- RET
+DATA ·iv2<>+0(SB)/8, $0x510e527fade682d1
+DATA ·iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f
+GLOBL ·iv2<>(SB), RODATA|NOPTR, $16
diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
index 1f539388..99e2f16e 100644
--- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
+++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
@@ -1,390 +1,5419 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+// Code generated by command: go run keccakf_amd64_asm.go -out ../keccakf_amd64.s -pkg sha3. DO NOT EDIT.
//go:build amd64 && !purego && gc
-// This code was translated into a form compatible with 6a from the public
-// domain sources at https://github.com/gvanas/KeccakCodePackage
-
-// Offsets in state
-#define _ba (0*8)
-#define _be (1*8)
-#define _bi (2*8)
-#define _bo (3*8)
-#define _bu (4*8)
-#define _ga (5*8)
-#define _ge (6*8)
-#define _gi (7*8)
-#define _go (8*8)
-#define _gu (9*8)
-#define _ka (10*8)
-#define _ke (11*8)
-#define _ki (12*8)
-#define _ko (13*8)
-#define _ku (14*8)
-#define _ma (15*8)
-#define _me (16*8)
-#define _mi (17*8)
-#define _mo (18*8)
-#define _mu (19*8)
-#define _sa (20*8)
-#define _se (21*8)
-#define _si (22*8)
-#define _so (23*8)
-#define _su (24*8)
-
-// Temporary registers
-#define rT1 AX
-
-// Round vars
-#define rpState DI
-#define rpStack SP
-
-#define rDa BX
-#define rDe CX
-#define rDi DX
-#define rDo R8
-#define rDu R9
-
-#define rBa R10
-#define rBe R11
-#define rBi R12
-#define rBo R13
-#define rBu R14
-
-#define rCa SI
-#define rCe BP
-#define rCi rBi
-#define rCo rBo
-#define rCu R15
-
-#define MOVQ_RBI_RCE MOVQ rBi, rCe
-#define XORQ_RT1_RCA XORQ rT1, rCa
-#define XORQ_RT1_RCE XORQ rT1, rCe
-#define XORQ_RBA_RCU XORQ rBa, rCu
-#define XORQ_RBE_RCU XORQ rBe, rCu
-#define XORQ_RDU_RCU XORQ rDu, rCu
-#define XORQ_RDA_RCA XORQ rDa, rCa
-#define XORQ_RDE_RCE XORQ rDe, rCe
-
-#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \
- /* Prepare round */ \
- MOVQ rCe, rDa; \
- ROLQ $1, rDa; \
- \
- MOVQ _bi(iState), rCi; \
- XORQ _gi(iState), rDi; \
- XORQ rCu, rDa; \
- XORQ _ki(iState), rCi; \
- XORQ _mi(iState), rDi; \
- XORQ rDi, rCi; \
- \
- MOVQ rCi, rDe; \
- ROLQ $1, rDe; \
- \
- MOVQ _bo(iState), rCo; \
- XORQ _go(iState), rDo; \
- XORQ rCa, rDe; \
- XORQ _ko(iState), rCo; \
- XORQ _mo(iState), rDo; \
- XORQ rDo, rCo; \
- \
- MOVQ rCo, rDi; \
- ROLQ $1, rDi; \
- \
- MOVQ rCu, rDo; \
- XORQ rCe, rDi; \
- ROLQ $1, rDo; \
- \
- MOVQ rCa, rDu; \
- XORQ rCi, rDo; \
- ROLQ $1, rDu; \
- \
- /* Result b */ \
- MOVQ _ba(iState), rBa; \
- MOVQ _ge(iState), rBe; \
- XORQ rCo, rDu; \
- MOVQ _ki(iState), rBi; \
- MOVQ _mo(iState), rBo; \
- MOVQ _su(iState), rBu; \
- XORQ rDe, rBe; \
- ROLQ $44, rBe; \
- XORQ rDi, rBi; \
- XORQ rDa, rBa; \
- ROLQ $43, rBi; \
- \
- MOVQ rBe, rCa; \
- MOVQ rc, rT1; \
- ORQ rBi, rCa; \
- XORQ rBa, rT1; \
- XORQ rT1, rCa; \
- MOVQ rCa, _ba(oState); \
- \
- XORQ rDu, rBu; \
- ROLQ $14, rBu; \
- MOVQ rBa, rCu; \
- ANDQ rBe, rCu; \
- XORQ rBu, rCu; \
- MOVQ rCu, _bu(oState); \
- \
- XORQ rDo, rBo; \
- ROLQ $21, rBo; \
- MOVQ rBo, rT1; \
- ANDQ rBu, rT1; \
- XORQ rBi, rT1; \
- MOVQ rT1, _bi(oState); \
- \
- NOTQ rBi; \
- ORQ rBa, rBu; \
- ORQ rBo, rBi; \
- XORQ rBo, rBu; \
- XORQ rBe, rBi; \
- MOVQ rBu, _bo(oState); \
- MOVQ rBi, _be(oState); \
- B_RBI_RCE; \
- \
- /* Result g */ \
- MOVQ _gu(iState), rBe; \
- XORQ rDu, rBe; \
- MOVQ _ka(iState), rBi; \
- ROLQ $20, rBe; \
- XORQ rDa, rBi; \
- ROLQ $3, rBi; \
- MOVQ _bo(iState), rBa; \
- MOVQ rBe, rT1; \
- ORQ rBi, rT1; \
- XORQ rDo, rBa; \
- MOVQ _me(iState), rBo; \
- MOVQ _si(iState), rBu; \
- ROLQ $28, rBa; \
- XORQ rBa, rT1; \
- MOVQ rT1, _ga(oState); \
- G_RT1_RCA; \
- \
- XORQ rDe, rBo; \
- ROLQ $45, rBo; \
- MOVQ rBi, rT1; \
- ANDQ rBo, rT1; \
- XORQ rBe, rT1; \
- MOVQ rT1, _ge(oState); \
- G_RT1_RCE; \
- \
- XORQ rDi, rBu; \
- ROLQ $61, rBu; \
- MOVQ rBu, rT1; \
- ORQ rBa, rT1; \
- XORQ rBo, rT1; \
- MOVQ rT1, _go(oState); \
- \
- ANDQ rBe, rBa; \
- XORQ rBu, rBa; \
- MOVQ rBa, _gu(oState); \
- NOTQ rBu; \
- G_RBA_RCU; \
- \
- ORQ rBu, rBo; \
- XORQ rBi, rBo; \
- MOVQ rBo, _gi(oState); \
- \
- /* Result k */ \
- MOVQ _be(iState), rBa; \
- MOVQ _gi(iState), rBe; \
- MOVQ _ko(iState), rBi; \
- MOVQ _mu(iState), rBo; \
- MOVQ _sa(iState), rBu; \
- XORQ rDi, rBe; \
- ROLQ $6, rBe; \
- XORQ rDo, rBi; \
- ROLQ $25, rBi; \
- MOVQ rBe, rT1; \
- ORQ rBi, rT1; \
- XORQ rDe, rBa; \
- ROLQ $1, rBa; \
- XORQ rBa, rT1; \
- MOVQ rT1, _ka(oState); \
- K_RT1_RCA; \
- \
- XORQ rDu, rBo; \
- ROLQ $8, rBo; \
- MOVQ rBi, rT1; \
- ANDQ rBo, rT1; \
- XORQ rBe, rT1; \
- MOVQ rT1, _ke(oState); \
- K_RT1_RCE; \
- \
- XORQ rDa, rBu; \
- ROLQ $18, rBu; \
- NOTQ rBo; \
- MOVQ rBo, rT1; \
- ANDQ rBu, rT1; \
- XORQ rBi, rT1; \
- MOVQ rT1, _ki(oState); \
- \
- MOVQ rBu, rT1; \
- ORQ rBa, rT1; \
- XORQ rBo, rT1; \
- MOVQ rT1, _ko(oState); \
- \
- ANDQ rBe, rBa; \
- XORQ rBu, rBa; \
- MOVQ rBa, _ku(oState); \
- K_RBA_RCU; \
- \
- /* Result m */ \
- MOVQ _ga(iState), rBe; \
- XORQ rDa, rBe; \
- MOVQ _ke(iState), rBi; \
- ROLQ $36, rBe; \
- XORQ rDe, rBi; \
- MOVQ _bu(iState), rBa; \
- ROLQ $10, rBi; \
- MOVQ rBe, rT1; \
- MOVQ _mi(iState), rBo; \
- ANDQ rBi, rT1; \
- XORQ rDu, rBa; \
- MOVQ _so(iState), rBu; \
- ROLQ $27, rBa; \
- XORQ rBa, rT1; \
- MOVQ rT1, _ma(oState); \
- M_RT1_RCA; \
- \
- XORQ rDi, rBo; \
- ROLQ $15, rBo; \
- MOVQ rBi, rT1; \
- ORQ rBo, rT1; \
- XORQ rBe, rT1; \
- MOVQ rT1, _me(oState); \
- M_RT1_RCE; \
- \
- XORQ rDo, rBu; \
- ROLQ $56, rBu; \
- NOTQ rBo; \
- MOVQ rBo, rT1; \
- ORQ rBu, rT1; \
- XORQ rBi, rT1; \
- MOVQ rT1, _mi(oState); \
- \
- ORQ rBa, rBe; \
- XORQ rBu, rBe; \
- MOVQ rBe, _mu(oState); \
- \
- ANDQ rBa, rBu; \
- XORQ rBo, rBu; \
- MOVQ rBu, _mo(oState); \
- M_RBE_RCU; \
- \
- /* Result s */ \
- MOVQ _bi(iState), rBa; \
- MOVQ _go(iState), rBe; \
- MOVQ _ku(iState), rBi; \
- XORQ rDi, rBa; \
- MOVQ _ma(iState), rBo; \
- ROLQ $62, rBa; \
- XORQ rDo, rBe; \
- MOVQ _se(iState), rBu; \
- ROLQ $55, rBe; \
- \
- XORQ rDu, rBi; \
- MOVQ rBa, rDu; \
- XORQ rDe, rBu; \
- ROLQ $2, rBu; \
- ANDQ rBe, rDu; \
- XORQ rBu, rDu; \
- MOVQ rDu, _su(oState); \
- \
- ROLQ $39, rBi; \
- S_RDU_RCU; \
- NOTQ rBe; \
- XORQ rDa, rBo; \
- MOVQ rBe, rDa; \
- ANDQ rBi, rDa; \
- XORQ rBa, rDa; \
- MOVQ rDa, _sa(oState); \
- S_RDA_RCA; \
- \
- ROLQ $41, rBo; \
- MOVQ rBi, rDe; \
- ORQ rBo, rDe; \
- XORQ rBe, rDe; \
- MOVQ rDe, _se(oState); \
- S_RDE_RCE; \
- \
- MOVQ rBo, rDi; \
- MOVQ rBu, rDo; \
- ANDQ rBu, rDi; \
- ORQ rBa, rDo; \
- XORQ rBi, rDi; \
- XORQ rBo, rDo; \
- MOVQ rDi, _si(oState); \
- MOVQ rDo, _so(oState) \
-
// func keccakF1600(a *[25]uint64)
-TEXT ·keccakF1600(SB), 0, $200-8
- MOVQ a+0(FP), rpState
+TEXT ·keccakF1600(SB), $200-8
+ MOVQ a+0(FP), DI
// Convert the user state into an internal state
- NOTQ _be(rpState)
- NOTQ _bi(rpState)
- NOTQ _go(rpState)
- NOTQ _ki(rpState)
- NOTQ _mi(rpState)
- NOTQ _sa(rpState)
+ NOTQ 8(DI)
+ NOTQ 16(DI)
+ NOTQ 64(DI)
+ NOTQ 96(DI)
+ NOTQ 136(DI)
+ NOTQ 160(DI)
// Execute the KeccakF permutation
- MOVQ _ba(rpState), rCa
- MOVQ _be(rpState), rCe
- MOVQ _bu(rpState), rCu
-
- XORQ _ga(rpState), rCa
- XORQ _ge(rpState), rCe
- XORQ _gu(rpState), rCu
-
- XORQ _ka(rpState), rCa
- XORQ _ke(rpState), rCe
- XORQ _ku(rpState), rCu
-
- XORQ _ma(rpState), rCa
- XORQ _me(rpState), rCe
- XORQ _mu(rpState), rCu
-
- XORQ _sa(rpState), rCa
- XORQ _se(rpState), rCe
- MOVQ _si(rpState), rDi
- MOVQ _so(rpState), rDo
- XORQ _su(rpState), rCu
-
- mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
- mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP)
+ MOVQ (DI), SI
+ MOVQ 8(DI), BP
+ MOVQ 32(DI), R15
+ XORQ 40(DI), SI
+ XORQ 48(DI), BP
+ XORQ 72(DI), R15
+ XORQ 80(DI), SI
+ XORQ 88(DI), BP
+ XORQ 112(DI), R15
+ XORQ 120(DI), SI
+ XORQ 128(DI), BP
+ XORQ 152(DI), R15
+ XORQ 160(DI), SI
+ XORQ 168(DI), BP
+ MOVQ 176(DI), DX
+ MOVQ 184(DI), R8
+ XORQ 192(DI), R15
- // Revert the internal state to the user state
- NOTQ _be(rpState)
- NOTQ _bi(rpState)
- NOTQ _go(rpState)
- NOTQ _ki(rpState)
- NOTQ _mi(rpState)
- NOTQ _sa(rpState)
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(DI), R12
+ XORQ 56(DI), DX
+ XORQ R15, BX
+ XORQ 96(DI), R12
+ XORQ 136(DI), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(DI), R13
+ XORQ 64(DI), R8
+ XORQ SI, CX
+ XORQ 104(DI), R13
+ XORQ 144(DI), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (DI), R10
+ MOVQ 48(DI), R11
+ XORQ R13, R9
+ MOVQ 96(DI), R12
+ MOVQ 144(DI), R13
+ MOVQ 192(DI), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x0000000000000001, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (SP)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(SP)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(SP)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(SP)
+ MOVQ R12, 8(SP)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(DI), R11
+ XORQ R9, R11
+ MOVQ 80(DI), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(DI), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(DI), R13
+ MOVQ 176(DI), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(SP)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(SP)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(SP)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(SP)
+
+ // Result k
+ MOVQ 8(DI), R10
+ MOVQ 56(DI), R11
+ MOVQ 104(DI), R12
+ MOVQ 152(DI), R13
+ MOVQ 160(DI), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(SP)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(SP)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(SP)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(SP)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(DI), R11
+ XORQ BX, R11
+ MOVQ 88(DI), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(DI), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(DI), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(DI), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(SP)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(SP)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(SP)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(SP)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(SP)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(DI), R10
+ MOVQ 64(DI), R11
+ MOVQ 112(DI), R12
+ XORQ DX, R10
+ MOVQ 120(DI), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(DI), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(SP)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(SP)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(SP)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(SP)
+ MOVQ R8, 184(SP)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(SP), R12
+ XORQ 56(SP), DX
+ XORQ R15, BX
+ XORQ 96(SP), R12
+ XORQ 136(SP), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(SP), R13
+ XORQ 64(SP), R8
+ XORQ SI, CX
+ XORQ 104(SP), R13
+ XORQ 144(SP), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (SP), R10
+ MOVQ 48(SP), R11
+ XORQ R13, R9
+ MOVQ 96(SP), R12
+ MOVQ 144(SP), R13
+ MOVQ 192(SP), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x0000000000008082, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (DI)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(DI)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(DI)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(DI)
+ MOVQ R12, 8(DI)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(SP), R11
+ XORQ R9, R11
+ MOVQ 80(SP), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(SP), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(SP), R13
+ MOVQ 176(SP), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(DI)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(DI)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(DI)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(DI)
+
+ // Result k
+ MOVQ 8(SP), R10
+ MOVQ 56(SP), R11
+ MOVQ 104(SP), R12
+ MOVQ 152(SP), R13
+ MOVQ 160(SP), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(DI)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(DI)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(DI)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(DI)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(SP), R11
+ XORQ BX, R11
+ MOVQ 88(SP), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(SP), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(SP), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(SP), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(DI)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(DI)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(DI)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(DI)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(DI)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(SP), R10
+ MOVQ 64(SP), R11
+ MOVQ 112(SP), R12
+ XORQ DX, R10
+ MOVQ 120(SP), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(SP), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(DI)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(DI)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(DI)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(DI)
+ MOVQ R8, 184(DI)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(DI), R12
+ XORQ 56(DI), DX
+ XORQ R15, BX
+ XORQ 96(DI), R12
+ XORQ 136(DI), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(DI), R13
+ XORQ 64(DI), R8
+ XORQ SI, CX
+ XORQ 104(DI), R13
+ XORQ 144(DI), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (DI), R10
+ MOVQ 48(DI), R11
+ XORQ R13, R9
+ MOVQ 96(DI), R12
+ MOVQ 144(DI), R13
+ MOVQ 192(DI), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x800000000000808a, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (SP)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(SP)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(SP)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(SP)
+ MOVQ R12, 8(SP)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(DI), R11
+ XORQ R9, R11
+ MOVQ 80(DI), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(DI), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(DI), R13
+ MOVQ 176(DI), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(SP)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(SP)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(SP)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(SP)
+
+ // Result k
+ MOVQ 8(DI), R10
+ MOVQ 56(DI), R11
+ MOVQ 104(DI), R12
+ MOVQ 152(DI), R13
+ MOVQ 160(DI), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(SP)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(SP)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(SP)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(SP)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(DI), R11
+ XORQ BX, R11
+ MOVQ 88(DI), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(DI), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(DI), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(DI), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(SP)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(SP)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(SP)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(SP)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(SP)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(DI), R10
+ MOVQ 64(DI), R11
+ MOVQ 112(DI), R12
+ XORQ DX, R10
+ MOVQ 120(DI), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(DI), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(SP)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(SP)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(SP)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(SP)
+ MOVQ R8, 184(SP)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(SP), R12
+ XORQ 56(SP), DX
+ XORQ R15, BX
+ XORQ 96(SP), R12
+ XORQ 136(SP), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(SP), R13
+ XORQ 64(SP), R8
+ XORQ SI, CX
+ XORQ 104(SP), R13
+ XORQ 144(SP), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (SP), R10
+ MOVQ 48(SP), R11
+ XORQ R13, R9
+ MOVQ 96(SP), R12
+ MOVQ 144(SP), R13
+ MOVQ 192(SP), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x8000000080008000, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (DI)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(DI)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(DI)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(DI)
+ MOVQ R12, 8(DI)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(SP), R11
+ XORQ R9, R11
+ MOVQ 80(SP), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(SP), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(SP), R13
+ MOVQ 176(SP), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(DI)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(DI)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(DI)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(DI)
+
+ // Result k
+ MOVQ 8(SP), R10
+ MOVQ 56(SP), R11
+ MOVQ 104(SP), R12
+ MOVQ 152(SP), R13
+ MOVQ 160(SP), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(DI)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(DI)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(DI)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(DI)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(SP), R11
+ XORQ BX, R11
+ MOVQ 88(SP), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(SP), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(SP), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(SP), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(DI)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(DI)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(DI)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(DI)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(DI)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(SP), R10
+ MOVQ 64(SP), R11
+ MOVQ 112(SP), R12
+ XORQ DX, R10
+ MOVQ 120(SP), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(SP), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(DI)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(DI)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(DI)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(DI)
+ MOVQ R8, 184(DI)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(DI), R12
+ XORQ 56(DI), DX
+ XORQ R15, BX
+ XORQ 96(DI), R12
+ XORQ 136(DI), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(DI), R13
+ XORQ 64(DI), R8
+ XORQ SI, CX
+ XORQ 104(DI), R13
+ XORQ 144(DI), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (DI), R10
+ MOVQ 48(DI), R11
+ XORQ R13, R9
+ MOVQ 96(DI), R12
+ MOVQ 144(DI), R13
+ MOVQ 192(DI), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x000000000000808b, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (SP)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(SP)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(SP)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(SP)
+ MOVQ R12, 8(SP)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(DI), R11
+ XORQ R9, R11
+ MOVQ 80(DI), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(DI), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(DI), R13
+ MOVQ 176(DI), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(SP)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(SP)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(SP)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(SP)
+
+ // Result k
+ MOVQ 8(DI), R10
+ MOVQ 56(DI), R11
+ MOVQ 104(DI), R12
+ MOVQ 152(DI), R13
+ MOVQ 160(DI), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(SP)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(SP)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(SP)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(SP)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(DI), R11
+ XORQ BX, R11
+ MOVQ 88(DI), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(DI), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(DI), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(DI), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(SP)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(SP)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(SP)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(SP)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(SP)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(DI), R10
+ MOVQ 64(DI), R11
+ MOVQ 112(DI), R12
+ XORQ DX, R10
+ MOVQ 120(DI), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(DI), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(SP)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(SP)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(SP)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(SP)
+ MOVQ R8, 184(SP)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(SP), R12
+ XORQ 56(SP), DX
+ XORQ R15, BX
+ XORQ 96(SP), R12
+ XORQ 136(SP), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(SP), R13
+ XORQ 64(SP), R8
+ XORQ SI, CX
+ XORQ 104(SP), R13
+ XORQ 144(SP), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (SP), R10
+ MOVQ 48(SP), R11
+ XORQ R13, R9
+ MOVQ 96(SP), R12
+ MOVQ 144(SP), R13
+ MOVQ 192(SP), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x0000000080000001, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (DI)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(DI)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(DI)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(DI)
+ MOVQ R12, 8(DI)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(SP), R11
+ XORQ R9, R11
+ MOVQ 80(SP), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(SP), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(SP), R13
+ MOVQ 176(SP), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(DI)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(DI)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(DI)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(DI)
+
+ // Result k
+ MOVQ 8(SP), R10
+ MOVQ 56(SP), R11
+ MOVQ 104(SP), R12
+ MOVQ 152(SP), R13
+ MOVQ 160(SP), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(DI)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(DI)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(DI)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(DI)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(SP), R11
+ XORQ BX, R11
+ MOVQ 88(SP), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(SP), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(SP), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(SP), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(DI)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(DI)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(DI)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(DI)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(DI)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(SP), R10
+ MOVQ 64(SP), R11
+ MOVQ 112(SP), R12
+ XORQ DX, R10
+ MOVQ 120(SP), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(SP), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(DI)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(DI)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(DI)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(DI)
+ MOVQ R8, 184(DI)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(DI), R12
+ XORQ 56(DI), DX
+ XORQ R15, BX
+ XORQ 96(DI), R12
+ XORQ 136(DI), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(DI), R13
+ XORQ 64(DI), R8
+ XORQ SI, CX
+ XORQ 104(DI), R13
+ XORQ 144(DI), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (DI), R10
+ MOVQ 48(DI), R11
+ XORQ R13, R9
+ MOVQ 96(DI), R12
+ MOVQ 144(DI), R13
+ MOVQ 192(DI), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x8000000080008081, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (SP)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(SP)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(SP)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(SP)
+ MOVQ R12, 8(SP)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(DI), R11
+ XORQ R9, R11
+ MOVQ 80(DI), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(DI), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(DI), R13
+ MOVQ 176(DI), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(SP)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(SP)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(SP)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(SP)
+
+ // Result k
+ MOVQ 8(DI), R10
+ MOVQ 56(DI), R11
+ MOVQ 104(DI), R12
+ MOVQ 152(DI), R13
+ MOVQ 160(DI), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(SP)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(SP)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(SP)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(SP)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(DI), R11
+ XORQ BX, R11
+ MOVQ 88(DI), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(DI), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(DI), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(DI), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(SP)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(SP)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(SP)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(SP)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(SP)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(DI), R10
+ MOVQ 64(DI), R11
+ MOVQ 112(DI), R12
+ XORQ DX, R10
+ MOVQ 120(DI), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(DI), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(SP)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(SP)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(SP)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(SP)
+ MOVQ R8, 184(SP)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(SP), R12
+ XORQ 56(SP), DX
+ XORQ R15, BX
+ XORQ 96(SP), R12
+ XORQ 136(SP), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(SP), R13
+ XORQ 64(SP), R8
+ XORQ SI, CX
+ XORQ 104(SP), R13
+ XORQ 144(SP), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (SP), R10
+ MOVQ 48(SP), R11
+ XORQ R13, R9
+ MOVQ 96(SP), R12
+ MOVQ 144(SP), R13
+ MOVQ 192(SP), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x8000000000008009, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (DI)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(DI)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(DI)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(DI)
+ MOVQ R12, 8(DI)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(SP), R11
+ XORQ R9, R11
+ MOVQ 80(SP), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(SP), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(SP), R13
+ MOVQ 176(SP), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(DI)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(DI)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(DI)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(DI)
+
+ // Result k
+ MOVQ 8(SP), R10
+ MOVQ 56(SP), R11
+ MOVQ 104(SP), R12
+ MOVQ 152(SP), R13
+ MOVQ 160(SP), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(DI)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(DI)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(DI)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(DI)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(SP), R11
+ XORQ BX, R11
+ MOVQ 88(SP), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(SP), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(SP), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(SP), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(DI)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(DI)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(DI)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(DI)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(DI)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(SP), R10
+ MOVQ 64(SP), R11
+ MOVQ 112(SP), R12
+ XORQ DX, R10
+ MOVQ 120(SP), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(SP), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(DI)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(DI)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(DI)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(DI)
+ MOVQ R8, 184(DI)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(DI), R12
+ XORQ 56(DI), DX
+ XORQ R15, BX
+ XORQ 96(DI), R12
+ XORQ 136(DI), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(DI), R13
+ XORQ 64(DI), R8
+ XORQ SI, CX
+ XORQ 104(DI), R13
+ XORQ 144(DI), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (DI), R10
+ MOVQ 48(DI), R11
+ XORQ R13, R9
+ MOVQ 96(DI), R12
+ MOVQ 144(DI), R13
+ MOVQ 192(DI), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x000000000000008a, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (SP)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(SP)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(SP)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(SP)
+ MOVQ R12, 8(SP)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(DI), R11
+ XORQ R9, R11
+ MOVQ 80(DI), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(DI), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(DI), R13
+ MOVQ 176(DI), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(SP)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(SP)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(SP)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(SP)
+
+ // Result k
+ MOVQ 8(DI), R10
+ MOVQ 56(DI), R11
+ MOVQ 104(DI), R12
+ MOVQ 152(DI), R13
+ MOVQ 160(DI), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(SP)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(SP)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(SP)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(SP)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(DI), R11
+ XORQ BX, R11
+ MOVQ 88(DI), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(DI), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(DI), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(DI), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(SP)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(SP)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(SP)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(SP)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(SP)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(DI), R10
+ MOVQ 64(DI), R11
+ MOVQ 112(DI), R12
+ XORQ DX, R10
+ MOVQ 120(DI), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(DI), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(SP)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(SP)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(SP)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(SP)
+ MOVQ R8, 184(SP)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(SP), R12
+ XORQ 56(SP), DX
+ XORQ R15, BX
+ XORQ 96(SP), R12
+ XORQ 136(SP), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(SP), R13
+ XORQ 64(SP), R8
+ XORQ SI, CX
+ XORQ 104(SP), R13
+ XORQ 144(SP), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (SP), R10
+ MOVQ 48(SP), R11
+ XORQ R13, R9
+ MOVQ 96(SP), R12
+ MOVQ 144(SP), R13
+ MOVQ 192(SP), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x0000000000000088, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (DI)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(DI)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(DI)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(DI)
+ MOVQ R12, 8(DI)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(SP), R11
+ XORQ R9, R11
+ MOVQ 80(SP), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(SP), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(SP), R13
+ MOVQ 176(SP), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(DI)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(DI)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(DI)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(DI)
+
+ // Result k
+ MOVQ 8(SP), R10
+ MOVQ 56(SP), R11
+ MOVQ 104(SP), R12
+ MOVQ 152(SP), R13
+ MOVQ 160(SP), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(DI)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(DI)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(DI)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(DI)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(SP), R11
+ XORQ BX, R11
+ MOVQ 88(SP), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(SP), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(SP), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(SP), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(DI)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(DI)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(DI)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(DI)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(DI)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(SP), R10
+ MOVQ 64(SP), R11
+ MOVQ 112(SP), R12
+ XORQ DX, R10
+ MOVQ 120(SP), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(SP), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(DI)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(DI)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(DI)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(DI)
+ MOVQ R8, 184(DI)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(DI), R12
+ XORQ 56(DI), DX
+ XORQ R15, BX
+ XORQ 96(DI), R12
+ XORQ 136(DI), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(DI), R13
+ XORQ 64(DI), R8
+ XORQ SI, CX
+ XORQ 104(DI), R13
+ XORQ 144(DI), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (DI), R10
+ MOVQ 48(DI), R11
+ XORQ R13, R9
+ MOVQ 96(DI), R12
+ MOVQ 144(DI), R13
+ MOVQ 192(DI), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x0000000080008009, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (SP)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(SP)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(SP)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(SP)
+ MOVQ R12, 8(SP)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(DI), R11
+ XORQ R9, R11
+ MOVQ 80(DI), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(DI), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(DI), R13
+ MOVQ 176(DI), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(SP)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(SP)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(SP)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(SP)
+
+ // Result k
+ MOVQ 8(DI), R10
+ MOVQ 56(DI), R11
+ MOVQ 104(DI), R12
+ MOVQ 152(DI), R13
+ MOVQ 160(DI), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(SP)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(SP)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(SP)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(SP)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(DI), R11
+ XORQ BX, R11
+ MOVQ 88(DI), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(DI), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(DI), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(DI), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(SP)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(SP)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(SP)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(SP)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(SP)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(DI), R10
+ MOVQ 64(DI), R11
+ MOVQ 112(DI), R12
+ XORQ DX, R10
+ MOVQ 120(DI), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(DI), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(SP)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(SP)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(SP)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(SP)
+ MOVQ R8, 184(SP)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(SP), R12
+ XORQ 56(SP), DX
+ XORQ R15, BX
+ XORQ 96(SP), R12
+ XORQ 136(SP), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(SP), R13
+ XORQ 64(SP), R8
+ XORQ SI, CX
+ XORQ 104(SP), R13
+ XORQ 144(SP), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (SP), R10
+ MOVQ 48(SP), R11
+ XORQ R13, R9
+ MOVQ 96(SP), R12
+ MOVQ 144(SP), R13
+ MOVQ 192(SP), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x000000008000000a, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (DI)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(DI)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(DI)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(DI)
+ MOVQ R12, 8(DI)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(SP), R11
+ XORQ R9, R11
+ MOVQ 80(SP), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(SP), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(SP), R13
+ MOVQ 176(SP), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(DI)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(DI)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(DI)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(DI)
+
+ // Result k
+ MOVQ 8(SP), R10
+ MOVQ 56(SP), R11
+ MOVQ 104(SP), R12
+ MOVQ 152(SP), R13
+ MOVQ 160(SP), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(DI)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(DI)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(DI)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(DI)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(SP), R11
+ XORQ BX, R11
+ MOVQ 88(SP), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(SP), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(SP), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(SP), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(DI)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(DI)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(DI)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(DI)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(DI)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(SP), R10
+ MOVQ 64(SP), R11
+ MOVQ 112(SP), R12
+ XORQ DX, R10
+ MOVQ 120(SP), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(SP), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(DI)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(DI)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(DI)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(DI)
+ MOVQ R8, 184(DI)
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(DI), R12
+ XORQ 56(DI), DX
+ XORQ R15, BX
+ XORQ 96(DI), R12
+ XORQ 136(DI), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(DI), R13
+ XORQ 64(DI), R8
+ XORQ SI, CX
+ XORQ 104(DI), R13
+ XORQ 144(DI), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (DI), R10
+ MOVQ 48(DI), R11
+ XORQ R13, R9
+ MOVQ 96(DI), R12
+ MOVQ 144(DI), R13
+ MOVQ 192(DI), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x000000008000808b, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (SP)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(SP)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(SP)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(SP)
+ MOVQ R12, 8(SP)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(DI), R11
+ XORQ R9, R11
+ MOVQ 80(DI), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(DI), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(DI), R13
+ MOVQ 176(DI), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(SP)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(SP)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(SP)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(SP)
+
+ // Result k
+ MOVQ 8(DI), R10
+ MOVQ 56(DI), R11
+ MOVQ 104(DI), R12
+ MOVQ 152(DI), R13
+ MOVQ 160(DI), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(SP)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(SP)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(SP)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(SP)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(DI), R11
+ XORQ BX, R11
+ MOVQ 88(DI), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(DI), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(DI), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(DI), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(SP)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(SP)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(SP)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(SP)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(SP)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(DI), R10
+ MOVQ 64(DI), R11
+ MOVQ 112(DI), R12
+ XORQ DX, R10
+ MOVQ 120(DI), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(DI), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(SP)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(SP)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(SP)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(SP)
+ MOVQ R8, 184(SP)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(SP), R12
+ XORQ 56(SP), DX
+ XORQ R15, BX
+ XORQ 96(SP), R12
+ XORQ 136(SP), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(SP), R13
+ XORQ 64(SP), R8
+ XORQ SI, CX
+ XORQ 104(SP), R13
+ XORQ 144(SP), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (SP), R10
+ MOVQ 48(SP), R11
+ XORQ R13, R9
+ MOVQ 96(SP), R12
+ MOVQ 144(SP), R13
+ MOVQ 192(SP), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x800000000000008b, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (DI)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(DI)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(DI)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(DI)
+ MOVQ R12, 8(DI)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(SP), R11
+ XORQ R9, R11
+ MOVQ 80(SP), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(SP), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(SP), R13
+ MOVQ 176(SP), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(DI)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(DI)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(DI)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(DI)
+
+ // Result k
+ MOVQ 8(SP), R10
+ MOVQ 56(SP), R11
+ MOVQ 104(SP), R12
+ MOVQ 152(SP), R13
+ MOVQ 160(SP), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(DI)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(DI)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(DI)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(DI)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(SP), R11
+ XORQ BX, R11
+ MOVQ 88(SP), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(SP), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(SP), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(SP), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(DI)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(DI)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(DI)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(DI)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(DI)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(SP), R10
+ MOVQ 64(SP), R11
+ MOVQ 112(SP), R12
+ XORQ DX, R10
+ MOVQ 120(SP), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(SP), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(DI)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(DI)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(DI)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(DI)
+ MOVQ R8, 184(DI)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(DI), R12
+ XORQ 56(DI), DX
+ XORQ R15, BX
+ XORQ 96(DI), R12
+ XORQ 136(DI), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(DI), R13
+ XORQ 64(DI), R8
+ XORQ SI, CX
+ XORQ 104(DI), R13
+ XORQ 144(DI), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (DI), R10
+ MOVQ 48(DI), R11
+ XORQ R13, R9
+ MOVQ 96(DI), R12
+ MOVQ 144(DI), R13
+ MOVQ 192(DI), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x8000000000008089, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (SP)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(SP)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(SP)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(SP)
+ MOVQ R12, 8(SP)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(DI), R11
+ XORQ R9, R11
+ MOVQ 80(DI), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(DI), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(DI), R13
+ MOVQ 176(DI), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(SP)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(SP)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(SP)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(SP)
+
+ // Result k
+ MOVQ 8(DI), R10
+ MOVQ 56(DI), R11
+ MOVQ 104(DI), R12
+ MOVQ 152(DI), R13
+ MOVQ 160(DI), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(SP)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(SP)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(SP)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(SP)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(DI), R11
+ XORQ BX, R11
+ MOVQ 88(DI), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(DI), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(DI), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(DI), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(SP)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(SP)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(SP)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(SP)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(SP)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(DI), R10
+ MOVQ 64(DI), R11
+ MOVQ 112(DI), R12
+ XORQ DX, R10
+ MOVQ 120(DI), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(DI), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(SP)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(SP)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(SP)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(SP)
+ MOVQ R8, 184(SP)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(SP), R12
+ XORQ 56(SP), DX
+ XORQ R15, BX
+ XORQ 96(SP), R12
+ XORQ 136(SP), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(SP), R13
+ XORQ 64(SP), R8
+ XORQ SI, CX
+ XORQ 104(SP), R13
+ XORQ 144(SP), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (SP), R10
+ MOVQ 48(SP), R11
+ XORQ R13, R9
+ MOVQ 96(SP), R12
+ MOVQ 144(SP), R13
+ MOVQ 192(SP), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x8000000000008003, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (DI)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(DI)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(DI)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(DI)
+ MOVQ R12, 8(DI)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(SP), R11
+ XORQ R9, R11
+ MOVQ 80(SP), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(SP), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(SP), R13
+ MOVQ 176(SP), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(DI)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(DI)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(DI)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(DI)
+
+ // Result k
+ MOVQ 8(SP), R10
+ MOVQ 56(SP), R11
+ MOVQ 104(SP), R12
+ MOVQ 152(SP), R13
+ MOVQ 160(SP), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(DI)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(DI)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(DI)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(DI)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(SP), R11
+ XORQ BX, R11
+ MOVQ 88(SP), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(SP), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(SP), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(SP), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(DI)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(DI)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(DI)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(DI)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(DI)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(SP), R10
+ MOVQ 64(SP), R11
+ MOVQ 112(SP), R12
+ XORQ DX, R10
+ MOVQ 120(SP), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(SP), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(DI)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(DI)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(DI)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(DI)
+ MOVQ R8, 184(DI)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(DI), R12
+ XORQ 56(DI), DX
+ XORQ R15, BX
+ XORQ 96(DI), R12
+ XORQ 136(DI), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(DI), R13
+ XORQ 64(DI), R8
+ XORQ SI, CX
+ XORQ 104(DI), R13
+ XORQ 144(DI), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (DI), R10
+ MOVQ 48(DI), R11
+ XORQ R13, R9
+ MOVQ 96(DI), R12
+ MOVQ 144(DI), R13
+ MOVQ 192(DI), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x8000000000008002, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (SP)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(SP)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(SP)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(SP)
+ MOVQ R12, 8(SP)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(DI), R11
+ XORQ R9, R11
+ MOVQ 80(DI), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(DI), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(DI), R13
+ MOVQ 176(DI), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(SP)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(SP)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(SP)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(SP)
+
+ // Result k
+ MOVQ 8(DI), R10
+ MOVQ 56(DI), R11
+ MOVQ 104(DI), R12
+ MOVQ 152(DI), R13
+ MOVQ 160(DI), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(SP)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(SP)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(SP)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(SP)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(DI), R11
+ XORQ BX, R11
+ MOVQ 88(DI), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(DI), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(DI), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(DI), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(SP)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(SP)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(SP)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(SP)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(SP)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(DI), R10
+ MOVQ 64(DI), R11
+ MOVQ 112(DI), R12
+ XORQ DX, R10
+ MOVQ 120(DI), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(DI), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(SP)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(SP)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(SP)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(SP)
+ MOVQ R8, 184(SP)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(SP), R12
+ XORQ 56(SP), DX
+ XORQ R15, BX
+ XORQ 96(SP), R12
+ XORQ 136(SP), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(SP), R13
+ XORQ 64(SP), R8
+ XORQ SI, CX
+ XORQ 104(SP), R13
+ XORQ 144(SP), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (SP), R10
+ MOVQ 48(SP), R11
+ XORQ R13, R9
+ MOVQ 96(SP), R12
+ MOVQ 144(SP), R13
+ MOVQ 192(SP), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x8000000000000080, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (DI)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(DI)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(DI)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(DI)
+ MOVQ R12, 8(DI)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(SP), R11
+ XORQ R9, R11
+ MOVQ 80(SP), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(SP), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(SP), R13
+ MOVQ 176(SP), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(DI)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(DI)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(DI)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(DI)
+
+ // Result k
+ MOVQ 8(SP), R10
+ MOVQ 56(SP), R11
+ MOVQ 104(SP), R12
+ MOVQ 152(SP), R13
+ MOVQ 160(SP), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(DI)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(DI)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(DI)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(DI)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(SP), R11
+ XORQ BX, R11
+ MOVQ 88(SP), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(SP), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(SP), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(SP), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(DI)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(DI)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(DI)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(DI)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(DI)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(SP), R10
+ MOVQ 64(SP), R11
+ MOVQ 112(SP), R12
+ XORQ DX, R10
+ MOVQ 120(SP), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(SP), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(DI)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(DI)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(DI)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(DI)
+ MOVQ R8, 184(DI)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(DI), R12
+ XORQ 56(DI), DX
+ XORQ R15, BX
+ XORQ 96(DI), R12
+ XORQ 136(DI), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(DI), R13
+ XORQ 64(DI), R8
+ XORQ SI, CX
+ XORQ 104(DI), R13
+ XORQ 144(DI), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (DI), R10
+ MOVQ 48(DI), R11
+ XORQ R13, R9
+ MOVQ 96(DI), R12
+ MOVQ 144(DI), R13
+ MOVQ 192(DI), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x000000000000800a, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (SP)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(SP)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(SP)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(SP)
+ MOVQ R12, 8(SP)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(DI), R11
+ XORQ R9, R11
+ MOVQ 80(DI), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(DI), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(DI), R13
+ MOVQ 176(DI), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(SP)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(SP)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(SP)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(SP)
+
+ // Result k
+ MOVQ 8(DI), R10
+ MOVQ 56(DI), R11
+ MOVQ 104(DI), R12
+ MOVQ 152(DI), R13
+ MOVQ 160(DI), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(SP)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(SP)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(SP)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(SP)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(DI), R11
+ XORQ BX, R11
+ MOVQ 88(DI), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(DI), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(DI), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(DI), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(SP)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(SP)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(SP)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(SP)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(SP)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(DI), R10
+ MOVQ 64(DI), R11
+ MOVQ 112(DI), R12
+ XORQ DX, R10
+ MOVQ 120(DI), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(DI), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(SP)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(SP)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(SP)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(SP)
+ MOVQ R8, 184(SP)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(SP), R12
+ XORQ 56(SP), DX
+ XORQ R15, BX
+ XORQ 96(SP), R12
+ XORQ 136(SP), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(SP), R13
+ XORQ 64(SP), R8
+ XORQ SI, CX
+ XORQ 104(SP), R13
+ XORQ 144(SP), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (SP), R10
+ MOVQ 48(SP), R11
+ XORQ R13, R9
+ MOVQ 96(SP), R12
+ MOVQ 144(SP), R13
+ MOVQ 192(SP), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x800000008000000a, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (DI)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(DI)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(DI)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(DI)
+ MOVQ R12, 8(DI)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(SP), R11
+ XORQ R9, R11
+ MOVQ 80(SP), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(SP), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(SP), R13
+ MOVQ 176(SP), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(DI)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(DI)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(DI)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(DI)
+
+ // Result k
+ MOVQ 8(SP), R10
+ MOVQ 56(SP), R11
+ MOVQ 104(SP), R12
+ MOVQ 152(SP), R13
+ MOVQ 160(SP), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(DI)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(DI)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(DI)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(DI)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(SP), R11
+ XORQ BX, R11
+ MOVQ 88(SP), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(SP), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(SP), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(SP), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(DI)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(DI)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(DI)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(DI)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(DI)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(SP), R10
+ MOVQ 64(SP), R11
+ MOVQ 112(SP), R12
+ XORQ DX, R10
+ MOVQ 120(SP), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(SP), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(DI)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(DI)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(DI)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(DI)
+ MOVQ R8, 184(DI)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(DI), R12
+ XORQ 56(DI), DX
+ XORQ R15, BX
+ XORQ 96(DI), R12
+ XORQ 136(DI), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(DI), R13
+ XORQ 64(DI), R8
+ XORQ SI, CX
+ XORQ 104(DI), R13
+ XORQ 144(DI), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (DI), R10
+ MOVQ 48(DI), R11
+ XORQ R13, R9
+ MOVQ 96(DI), R12
+ MOVQ 144(DI), R13
+ MOVQ 192(DI), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x8000000080008081, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (SP)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(SP)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(SP)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(SP)
+ MOVQ R12, 8(SP)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(DI), R11
+ XORQ R9, R11
+ MOVQ 80(DI), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(DI), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(DI), R13
+ MOVQ 176(DI), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(SP)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(SP)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(SP)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(SP)
+
+ // Result k
+ MOVQ 8(DI), R10
+ MOVQ 56(DI), R11
+ MOVQ 104(DI), R12
+ MOVQ 152(DI), R13
+ MOVQ 160(DI), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(SP)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(SP)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(SP)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(SP)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(DI), R11
+ XORQ BX, R11
+ MOVQ 88(DI), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(DI), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(DI), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(DI), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(SP)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(SP)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(SP)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(SP)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(SP)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(DI), R10
+ MOVQ 64(DI), R11
+ MOVQ 112(DI), R12
+ XORQ DX, R10
+ MOVQ 120(DI), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(DI), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(SP)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(SP)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(SP)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(SP)
+ MOVQ R8, 184(SP)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(SP), R12
+ XORQ 56(SP), DX
+ XORQ R15, BX
+ XORQ 96(SP), R12
+ XORQ 136(SP), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(SP), R13
+ XORQ 64(SP), R8
+ XORQ SI, CX
+ XORQ 104(SP), R13
+ XORQ 144(SP), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (SP), R10
+ MOVQ 48(SP), R11
+ XORQ R13, R9
+ MOVQ 96(SP), R12
+ MOVQ 144(SP), R13
+ MOVQ 192(SP), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x8000000000008080, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (DI)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(DI)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(DI)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(DI)
+ MOVQ R12, 8(DI)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(SP), R11
+ XORQ R9, R11
+ MOVQ 80(SP), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(SP), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(SP), R13
+ MOVQ 176(SP), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(DI)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(DI)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(DI)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(DI)
+
+ // Result k
+ MOVQ 8(SP), R10
+ MOVQ 56(SP), R11
+ MOVQ 104(SP), R12
+ MOVQ 152(SP), R13
+ MOVQ 160(SP), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(DI)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(DI)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(DI)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(DI)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(SP), R11
+ XORQ BX, R11
+ MOVQ 88(SP), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(SP), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(SP), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(SP), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(DI)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(DI)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(DI)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(DI)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(DI)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(SP), R10
+ MOVQ 64(SP), R11
+ MOVQ 112(SP), R12
+ XORQ DX, R10
+ MOVQ 120(SP), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(SP), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(DI)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(DI)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(DI)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(DI)
+ MOVQ R8, 184(DI)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(DI), R12
+ XORQ 56(DI), DX
+ XORQ R15, BX
+ XORQ 96(DI), R12
+ XORQ 136(DI), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(DI), R13
+ XORQ 64(DI), R8
+ XORQ SI, CX
+ XORQ 104(DI), R13
+ XORQ 144(DI), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (DI), R10
+ MOVQ 48(DI), R11
+ XORQ R13, R9
+ MOVQ 96(DI), R12
+ MOVQ 144(DI), R13
+ MOVQ 192(DI), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x0000000080000001, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (SP)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(SP)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(SP)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(SP)
+ MOVQ R12, 8(SP)
+ MOVQ R12, BP
+
+ // Result g
+ MOVQ 72(DI), R11
+ XORQ R9, R11
+ MOVQ 80(DI), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(DI), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(DI), R13
+ MOVQ 176(DI), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(SP)
+ XORQ AX, SI
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(SP)
+ XORQ AX, BP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(SP)
+ NOTQ R14
+ XORQ R10, R15
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(SP)
+
+ // Result k
+ MOVQ 8(DI), R10
+ MOVQ 56(DI), R11
+ MOVQ 104(DI), R12
+ MOVQ 152(DI), R13
+ MOVQ 160(DI), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(SP)
+ XORQ AX, SI
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(SP)
+ XORQ AX, BP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(SP)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(SP)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(SP)
+ XORQ R10, R15
+
+ // Result m
+ MOVQ 40(DI), R11
+ XORQ BX, R11
+ MOVQ 88(DI), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(DI), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(DI), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(DI), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(SP)
+ XORQ AX, SI
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(SP)
+ XORQ AX, BP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(SP)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(SP)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(SP)
+ XORQ R11, R15
+
+ // Result s
+ MOVQ 16(DI), R10
+ MOVQ 64(DI), R11
+ MOVQ 112(DI), R12
+ XORQ DX, R10
+ MOVQ 120(DI), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(DI), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(SP)
+ ROLQ $0x27, R12
+ XORQ R9, R15
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(SP)
+ XORQ BX, SI
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(SP)
+ XORQ CX, BP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(SP)
+ MOVQ R8, 184(SP)
+
+ // Prepare round
+ MOVQ BP, BX
+ ROLQ $0x01, BX
+ MOVQ 16(SP), R12
+ XORQ 56(SP), DX
+ XORQ R15, BX
+ XORQ 96(SP), R12
+ XORQ 136(SP), DX
+ XORQ DX, R12
+ MOVQ R12, CX
+ ROLQ $0x01, CX
+ MOVQ 24(SP), R13
+ XORQ 64(SP), R8
+ XORQ SI, CX
+ XORQ 104(SP), R13
+ XORQ 144(SP), R8
+ XORQ R8, R13
+ MOVQ R13, DX
+ ROLQ $0x01, DX
+ MOVQ R15, R8
+ XORQ BP, DX
+ ROLQ $0x01, R8
+ MOVQ SI, R9
+ XORQ R12, R8
+ ROLQ $0x01, R9
+
+ // Result b
+ MOVQ (SP), R10
+ MOVQ 48(SP), R11
+ XORQ R13, R9
+ MOVQ 96(SP), R12
+ MOVQ 144(SP), R13
+ MOVQ 192(SP), R14
+ XORQ CX, R11
+ ROLQ $0x2c, R11
+ XORQ DX, R12
+ XORQ BX, R10
+ ROLQ $0x2b, R12
+ MOVQ R11, SI
+ MOVQ $0x8000000080008008, AX
+ ORQ R12, SI
+ XORQ R10, AX
+ XORQ AX, SI
+ MOVQ SI, (DI)
+ XORQ R9, R14
+ ROLQ $0x0e, R14
+ MOVQ R10, R15
+ ANDQ R11, R15
+ XORQ R14, R15
+ MOVQ R15, 32(DI)
+ XORQ R8, R13
+ ROLQ $0x15, R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 16(DI)
+ NOTQ R12
+ ORQ R10, R14
+ ORQ R13, R12
+ XORQ R13, R14
+ XORQ R11, R12
+ MOVQ R14, 24(DI)
+ MOVQ R12, 8(DI)
+ NOP
+
+ // Result g
+ MOVQ 72(SP), R11
+ XORQ R9, R11
+ MOVQ 80(SP), R12
+ ROLQ $0x14, R11
+ XORQ BX, R12
+ ROLQ $0x03, R12
+ MOVQ 24(SP), R10
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ R8, R10
+ MOVQ 128(SP), R13
+ MOVQ 176(SP), R14
+ ROLQ $0x1c, R10
+ XORQ R10, AX
+ MOVQ AX, 40(DI)
+ NOP
+ XORQ CX, R13
+ ROLQ $0x2d, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 48(DI)
+ NOP
+ XORQ DX, R14
+ ROLQ $0x3d, R14
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 64(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 72(DI)
+ NOTQ R14
+ NOP
+ ORQ R14, R13
+ XORQ R12, R13
+ MOVQ R13, 56(DI)
+
+ // Result k
+ MOVQ 8(SP), R10
+ MOVQ 56(SP), R11
+ MOVQ 104(SP), R12
+ MOVQ 152(SP), R13
+ MOVQ 160(SP), R14
+ XORQ DX, R11
+ ROLQ $0x06, R11
+ XORQ R8, R12
+ ROLQ $0x19, R12
+ MOVQ R11, AX
+ ORQ R12, AX
+ XORQ CX, R10
+ ROLQ $0x01, R10
+ XORQ R10, AX
+ MOVQ AX, 80(DI)
+ NOP
+ XORQ R9, R13
+ ROLQ $0x08, R13
+ MOVQ R12, AX
+ ANDQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 88(DI)
+ NOP
+ XORQ BX, R14
+ ROLQ $0x12, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ANDQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 96(DI)
+ MOVQ R14, AX
+ ORQ R10, AX
+ XORQ R13, AX
+ MOVQ AX, 104(DI)
+ ANDQ R11, R10
+ XORQ R14, R10
+ MOVQ R10, 112(DI)
+ NOP
+
+ // Result m
+ MOVQ 40(SP), R11
+ XORQ BX, R11
+ MOVQ 88(SP), R12
+ ROLQ $0x24, R11
+ XORQ CX, R12
+ MOVQ 32(SP), R10
+ ROLQ $0x0a, R12
+ MOVQ R11, AX
+ MOVQ 136(SP), R13
+ ANDQ R12, AX
+ XORQ R9, R10
+ MOVQ 184(SP), R14
+ ROLQ $0x1b, R10
+ XORQ R10, AX
+ MOVQ AX, 120(DI)
+ NOP
+ XORQ DX, R13
+ ROLQ $0x0f, R13
+ MOVQ R12, AX
+ ORQ R13, AX
+ XORQ R11, AX
+ MOVQ AX, 128(DI)
+ NOP
+ XORQ R8, R14
+ ROLQ $0x38, R14
+ NOTQ R13
+ MOVQ R13, AX
+ ORQ R14, AX
+ XORQ R12, AX
+ MOVQ AX, 136(DI)
+ ORQ R10, R11
+ XORQ R14, R11
+ MOVQ R11, 152(DI)
+ ANDQ R10, R14
+ XORQ R13, R14
+ MOVQ R14, 144(DI)
+ NOP
+
+ // Result s
+ MOVQ 16(SP), R10
+ MOVQ 64(SP), R11
+ MOVQ 112(SP), R12
+ XORQ DX, R10
+ MOVQ 120(SP), R13
+ ROLQ $0x3e, R10
+ XORQ R8, R11
+ MOVQ 168(SP), R14
+ ROLQ $0x37, R11
+ XORQ R9, R12
+ MOVQ R10, R9
+ XORQ CX, R14
+ ROLQ $0x02, R14
+ ANDQ R11, R9
+ XORQ R14, R9
+ MOVQ R9, 192(DI)
+ ROLQ $0x27, R12
+ NOP
+ NOTQ R11
+ XORQ BX, R13
+ MOVQ R11, BX
+ ANDQ R12, BX
+ XORQ R10, BX
+ MOVQ BX, 160(DI)
+ NOP
+ ROLQ $0x29, R13
+ MOVQ R12, CX
+ ORQ R13, CX
+ XORQ R11, CX
+ MOVQ CX, 168(DI)
+ NOP
+ MOVQ R13, DX
+ MOVQ R14, R8
+ ANDQ R14, DX
+ ORQ R10, R8
+ XORQ R12, DX
+ XORQ R13, R8
+ MOVQ DX, 176(DI)
+ MOVQ R8, 184(DI)
+
+ // Revert the internal state to the user state
+ NOTQ 8(DI)
+ NOTQ 16(DI)
+ NOTQ 64(DI)
+ NOTQ 96(DI)
+ NOTQ 136(DI)
+ NOTQ 160(DI)
RET
diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go
index 1ea9275b..a01ef435 100644
--- a/vendor/golang.org/x/crypto/sha3/shake.go
+++ b/vendor/golang.org/x/crypto/sha3/shake.go
@@ -85,9 +85,9 @@ func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash {
// leftEncode returns max 9 bytes
c.initBlock = make([]byte, 0, 9*2+len(N)+len(S))
- c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...)
+ c.initBlock = append(c.initBlock, leftEncode(uint64(len(N))*8)...)
c.initBlock = append(c.initBlock, N...)
- c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...)
+ c.initBlock = append(c.initBlock, leftEncode(uint64(len(S))*8)...)
c.initBlock = append(c.initBlock, S...)
c.Write(bytepad(c.initBlock, c.rate))
return &c
diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE
index 6a66aea5..2a7cf70d 100644
--- a/vendor/golang.org/x/mod/LICENSE
+++ b/vendor/golang.org/x/mod/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE
index 6a66aea5..2a7cf70d 100644
--- a/vendor/golang.org/x/net/LICENSE
+++ b/vendor/golang.org/x/net/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go
new file mode 100644
index 00000000..de58dfb8
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config.go
@@ -0,0 +1,122 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "math"
+ "net/http"
+ "time"
+)
+
+// http2Config is a package-internal version of net/http.HTTP2Config.
+//
+// http.HTTP2Config was added in Go 1.24.
+// When running with a version of net/http that includes HTTP2Config,
+// we merge the configuration with the fields in Transport or Server
+// to produce an http2Config.
+//
+// Zero valued fields in http2Config are interpreted as in the
+// net/http.HTTPConfig documentation.
+//
+// Precedence order for reconciling configurations is:
+//
+// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero.
+// - Otherwise use the http2.{Server.Transport} value.
+// - If the resulting value is zero or out of range, use a default.
+type http2Config struct {
+ MaxConcurrentStreams uint32
+ MaxDecoderHeaderTableSize uint32
+ MaxEncoderHeaderTableSize uint32
+ MaxReadFrameSize uint32
+ MaxUploadBufferPerConnection int32
+ MaxUploadBufferPerStream int32
+ SendPingTimeout time.Duration
+ PingTimeout time.Duration
+ WriteByteTimeout time.Duration
+ PermitProhibitedCipherSuites bool
+ CountError func(errType string)
+}
+
+// configFromServer merges configuration settings from
+// net/http.Server.HTTP2Config and http2.Server.
+func configFromServer(h1 *http.Server, h2 *Server) http2Config {
+ conf := http2Config{
+ MaxConcurrentStreams: h2.MaxConcurrentStreams,
+ MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
+ MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
+ MaxReadFrameSize: h2.MaxReadFrameSize,
+ MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection,
+ MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream,
+ SendPingTimeout: h2.ReadIdleTimeout,
+ PingTimeout: h2.PingTimeout,
+ WriteByteTimeout: h2.WriteByteTimeout,
+ PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
+ CountError: h2.CountError,
+ }
+ fillNetHTTPServerConfig(&conf, h1)
+ setConfigDefaults(&conf, true)
+ return conf
+}
+
+// configFromServer merges configuration settings from h2 and h2.t1.HTTP2
+// (the net/http Transport).
+func configFromTransport(h2 *Transport) http2Config {
+ conf := http2Config{
+ MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
+ MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
+ MaxReadFrameSize: h2.MaxReadFrameSize,
+ SendPingTimeout: h2.ReadIdleTimeout,
+ PingTimeout: h2.PingTimeout,
+ WriteByteTimeout: h2.WriteByteTimeout,
+ }
+
+ // Unlike most config fields, where out-of-range values revert to the default,
+ // Transport.MaxReadFrameSize clips.
+ if conf.MaxReadFrameSize < minMaxFrameSize {
+ conf.MaxReadFrameSize = minMaxFrameSize
+ } else if conf.MaxReadFrameSize > maxFrameSize {
+ conf.MaxReadFrameSize = maxFrameSize
+ }
+
+ if h2.t1 != nil {
+ fillNetHTTPTransportConfig(&conf, h2.t1)
+ }
+ setConfigDefaults(&conf, false)
+ return conf
+}
+
+func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) {
+ if *v < minval || *v > maxval {
+ *v = defval
+ }
+}
+
+func setConfigDefaults(conf *http2Config, server bool) {
+ setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams)
+ setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
+ setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
+ if server {
+ setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20)
+ } else {
+ setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow)
+ }
+ if server {
+ setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20)
+ } else {
+ setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow)
+ }
+ setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize)
+ setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second)
+}
+
+// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header
+// to an HTTP/2 MAX_HEADER_LIST_SIZE value.
+func adjustHTTP1MaxHeaderSize(n int64) int64 {
+ // http2's count is in a slightly different unit and includes 32 bytes per pair.
+ // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
+ const perFieldOverhead = 32 // per http2 spec
+ const typicalHeaders = 10 // conservative
+ return n + typicalHeaders*perFieldOverhead
+}
diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go
new file mode 100644
index 00000000..e3784123
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go124.go
@@ -0,0 +1,61 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.24
+
+package http2
+
+import "net/http"
+
+// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
+func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
+ fillNetHTTPConfig(conf, srv.HTTP2)
+}
+
+// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2.
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
+ fillNetHTTPConfig(conf, tr.HTTP2)
+}
+
+func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
+ if h2 == nil {
+ return
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if h2.MaxEncoderHeaderTableSize != 0 {
+ conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
+ }
+ if h2.MaxDecoderHeaderTableSize != 0 {
+ conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if h2.MaxReadFrameSize != 0 {
+ conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
+ }
+ if h2.MaxReceiveBufferPerConnection != 0 {
+ conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
+ }
+ if h2.MaxReceiveBufferPerStream != 0 {
+ conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
+ }
+ if h2.SendPingTimeout != 0 {
+ conf.SendPingTimeout = h2.SendPingTimeout
+ }
+ if h2.PingTimeout != 0 {
+ conf.PingTimeout = h2.PingTimeout
+ }
+ if h2.WriteByteTimeout != 0 {
+ conf.WriteByteTimeout = h2.WriteByteTimeout
+ }
+ if h2.PermitProhibitedCipherSuites {
+ conf.PermitProhibitedCipherSuites = true
+ }
+ if h2.CountError != nil {
+ conf.CountError = h2.CountError
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go
new file mode 100644
index 00000000..060fd6c6
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_pre_go124.go
@@ -0,0 +1,16 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.24
+
+package http2
+
+import "net/http"
+
+// Pre-Go 1.24 fallback.
+// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
+
+func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
+
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 003e649f..7688c356 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -19,8 +19,9 @@ import (
"bufio"
"context"
"crypto/tls"
+ "errors"
"fmt"
- "io"
+ "net"
"net/http"
"os"
"sort"
@@ -237,13 +238,19 @@ func (cw closeWaiter) Wait() {
// Its buffered writer is lazily allocated as needed, to minimize
// idle memory usage with many connections.
type bufferedWriter struct {
- _ incomparable
- w io.Writer // immutable
- bw *bufio.Writer // non-nil when data is buffered
+ _ incomparable
+ group synctestGroupInterface // immutable
+ conn net.Conn // immutable
+ bw *bufio.Writer // non-nil when data is buffered
+ byteTimeout time.Duration // immutable, WriteByteTimeout
}
-func newBufferedWriter(w io.Writer) *bufferedWriter {
- return &bufferedWriter{w: w}
+func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter {
+ return &bufferedWriter{
+ group: group,
+ conn: conn,
+ byteTimeout: timeout,
+ }
}
// bufWriterPoolBufferSize is the size of bufio.Writer's
@@ -270,7 +277,7 @@ func (w *bufferedWriter) Available() int {
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
if w.bw == nil {
bw := bufWriterPool.Get().(*bufio.Writer)
- bw.Reset(w.w)
+ bw.Reset((*bufferedWriterTimeoutWriter)(w))
w.bw = bw
}
return w.bw.Write(p)
@@ -288,6 +295,38 @@ func (w *bufferedWriter) Flush() error {
return err
}
+type bufferedWriterTimeoutWriter bufferedWriter
+
+func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
+ return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
+}
+
+// writeWithByteTimeout writes to conn.
+// If more than timeout passes without any bytes being written to the connection,
+// the write fails.
+func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
+ if timeout <= 0 {
+ return conn.Write(p)
+ }
+ for {
+ var now time.Time
+ if group == nil {
+ now = time.Now()
+ } else {
+ now = group.Now()
+ }
+ conn.SetWriteDeadline(now.Add(timeout))
+ nn, err := conn.Write(p[n:])
+ n += nn
+ if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
+ // Either we finished the write, made no progress, or hit the deadline.
+ // Whichever it is, we're done now.
+ conn.SetWriteDeadline(time.Time{})
+ return n, err
+ }
+ }
+}
+
func mustUint31(v int32) uint32 {
if v < 0 || v > 2147483647 {
panic("out of range")
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 6c349f3e..617b4a47 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -29,6 +29,7 @@ import (
"bufio"
"bytes"
"context"
+ "crypto/rand"
"crypto/tls"
"errors"
"fmt"
@@ -52,10 +53,14 @@ import (
)
const (
- prefaceTimeout = 10 * time.Second
- firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
- handlerChunkWriteSize = 4 << 10
- defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+ prefaceTimeout = 10 * time.Second
+ firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
+ handlerChunkWriteSize = 4 << 10
+ defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+
+ // maxQueuedControlFrames is the maximum number of control frames like
+ // SETTINGS, PING and RST_STREAM that will be queued for writing before
+ // the connection is closed to prevent memory exhaustion attacks.
maxQueuedControlFrames = 10000
)
@@ -127,6 +132,22 @@ type Server struct {
// If zero or negative, there is no timeout.
IdleTimeout time.Duration
+ // ReadIdleTimeout is the timeout after which a health check using a ping
+ // frame will be carried out if no frame is received on the connection.
+ // If zero, no health check is performed.
+ ReadIdleTimeout time.Duration
+
+ // PingTimeout is the timeout after which the connection will be closed
+ // if a response to a ping is not received.
+ // If zero, a default of 15 seconds is used.
+ PingTimeout time.Duration
+
+ // WriteByteTimeout is the timeout after which a connection will be
+ // closed if no data can be written to it. The timeout begins when data is
+ // available to write, and is extended whenever any bytes are written.
+ // If zero or negative, there is no timeout.
+ WriteByteTimeout time.Duration
+
// MaxUploadBufferPerConnection is the size of the initial flow
// control window for each connections. The HTTP/2 spec does not
// allow this to be smaller than 65535 or larger than 2^32-1.
@@ -189,57 +210,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer {
return timeTimer{time.AfterFunc(d, f)}
}
-func (s *Server) initialConnRecvWindowSize() int32 {
- if s.MaxUploadBufferPerConnection >= initialWindowSize {
- return s.MaxUploadBufferPerConnection
- }
- return 1 << 20
-}
-
-func (s *Server) initialStreamRecvWindowSize() int32 {
- if s.MaxUploadBufferPerStream > 0 {
- return s.MaxUploadBufferPerStream
- }
- return 1 << 20
-}
-
-func (s *Server) maxReadFrameSize() uint32 {
- if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
- return v
- }
- return defaultMaxReadFrameSize
-}
-
-func (s *Server) maxConcurrentStreams() uint32 {
- if v := s.MaxConcurrentStreams; v > 0 {
- return v
- }
- return defaultMaxStreams
-}
-
-func (s *Server) maxDecoderHeaderTableSize() uint32 {
- if v := s.MaxDecoderHeaderTableSize; v > 0 {
- return v
- }
- return initialHeaderTableSize
-}
-
-func (s *Server) maxEncoderHeaderTableSize() uint32 {
- if v := s.MaxEncoderHeaderTableSize; v > 0 {
- return v
- }
- return initialHeaderTableSize
-}
-
-// maxQueuedControlFrames is the maximum number of control frames like
-// SETTINGS, PING and RST_STREAM that will be queued for writing before
-// the connection is closed to prevent memory exhaustion attacks.
-func (s *Server) maxQueuedControlFrames() int {
- // TODO: if anybody asks, add a Server field, and remember to define the
- // behavior of negative values.
- return maxQueuedControlFrames
-}
-
type serverInternalState struct {
mu sync.Mutex
activeConns map[*serverConn]struct{}
@@ -440,13 +410,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
baseCtx, cancel := serverConnBaseContext(c, opts)
defer cancel()
+ http1srv := opts.baseConfig()
+ conf := configFromServer(http1srv, s)
sc := &serverConn{
srv: s,
- hs: opts.baseConfig(),
+ hs: http1srv,
conn: c,
baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(),
- bw: newBufferedWriter(c),
+ bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout),
handler: opts.handler(),
streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult),
@@ -456,9 +428,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
doneServing: make(chan struct{}),
clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
- advMaxStreams: s.maxConcurrentStreams(),
+ advMaxStreams: conf.MaxConcurrentStreams,
initialStreamSendWindowSize: initialWindowSize,
+ initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
maxFrameSize: initialMaxFrameSize,
+ pingTimeout: conf.PingTimeout,
+ countErrorFunc: conf.CountError,
serveG: newGoroutineLock(),
pushEnabled: true,
sawClientPreface: opts.SawClientPreface,
@@ -491,15 +466,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
sc.flow.add(initialWindowSize)
sc.inflow.init(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
- sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
+ sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
fr := NewFramer(sc.bw, c)
- if s.CountError != nil {
- fr.countError = s.CountError
+ if conf.CountError != nil {
+ fr.countError = conf.CountError
}
- fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil)
+ fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil)
fr.MaxHeaderListSize = sc.maxHeaderListSize()
- fr.SetMaxReadFrameSize(s.maxReadFrameSize())
+ fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
sc.framer = fr
if tc, ok := c.(connectionStater); ok {
@@ -532,7 +507,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
// So for now, do nothing here again.
}
- if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
+ if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
// "Endpoints MAY choose to generate a connection error
// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
// the prohibited cipher suites are negotiated."
@@ -569,7 +544,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
opts.UpgradeRequest = nil
}
- sc.serve()
+ sc.serve(conf)
}
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
@@ -609,6 +584,7 @@ type serverConn struct {
tlsState *tls.ConnectionState // shared by all handlers, like net/http
remoteAddrStr string
writeSched WriteScheduler
+ countErrorFunc func(errType string)
// Everything following is owned by the serve loop; use serveG.check():
serveG goroutineLock // used to verify funcs are on serve()
@@ -628,6 +604,7 @@ type serverConn struct {
streams map[uint32]*stream
unstartedHandlers []unstartedHandler
initialStreamSendWindowSize int32
+ initialStreamRecvWindowSize int32
maxFrameSize int32
peerMaxHeaderListSize uint32 // zero means unknown (default)
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
@@ -638,9 +615,14 @@ type serverConn struct {
inGoAway bool // we've started to or sent GOAWAY
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ pingSent bool
+ sentPingData [8]byte
goAwayCode ErrCode
shutdownTimer timer // nil until used
idleTimer timer // nil if unused
+ readIdleTimeout time.Duration
+ pingTimeout time.Duration
+ readIdleTimer timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
@@ -655,11 +637,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 {
if n <= 0 {
n = http.DefaultMaxHeaderBytes
}
- // http2's count is in a slightly different unit and includes 32 bytes per pair.
- // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
- const perFieldOverhead = 32 // per http2 spec
- const typicalHeaders = 10 // conservative
- return uint32(n + typicalHeaders*perFieldOverhead)
+ return uint32(adjustHTTP1MaxHeaderSize(int64(n)))
}
func (sc *serverConn) curOpenStreams() uint32 {
@@ -923,7 +901,7 @@ func (sc *serverConn) notePanic() {
}
}
-func (sc *serverConn) serve() {
+func (sc *serverConn) serve(conf http2Config) {
sc.serveG.check()
defer sc.notePanic()
defer sc.conn.Close()
@@ -937,18 +915,18 @@ func (sc *serverConn) serve() {
sc.writeFrame(FrameWriteRequest{
write: writeSettings{
- {SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
+ {SettingMaxFrameSize, conf.MaxReadFrameSize},
{SettingMaxConcurrentStreams, sc.advMaxStreams},
{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
- {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()},
- {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
+ {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize},
+ {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)},
},
})
sc.unackedSettings++
// Each connection starts with initialWindowSize inflow tokens.
// If a higher value is configured, we add more tokens.
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
+ if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 {
sc.sendWindowUpdate(nil, int(diff))
}
@@ -968,11 +946,18 @@ func (sc *serverConn) serve() {
defer sc.idleTimer.Stop()
}
+ if conf.SendPingTimeout > 0 {
+ sc.readIdleTimeout = conf.SendPingTimeout
+ sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
+ defer sc.readIdleTimer.Stop()
+ }
+
go sc.readFrames() // closed by defer sc.conn.Close above
settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
+ lastFrameTime := sc.srv.now()
loopNum := 0
for {
loopNum++
@@ -986,6 +971,7 @@ func (sc *serverConn) serve() {
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
+ lastFrameTime = sc.srv.now()
// Process any written frames before reading new frames from the client since a
// written frame could have triggered a new stream to be started.
if sc.writingFrameAsync {
@@ -1017,6 +1003,8 @@ func (sc *serverConn) serve() {
case idleTimerMsg:
sc.vlogf("connection is idle")
sc.goAway(ErrCodeNo)
+ case readIdleTimerMsg:
+ sc.handlePingTimer(lastFrameTime)
case shutdownTimerMsg:
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
return
@@ -1039,7 +1027,7 @@ func (sc *serverConn) serve() {
// If the peer is causing us to generate a lot of control frames,
// but not reading them from us, assume they are trying to make us
// run out of memory.
- if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
+ if sc.queuedControlFrames > maxQueuedControlFrames {
sc.vlogf("http2: too many control frames in send queue, closing connection")
return
}
@@ -1055,12 +1043,39 @@ func (sc *serverConn) serve() {
}
}
+func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
+ if sc.pingSent {
+ sc.vlogf("timeout waiting for PING response")
+ sc.conn.Close()
+ return
+ }
+
+ pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
+ now := sc.srv.now()
+ if pingAt.After(now) {
+ // We received frames since arming the ping timer.
+ // Reset it for the next possible timeout.
+ sc.readIdleTimer.Reset(pingAt.Sub(now))
+ return
+ }
+
+ sc.pingSent = true
+ // Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does
+ // is we send a PING frame containing 0s.
+ _, _ = rand.Read(sc.sentPingData[:])
+ sc.writeFrame(FrameWriteRequest{
+ write: &writePing{data: sc.sentPingData},
+ })
+ sc.readIdleTimer.Reset(sc.pingTimeout)
+}
+
type serverMessage int
// Message values sent to serveMsgCh.
var (
settingsTimerMsg = new(serverMessage)
idleTimerMsg = new(serverMessage)
+ readIdleTimerMsg = new(serverMessage)
shutdownTimerMsg = new(serverMessage)
gracefulShutdownMsg = new(serverMessage)
handlerDoneMsg = new(serverMessage)
@@ -1068,6 +1083,7 @@ var (
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) }
+func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) }
func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
func (sc *serverConn) sendServeMsg(msg interface{}) {
@@ -1320,6 +1336,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
sc.writingFrame = false
sc.writingFrameAsync = false
+ if res.err != nil {
+ sc.conn.Close()
+ }
+
wr := res.wr
if writeEndsStream(wr.write) {
@@ -1594,6 +1614,11 @@ func (sc *serverConn) processFrame(f Frame) error {
func (sc *serverConn) processPing(f *PingFrame) error {
sc.serveG.check()
if f.IsAck() {
+ if sc.pingSent && sc.sentPingData == f.Data {
+ // This is a response to a PING we sent.
+ sc.pingSent = false
+ sc.readIdleTimer.Reset(sc.readIdleTimeout)
+ }
// 6.7 PING: " An endpoint MUST NOT respond to PING frames
// containing this flag."
return nil
@@ -2160,7 +2185,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
st.cw.Init()
st.flow.conn = &sc.flow // link to conn-level counter
st.flow.add(sc.initialStreamSendWindowSize)
- st.inflow.init(sc.srv.initialStreamRecvWindowSize())
+ st.inflow.init(sc.initialStreamRecvWindowSize)
if sc.hs.WriteTimeout > 0 {
st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
@@ -3301,7 +3326,7 @@ func (sc *serverConn) countError(name string, err error) error {
if sc == nil || sc.srv == nil {
return err
}
- f := sc.srv.CountError
+ f := sc.countErrorFunc
if f == nil {
return err
}
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 61f511f9..0c5f64aa 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -25,7 +25,6 @@ import (
"net/http"
"net/http/httptrace"
"net/textproto"
- "os"
"sort"
"strconv"
"strings"
@@ -227,40 +226,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co
}
func (t *Transport) maxHeaderListSize() uint32 {
- if t.MaxHeaderListSize == 0 {
+ n := int64(t.MaxHeaderListSize)
+ if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 {
+ n = t.t1.MaxResponseHeaderBytes
+ if n > 0 {
+ n = adjustHTTP1MaxHeaderSize(n)
+ }
+ }
+ if n <= 0 {
return 10 << 20
}
- if t.MaxHeaderListSize == 0xffffffff {
+ if n >= 0xffffffff {
return 0
}
- return t.MaxHeaderListSize
-}
-
-func (t *Transport) maxFrameReadSize() uint32 {
- if t.MaxReadFrameSize == 0 {
- return 0 // use the default provided by the peer
- }
- if t.MaxReadFrameSize < minMaxFrameSize {
- return minMaxFrameSize
- }
- if t.MaxReadFrameSize > maxFrameSize {
- return maxFrameSize
- }
- return t.MaxReadFrameSize
+ return uint32(n)
}
func (t *Transport) disableCompression() bool {
return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
}
-func (t *Transport) pingTimeout() time.Duration {
- if t.PingTimeout == 0 {
- return 15 * time.Second
- }
- return t.PingTimeout
-
-}
-
// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
// It returns an error if t1 has already been HTTP/2-enabled.
//
@@ -370,11 +355,14 @@ type ClientConn struct {
lastActive time.Time
lastIdle time.Time // time last idle
// Settings from peer: (also guarded by wmu)
- maxFrameSize uint32
- maxConcurrentStreams uint32
- peerMaxHeaderListSize uint64
- peerMaxHeaderTableSize uint32
- initialWindowSize uint32
+ maxFrameSize uint32
+ maxConcurrentStreams uint32
+ peerMaxHeaderListSize uint64
+ peerMaxHeaderTableSize uint32
+ initialWindowSize uint32
+ initialStreamRecvWindowSize int32
+ readIdleTimeout time.Duration
+ pingTimeout time.Duration
// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
// Write to reqHeaderMu to lock it, read from it to unlock.
@@ -499,6 +487,7 @@ func (cs *clientStream) closeReqBodyLocked() {
}
type stickyErrWriter struct {
+ group synctestGroupInterface
conn net.Conn
timeout time.Duration
err *error
@@ -508,22 +497,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
if *sew.err != nil {
return 0, *sew.err
}
- for {
- if sew.timeout != 0 {
- sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout))
- }
- nn, err := sew.conn.Write(p[n:])
- n += nn
- if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) {
- // Keep extending the deadline so long as we're making progress.
- continue
- }
- if sew.timeout != 0 {
- sew.conn.SetWriteDeadline(time.Time{})
- }
- *sew.err = err
- return n, err
- }
+ n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p)
+ *sew.err = err
+ return n, err
}
// noCachedConnError is the concrete type of ErrNoCachedConn, which
@@ -758,44 +734,36 @@ func (t *Transport) expectContinueTimeout() time.Duration {
return t.t1.ExpectContinueTimeout
}
-func (t *Transport) maxDecoderHeaderTableSize() uint32 {
- if v := t.MaxDecoderHeaderTableSize; v > 0 {
- return v
- }
- return initialHeaderTableSize
-}
-
-func (t *Transport) maxEncoderHeaderTableSize() uint32 {
- if v := t.MaxEncoderHeaderTableSize; v > 0 {
- return v
- }
- return initialHeaderTableSize
-}
-
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
return t.newClientConn(c, t.disableKeepAlives())
}
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
+ conf := configFromTransport(t)
cc := &ClientConn{
- t: t,
- tconn: c,
- readerDone: make(chan struct{}),
- nextStreamID: 1,
- maxFrameSize: 16 << 10, // spec default
- initialWindowSize: 65535, // spec default
- maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
- peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
- streams: make(map[uint32]*clientStream),
- singleUse: singleUse,
- wantSettingsAck: true,
- pings: make(map[[8]byte]chan struct{}),
- reqHeaderMu: make(chan struct{}, 1),
- }
+ t: t,
+ tconn: c,
+ readerDone: make(chan struct{}),
+ nextStreamID: 1,
+ maxFrameSize: 16 << 10, // spec default
+ initialWindowSize: 65535, // spec default
+ initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
+ maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
+ peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
+ streams: make(map[uint32]*clientStream),
+ singleUse: singleUse,
+ wantSettingsAck: true,
+ readIdleTimeout: conf.SendPingTimeout,
+ pingTimeout: conf.PingTimeout,
+ pings: make(map[[8]byte]chan struct{}),
+ reqHeaderMu: make(chan struct{}, 1),
+ }
+ var group synctestGroupInterface
if t.transportTestHooks != nil {
t.markNewGoroutine()
t.transportTestHooks.newclientconn(cc)
c = cc.tconn
+ group = t.group
}
if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@@ -807,24 +775,23 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// TODO: adjust this writer size to account for frame size +
// MTU + crypto/tls record padding.
cc.bw = bufio.NewWriter(stickyErrWriter{
+ group: group,
conn: c,
- timeout: t.WriteByteTimeout,
+ timeout: conf.WriteByteTimeout,
err: &cc.werr,
})
cc.br = bufio.NewReader(c)
cc.fr = NewFramer(cc.bw, cc.br)
- if t.maxFrameReadSize() != 0 {
- cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize())
- }
+ cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
if t.CountError != nil {
cc.fr.countError = t.CountError
}
- maxHeaderTableSize := t.maxDecoderHeaderTableSize()
+ maxHeaderTableSize := conf.MaxDecoderHeaderTableSize
cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil)
cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
cc.henc = hpack.NewEncoder(&cc.hbuf)
- cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize())
+ cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
cc.peerMaxHeaderTableSize = initialHeaderTableSize
if cs, ok := c.(connectionStater); ok {
@@ -834,11 +801,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
initialSettings := []Setting{
{ID: SettingEnablePush, Val: 0},
- {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
- }
- if max := t.maxFrameReadSize(); max != 0 {
- initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max})
+ {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)},
}
+ initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize})
if max := t.maxHeaderListSize(); max != 0 {
initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
}
@@ -848,8 +813,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
cc.bw.Write(clientPreface)
cc.fr.WriteSettings(initialSettings...)
- cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
- cc.inflow.init(transportDefaultConnFlow + initialWindowSize)
+ cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection))
+ cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize)
cc.bw.Flush()
if cc.werr != nil {
cc.Close()
@@ -867,7 +832,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
}
func (cc *ClientConn) healthCheck() {
- pingTimeout := cc.t.pingTimeout()
+ pingTimeout := cc.pingTimeout
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
// trigger the healthCheck again if there is no frame received.
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
@@ -2199,7 +2164,7 @@ type resAndError struct {
func (cc *ClientConn) addStreamLocked(cs *clientStream) {
cs.flow.add(int32(cc.initialWindowSize))
cs.flow.setConnFlow(&cc.flow)
- cs.inflow.init(transportDefaultStreamFlow)
+ cs.inflow.init(cc.initialStreamRecvWindowSize)
cs.ID = cc.nextStreamID
cc.nextStreamID += 2
cc.streams[cs.ID] = cs
@@ -2345,7 +2310,7 @@ func (cc *ClientConn) countReadFrameError(err error) {
func (rl *clientConnReadLoop) run() error {
cc := rl.cc
gotSettings := false
- readIdleTimeout := cc.t.ReadIdleTimeout
+ readIdleTimeout := cc.readIdleTimeout
var t timer
if readIdleTimeout != 0 {
t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
index 33f61398..6ff6bee7 100644
--- a/vendor/golang.org/x/net/http2/write.go
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error {
func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+type writePing struct {
+ data [8]byte
+}
+
+func (w writePing) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WritePing(false, w.data)
+}
+
+func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max }
+
type writePingAck struct{ pf *PingFrame }
func (w writePingAck) writeFrame(ctx writeContext) error {
diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE
index 6a66aea5..2a7cf70d 100644
--- a/vendor/golang.org/x/sync/LICENSE
+++ b/vendor/golang.org/x/sync/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE
index 6a66aea5..2a7cf70d 100644
--- a/vendor/golang.org/x/sys/LICENSE
+++ b/vendor/golang.org/x/sys/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go
index 8fa707aa..02609d5b 100644
--- a/vendor/golang.org/x/sys/cpu/cpu.go
+++ b/vendor/golang.org/x/sys/cpu/cpu.go
@@ -105,6 +105,8 @@ var ARM64 struct {
HasSVE bool // Scalable Vector Extensions
HasSVE2 bool // Scalable Vector Extensions 2
HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32
+ HasDIT bool // Data Independent Timing support
+ HasI8MM bool // Advanced SIMD Int8 matrix multiplication instructions
_ CacheLinePad
}
@@ -199,6 +201,25 @@ var S390X struct {
_ CacheLinePad
}
+// RISCV64 contains the supported CPU features and performance characteristics for riscv64
+// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate
+// the presence of RISC-V extensions.
+//
+// It is safe to assume that all the RV64G extensions are supported and so they are omitted from
+// this structure. As riscv64 Go programs require at least RV64G, the code that populates
+// this structure cannot run successfully if some of the RV64G extensions are missing.
+// The struct is padded to avoid false sharing.
+var RISCV64 struct {
+ _ CacheLinePad
+ HasFastMisaligned bool // Fast misaligned accesses
+ HasC bool // Compressed instruction-set extension
+ HasV bool // Vector extension compatible with RVV 1.0
+ HasZba bool // Address generation instructions extension
+ HasZbb bool // Basic bit-manipulation extension
+ HasZbs bool // Single-bit instructions extension
+ _ CacheLinePad
+}
+
func init() {
archInit()
initOptions()
diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go
index 0e27a21e..af2aa99f 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go
@@ -38,6 +38,8 @@ func initOptions() {
{Name: "dcpop", Feature: &ARM64.HasDCPOP},
{Name: "asimddp", Feature: &ARM64.HasASIMDDP},
{Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM},
+ {Name: "dit", Feature: &ARM64.HasDIT},
+ {Name: "i8mm", Feature: &ARM64.HasI8MM},
}
}
@@ -145,6 +147,11 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) {
ARM64.HasLRCPC = true
}
+ switch extractBits(isar1, 52, 55) {
+ case 1:
+ ARM64.HasI8MM = true
+ }
+
// ID_AA64PFR0_EL1
switch extractBits(pfr0, 16, 19) {
case 0:
@@ -168,6 +175,11 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) {
parseARM64SVERegister(getzfr0())
}
+
+ switch extractBits(pfr0, 48, 51) {
+ case 1:
+ ARM64.HasDIT = true
+ }
}
func parseARM64SVERegister(zfr0 uint64) {
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
index 3d386d0f..08f35ea1 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
@@ -35,8 +35,10 @@ const (
hwcap_SHA512 = 1 << 21
hwcap_SVE = 1 << 22
hwcap_ASIMDFHM = 1 << 23
+ hwcap_DIT = 1 << 24
hwcap2_SVE2 = 1 << 1
+ hwcap2_I8MM = 1 << 13
)
// linuxKernelCanEmulateCPUID reports whether we're running
@@ -106,9 +108,12 @@ func doinit() {
ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512)
ARM64.HasSVE = isSet(hwCap, hwcap_SVE)
ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM)
+ ARM64.HasDIT = isSet(hwCap, hwcap_DIT)
+
// HWCAP2 feature bits
ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2)
+ ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM)
}
func isSet(hwc uint, value uint) bool {
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
index cd63e733..7d902b68 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x
+//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64
package cpu
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go
new file mode 100644
index 00000000..cb4a0c57
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go
@@ -0,0 +1,137 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe
+// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available.
+//
+// A note on detection of the Vector extension using HWCAP.
+//
+// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5.
+// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe
+// syscall is not available then neither is the Vector extension (which needs kernel support).
+// The riscv_hwprobe syscall should then be all we need to detect the Vector extension.
+// However, some RISC-V board manufacturers ship boards with an older kernel on top of which
+// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe
+// patches. These kernels advertise support for the Vector extension using HWCAP. Falling
+// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not
+// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option.
+//
+// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by
+// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board
+// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified
+// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use
+// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector
+// extension are binary incompatible. HWCAP can then not be used in isolation to populate the
+// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0.
+//
+// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector
+// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype
+// register. This check would allow us to safely detect version 1.0 of the Vector extension
+// with HWCAP, if riscv_hwprobe were not available. However, the check cannot
+// be added until the assembler supports the Vector instructions.
+//
+// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the
+// extensions it advertises support for are explicitly versioned. It's also worth noting that
+// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba.
+// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority
+// of RISC-V extensions.
+//
+// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information.
+
+// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must
+// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall
+// here.
+
+const (
+ // Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go.
+ riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4
+ riscv_HWPROBE_IMA_C = 0x2
+ riscv_HWPROBE_IMA_V = 0x4
+ riscv_HWPROBE_EXT_ZBA = 0x8
+ riscv_HWPROBE_EXT_ZBB = 0x10
+ riscv_HWPROBE_EXT_ZBS = 0x20
+ riscv_HWPROBE_KEY_CPUPERF_0 = 0x5
+ riscv_HWPROBE_MISALIGNED_FAST = 0x3
+ riscv_HWPROBE_MISALIGNED_MASK = 0x7
+)
+
+const (
+ // sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go.
+ sys_RISCV_HWPROBE = 258
+)
+
+// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go.
+type riscvHWProbePairs struct {
+ key int64
+ value uint64
+}
+
+const (
+ // CPU features
+ hwcap_RISCV_ISA_C = 1 << ('C' - 'A')
+)
+
+func doinit() {
+ // A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key
+ // field should be initialised with one of the key constants defined above, e.g.,
+ // RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value.
+ // If the kernel does not recognise a key it will set the key field to -1 and the value field to 0.
+
+ pairs := []riscvHWProbePairs{
+ {riscv_HWPROBE_KEY_IMA_EXT_0, 0},
+ {riscv_HWPROBE_KEY_CPUPERF_0, 0},
+ }
+
+ // This call only indicates that extensions are supported if they are implemented on all cores.
+ if riscvHWProbe(pairs, 0) {
+ if pairs[0].key != -1 {
+ v := uint(pairs[0].value)
+ RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C)
+ RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V)
+ RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA)
+ RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB)
+ RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS)
+ }
+ if pairs[1].key != -1 {
+ v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK
+ RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST
+ }
+ }
+
+ // Let's double check with HWCAP if the C extension does not appear to be supported.
+ // This may happen if we're running on a kernel older than 6.4.
+
+ if !RISCV64.HasC {
+ RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C)
+ }
+}
+
+func isSet(hwc uint, value uint) bool {
+ return hwc&value != 0
+}
+
+// riscvHWProbe is a simplified version of the generated wrapper function found in
+// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. We simplify it by removing the
+// cpuCount and cpus parameters which we do not need. We always want to pass 0 for
+// these parameters here so the kernel only reports the extensions that are present
+// on all cores.
+func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool {
+ var _zero uintptr
+ var p0 unsafe.Pointer
+ if len(pairs) > 0 {
+ p0 = unsafe.Pointer(&pairs[0])
+ } else {
+ p0 = unsafe.Pointer(&_zero)
+ }
+
+ _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0)
+ return e1 == 0
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
index 7f0c79c0..aca3199c 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go
@@ -8,4 +8,13 @@ package cpu
const cacheLineSize = 64
-func initOptions() {}
+func initOptions() {
+ options = []option{
+ {Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned},
+ {Name: "c", Feature: &RISCV64.HasC},
+ {Name: "v", Feature: &RISCV64.HasV},
+ {Name: "zba", Feature: &RISCV64.HasZba},
+ {Name: "zbb", Feature: &RISCV64.HasZbb},
+ {Name: "zbs", Feature: &RISCV64.HasZbs},
+ }
+}
diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md
index 7d3c060e..6e08a76a 100644
--- a/vendor/golang.org/x/sys/unix/README.md
+++ b/vendor/golang.org/x/sys/unix/README.md
@@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these
into a common file for each OS.
The merge is performed in the following steps:
-1. Construct the set of common code that is idential in all architecture-specific files.
+1. Construct the set of common code that is identical in all architecture-specific files.
2. Write this common code to the merged file.
3. Remove the common code from all architecture-specific files.
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index 4ed2e488..ac54ecab 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -58,6 +58,7 @@ includes_Darwin='
#define _DARWIN_USE_64_BIT_INODE
#define __APPLE_USE_RFC_3542
#include
+#include
#include
#include
#include
@@ -551,6 +552,7 @@ ccflags="$@"
$2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ &&
$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ ||
$2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ ||
+ $2 ~ /^(CONNECT|SAE)_/ ||
$2 ~ /^FIORDCHK$/ ||
$2 ~ /^SIOC/ ||
$2 ~ /^TIOC/ ||
@@ -654,7 +656,7 @@ errors=$(
signals=$(
echo '#include ' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' |
- grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' |
+ grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
sort
)
@@ -664,7 +666,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags |
sort >_error.grep
echo '#include ' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' |
- grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' |
+ grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
sort >_signal.grep
echo '// mkerrors.sh' "$@"
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go
index 67ce6cef..6f15ba1e 100644
--- a/vendor/golang.org/x/sys/unix/syscall_aix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_aix.go
@@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int,
var status _C_int
var r Pid_t
err = ERESTART
- // AIX wait4 may return with ERESTART errno, while the processus is still
+ // AIX wait4 may return with ERESTART errno, while the process is still
// active.
for err == ERESTART {
r, err = wait4(Pid_t(pid), &status, options, rusage)
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 4cc7b005..099867de 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -402,6 +402,18 @@ func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error {
return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq))
}
+//sys renamexNp(from string, to string, flag uint32) (err error)
+
+func RenamexNp(from string, to string, flag uint32) (err error) {
+ return renamexNp(from, to, flag)
+}
+
+//sys renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error)
+
+func RenameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) {
+ return renameatxNp(fromfd, from, tofd, to, flag)
+}
+
//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL
func Uname(uname *Utsname) error {
@@ -554,6 +566,43 @@ func PthreadFchdir(fd int) (err error) {
return pthread_fchdir_np(fd)
}
+// Connectx calls connectx(2) to initiate a connection on a socket.
+//
+// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument.
+//
+// - srcIf is the optional source interface index. 0 means unspecified.
+// - srcAddr is the optional source address. nil means unspecified.
+// - dstAddr is the destination address.
+//
+// On success, Connectx returns the number of bytes enqueued for transmission.
+func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) {
+ endpoints := SaEndpoints{
+ Srcif: srcIf,
+ }
+
+ if srcAddr != nil {
+ addrp, addrlen, err := srcAddr.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ endpoints.Srcaddr = (*RawSockaddr)(addrp)
+ endpoints.Srcaddrlen = uint32(addrlen)
+ }
+
+ if dstAddr != nil {
+ addrp, addrlen, err := dstAddr.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ endpoints.Dstaddr = (*RawSockaddr)(addrp)
+ endpoints.Dstaddrlen = uint32(addrlen)
+ }
+
+ err = connectx(fd, &endpoints, associd, flags, iov, &n, connid)
+ return
+}
+
+//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go
index ba46651f..a6a2d2fc 100644
--- a/vendor/golang.org/x/sys/unix/syscall_hurd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go
@@ -11,6 +11,7 @@ package unix
int ioctl(int, unsigned long int, uintptr_t);
*/
import "C"
+import "unsafe"
func ioctl(fd int, req uint, arg uintptr) (err error) {
r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg))
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 5682e262..f08abd43 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -1295,6 +1295,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) {
return &value, err
}
+// GetsockoptTCPCCVegasInfo returns algorithm specific congestion control information for a socket using the "vegas"
+// algorithm.
+//
+// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) {
+ var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+ vallen := _Socklen(SizeofTCPCCInfo)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+ out := (*TCPVegasInfo)(unsafe.Pointer(&value[0]))
+ return out, err
+}
+
+// GetsockoptTCPCCDCTCPInfo returns algorithm specific congestion control information for a socket using the "dctp"
+// algorithm.
+//
+// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) {
+ var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+ vallen := _Socklen(SizeofTCPCCInfo)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+ out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0]))
+ return out, err
+}
+
+// GetsockoptTCPCCBBRInfo returns algorithm specific congestion control information for a socket using the "bbr"
+// algorithm.
+//
+// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) {
+ var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+ vallen := _Socklen(SizeofTCPCCInfo)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+ out := (*TCPBBRInfo)(unsafe.Pointer(&value[0]))
+ return out, err
+}
+
// GetsockoptString returns the string value of the socket option opt for the
// socket associated with fd at the given socket level.
func GetsockoptString(fd, level, opt int) (string, error) {
@@ -1959,7 +2001,26 @@ func Getpgrp() (pid int) {
//sysnb Getpid() (pid int)
//sysnb Getppid() (ppid int)
//sys Getpriority(which int, who int) (prio int, err error)
-//sys Getrandom(buf []byte, flags int) (n int, err error)
+
+func Getrandom(buf []byte, flags int) (n int, err error) {
+ vdsoRet, supported := vgetrandom(buf, uint32(flags))
+ if supported {
+ if vdsoRet < 0 {
+ return 0, errnoErr(syscall.Errno(-vdsoRet))
+ }
+ return vdsoRet, nil
+ }
+ var p *byte
+ if len(buf) > 0 {
+ p = &buf[0]
+ }
+ r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags))
+ if e != 0 {
+ return 0, errnoErr(e)
+ }
+ return int(r), nil
+}
+
//sysnb Getrusage(who int, rusage *Rusage) (err error)
//sysnb Getsid(pid int) (sid int, err error)
//sysnb Gettid() (tid int)
@@ -2592,3 +2653,4 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) {
}
//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error)
+//sys Mseal(b []byte, flags uint) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
index cf2ee6c7..745e5c7e 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
@@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
}
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
}
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
index 3d0e9845..dd2262a4 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
@@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
}
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
}
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
index 6f5a2889..8cf3670b 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
@@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error
}
return riscvHWProbe(pairs, setSize, set, flags)
}
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
index b25343c7..b86ded54 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
@@ -293,6 +293,7 @@ func Uname(uname *Utsname) error {
//sys Mkfifoat(dirfd int, path string, mode uint32) (err error)
//sys Mknod(path string, mode uint32, dev int) (err error)
//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
+//sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error)
//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
//sys Open(path string, mode int, perm uint32) (fd int, err error)
//sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error)
diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_linux.go b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go
new file mode 100644
index 00000000..07ac8e09
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go
@@ -0,0 +1,13 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && go1.24
+
+package unix
+
+import _ "unsafe"
+
+//go:linkname vgetrandom runtime.vgetrandom
+//go:noescape
+func vgetrandom(p []byte, flags uint32) (ret int, supported bool)
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go
similarity index 56%
rename from vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
rename to vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go
index 1a9efa12..297e97bc 100644
--- a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
+++ b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go
@@ -2,13 +2,10 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build go1.20
-// +build go1.20
+//go:build !linux || !go1.24
-package versions
+package unix
-func init() {
- if Compare(toolchain, Go1_20) < 0 {
- toolchain = Go1_20
- }
+func vgetrandom(p []byte, flags uint32) (ret int, supported bool) {
+ return -1, false
}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
index e40fa852..d73c4652 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go
@@ -237,6 +237,9 @@ const (
CLOCK_UPTIME_RAW_APPROX = 0x9
CLONE_NOFOLLOW = 0x1
CLONE_NOOWNERCOPY = 0x2
+ CONNECT_DATA_AUTHENTICATED = 0x4
+ CONNECT_DATA_IDEMPOTENT = 0x2
+ CONNECT_RESUME_ON_READ_WRITE = 0x1
CR0 = 0x0
CR1 = 0x1000
CR2 = 0x2000
@@ -1169,6 +1172,11 @@ const (
PT_WRITE_D = 0x5
PT_WRITE_I = 0x4
PT_WRITE_U = 0x6
+ RENAME_EXCL = 0x4
+ RENAME_NOFOLLOW_ANY = 0x10
+ RENAME_RESERVED1 = 0x8
+ RENAME_SECLUDE = 0x1
+ RENAME_SWAP = 0x2
RLIMIT_AS = 0x5
RLIMIT_CORE = 0x4
RLIMIT_CPU = 0x0
@@ -1260,6 +1268,10 @@ const (
RTV_SSTHRESH = 0x20
RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0
+ SAE_ASSOCID_ALL = 0xffffffff
+ SAE_ASSOCID_ANY = 0x0
+ SAE_CONNID_ALL = 0xffffffff
+ SAE_CONNID_ANY = 0x0
SCM_CREDS = 0x3
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
index bb02aa6c..4a55a400 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go
@@ -237,6 +237,9 @@ const (
CLOCK_UPTIME_RAW_APPROX = 0x9
CLONE_NOFOLLOW = 0x1
CLONE_NOOWNERCOPY = 0x2
+ CONNECT_DATA_AUTHENTICATED = 0x4
+ CONNECT_DATA_IDEMPOTENT = 0x2
+ CONNECT_RESUME_ON_READ_WRITE = 0x1
CR0 = 0x0
CR1 = 0x1000
CR2 = 0x2000
@@ -1169,6 +1172,11 @@ const (
PT_WRITE_D = 0x5
PT_WRITE_I = 0x4
PT_WRITE_U = 0x6
+ RENAME_EXCL = 0x4
+ RENAME_NOFOLLOW_ANY = 0x10
+ RENAME_RESERVED1 = 0x8
+ RENAME_SECLUDE = 0x1
+ RENAME_SWAP = 0x2
RLIMIT_AS = 0x5
RLIMIT_CORE = 0x4
RLIMIT_CPU = 0x0
@@ -1260,6 +1268,10 @@ const (
RTV_SSTHRESH = 0x20
RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0
+ SAE_ASSOCID_ALL = 0xffffffff
+ SAE_ASSOCID_ANY = 0x0
+ SAE_CONNID_ALL = 0xffffffff
+ SAE_CONNID_ANY = 0x0
SCM_CREDS = 0x3
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 877a62b4..de3b4624 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -457,6 +457,7 @@ const (
B600 = 0x8
B75 = 0x2
B9600 = 0xd
+ BCACHEFS_SUPER_MAGIC = 0xca451a4e
BDEVFS_MAGIC = 0x62646576
BINDERFS_SUPER_MAGIC = 0x6c6f6f70
BINFMTFS_MAGIC = 0x42494e4d
@@ -494,6 +495,7 @@ const (
BPF_F_TEST_REG_INVARIANTS = 0x80
BPF_F_TEST_RND_HI32 = 0x4
BPF_F_TEST_RUN_ON_CPU = 0x1
+ BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4
BPF_F_TEST_STATE_FREQ = 0x8
BPF_F_TEST_XDP_LIVE_FRAMES = 0x2
BPF_F_XDP_DEV_BOUND_ONLY = 0x40
@@ -928,6 +930,7 @@ const (
EPOLL_CTL_ADD = 0x1
EPOLL_CTL_DEL = 0x2
EPOLL_CTL_MOD = 0x3
+ EPOLL_IOC_TYPE = 0x8a
EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2
ESP_V4_FLOW = 0xa
ESP_V6_FLOW = 0xc
@@ -941,9 +944,6 @@ const (
ETHTOOL_FEC_OFF = 0x4
ETHTOOL_FEC_RS = 0x8
ETHTOOL_FLAG_ALL = 0x7
- ETHTOOL_FLAG_COMPACT_BITSETS = 0x1
- ETHTOOL_FLAG_OMIT_REPLY = 0x2
- ETHTOOL_FLAG_STATS = 0x4
ETHTOOL_FLASHDEV = 0x33
ETHTOOL_FLASH_MAX_FILENAME = 0x80
ETHTOOL_FWVERS_LEN = 0x20
@@ -1705,6 +1705,7 @@ const (
KEXEC_ARCH_S390 = 0x160000
KEXEC_ARCH_SH = 0x2a0000
KEXEC_ARCH_X86_64 = 0x3e0000
+ KEXEC_CRASH_HOTPLUG_SUPPORT = 0x8
KEXEC_FILE_DEBUG = 0x8
KEXEC_FILE_NO_INITRAMFS = 0x4
KEXEC_FILE_ON_CRASH = 0x2
@@ -1780,6 +1781,7 @@ const (
KEY_SPEC_USER_KEYRING = -0x4
KEY_SPEC_USER_SESSION_KEYRING = -0x5
LANDLOCK_ACCESS_FS_EXECUTE = 0x1
+ LANDLOCK_ACCESS_FS_IOCTL_DEV = 0x8000
LANDLOCK_ACCESS_FS_MAKE_BLOCK = 0x800
LANDLOCK_ACCESS_FS_MAKE_CHAR = 0x40
LANDLOCK_ACCESS_FS_MAKE_DIR = 0x80
@@ -1861,6 +1863,19 @@ const (
MAP_FILE = 0x0
MAP_FIXED = 0x10
MAP_FIXED_NOREPLACE = 0x100000
+ MAP_HUGE_16GB = 0x88000000
+ MAP_HUGE_16KB = 0x38000000
+ MAP_HUGE_16MB = 0x60000000
+ MAP_HUGE_1GB = 0x78000000
+ MAP_HUGE_1MB = 0x50000000
+ MAP_HUGE_256MB = 0x70000000
+ MAP_HUGE_2GB = 0x7c000000
+ MAP_HUGE_2MB = 0x54000000
+ MAP_HUGE_32MB = 0x64000000
+ MAP_HUGE_512KB = 0x4c000000
+ MAP_HUGE_512MB = 0x74000000
+ MAP_HUGE_64KB = 0x40000000
+ MAP_HUGE_8MB = 0x5c000000
MAP_HUGE_MASK = 0x3f
MAP_HUGE_SHIFT = 0x1a
MAP_PRIVATE = 0x2
@@ -1908,6 +1923,7 @@ const (
MNT_EXPIRE = 0x4
MNT_FORCE = 0x1
MNT_ID_REQ_SIZE_VER0 = 0x18
+ MNT_ID_REQ_SIZE_VER1 = 0x20
MODULE_INIT_COMPRESSED_FILE = 0x4
MODULE_INIT_IGNORE_MODVERSIONS = 0x1
MODULE_INIT_IGNORE_VERMAGIC = 0x2
@@ -2173,7 +2189,7 @@ const (
NFT_REG_SIZE = 0x10
NFT_REJECT_ICMPX_MAX = 0x3
NFT_RT_MAX = 0x4
- NFT_SECMARK_CTX_MAXLEN = 0x100
+ NFT_SECMARK_CTX_MAXLEN = 0x1000
NFT_SET_MAXNAMELEN = 0x100
NFT_SOCKET_MAX = 0x3
NFT_TABLE_F_MASK = 0x7
@@ -2342,9 +2358,11 @@ const (
PERF_MEM_LVLNUM_IO = 0xa
PERF_MEM_LVLNUM_L1 = 0x1
PERF_MEM_LVLNUM_L2 = 0x2
+ PERF_MEM_LVLNUM_L2_MHB = 0x5
PERF_MEM_LVLNUM_L3 = 0x3
PERF_MEM_LVLNUM_L4 = 0x4
PERF_MEM_LVLNUM_LFB = 0xc
+ PERF_MEM_LVLNUM_MSC = 0x6
PERF_MEM_LVLNUM_NA = 0xf
PERF_MEM_LVLNUM_PMEM = 0xe
PERF_MEM_LVLNUM_RAM = 0xd
@@ -2417,6 +2435,7 @@ const (
PRIO_PGRP = 0x1
PRIO_PROCESS = 0x0
PRIO_USER = 0x2
+ PROCFS_IOCTL_MAGIC = 'f'
PROC_SUPER_MAGIC = 0x9fa0
PROT_EXEC = 0x4
PROT_GROWSDOWN = 0x1000000
@@ -2498,6 +2517,23 @@ const (
PR_PAC_GET_ENABLED_KEYS = 0x3d
PR_PAC_RESET_KEYS = 0x36
PR_PAC_SET_ENABLED_KEYS = 0x3c
+ PR_PPC_DEXCR_CTRL_CLEAR = 0x4
+ PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10
+ PR_PPC_DEXCR_CTRL_EDITABLE = 0x1
+ PR_PPC_DEXCR_CTRL_MASK = 0x1f
+ PR_PPC_DEXCR_CTRL_SET = 0x2
+ PR_PPC_DEXCR_CTRL_SET_ONEXEC = 0x8
+ PR_PPC_DEXCR_IBRTPD = 0x1
+ PR_PPC_DEXCR_NPHIE = 0x3
+ PR_PPC_DEXCR_SBHE = 0x0
+ PR_PPC_DEXCR_SRAPD = 0x2
+ PR_PPC_GET_DEXCR = 0x48
+ PR_PPC_SET_DEXCR = 0x49
+ PR_RISCV_CTX_SW_FENCEI_OFF = 0x1
+ PR_RISCV_CTX_SW_FENCEI_ON = 0x0
+ PR_RISCV_SCOPE_PER_PROCESS = 0x0
+ PR_RISCV_SCOPE_PER_THREAD = 0x1
+ PR_RISCV_SET_ICACHE_FLUSH_CTX = 0x47
PR_RISCV_V_GET_CONTROL = 0x46
PR_RISCV_V_SET_CONTROL = 0x45
PR_RISCV_V_VSTATE_CTRL_CUR_MASK = 0x3
@@ -2902,11 +2938,12 @@ const (
RUSAGE_SELF = 0x0
RUSAGE_THREAD = 0x1
RWF_APPEND = 0x10
+ RWF_ATOMIC = 0x40
RWF_DSYNC = 0x2
RWF_HIPRI = 0x1
RWF_NOAPPEND = 0x20
RWF_NOWAIT = 0x8
- RWF_SUPPORTED = 0x3f
+ RWF_SUPPORTED = 0x7f
RWF_SYNC = 0x4
RWF_WRITE_LIFE_NOT_SET = 0x0
SCHED_BATCH = 0x3
@@ -3179,6 +3216,7 @@ const (
STATX_ATTR_MOUNT_ROOT = 0x2000
STATX_ATTR_NODUMP = 0x40
STATX_ATTR_VERITY = 0x100000
+ STATX_ATTR_WRITE_ATOMIC = 0x400000
STATX_BASIC_STATS = 0x7ff
STATX_BLOCKS = 0x400
STATX_BTIME = 0x800
@@ -3192,8 +3230,10 @@ const (
STATX_MTIME = 0x40
STATX_NLINK = 0x4
STATX_SIZE = 0x200
+ STATX_SUBVOL = 0x8000
STATX_TYPE = 0x1
STATX_UID = 0x8
+ STATX_WRITE_ATOMIC = 0x10000
STATX__RESERVED = 0x80000000
SYNC_FILE_RANGE_WAIT_AFTER = 0x4
SYNC_FILE_RANGE_WAIT_BEFORE = 0x1
@@ -3592,6 +3632,7 @@ const (
XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000
XDP_UMEM_PGOFF_FILL_RING = 0x100000000
XDP_UMEM_REG = 0x4
+ XDP_UMEM_TX_METADATA_LEN = 0x4
XDP_UMEM_TX_SW_CSUM = 0x2
XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1
XDP_USE_NEED_WAKEUP = 0x8
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index e4bc0bd5..8aa6d77c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -78,6 +78,8 @@ const (
ECHOPRT = 0x400
EFD_CLOEXEC = 0x80000
EFD_NONBLOCK = 0x800
+ EPIOCGPARAMS = 0x80088a02
+ EPIOCSPARAMS = 0x40088a01
EPOLL_CLOEXEC = 0x80000
EXTPROC = 0x10000
FF1 = 0x8000
@@ -151,9 +153,14 @@ const (
NFDBITS = 0x20
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index 689317af..da428f42 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -78,6 +78,8 @@ const (
ECHOPRT = 0x400
EFD_CLOEXEC = 0x80000
EFD_NONBLOCK = 0x800
+ EPIOCGPARAMS = 0x80088a02
+ EPIOCSPARAMS = 0x40088a01
EPOLL_CLOEXEC = 0x80000
EXTPROC = 0x10000
FF1 = 0x8000
@@ -151,9 +153,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index 5cca668a..bf45bfec 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -78,6 +78,8 @@ const (
ECHOPRT = 0x400
EFD_CLOEXEC = 0x80000
EFD_NONBLOCK = 0x800
+ EPIOCGPARAMS = 0x80088a02
+ EPIOCSPARAMS = 0x40088a01
EPOLL_CLOEXEC = 0x80000
EXTPROC = 0x10000
FF1 = 0x8000
@@ -148,9 +150,14 @@ const (
NFDBITS = 0x20
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 14270508..71c67162 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -78,6 +78,8 @@ const (
ECHOPRT = 0x400
EFD_CLOEXEC = 0x80000
EFD_NONBLOCK = 0x800
+ EPIOCGPARAMS = 0x80088a02
+ EPIOCSPARAMS = 0x40088a01
EPOLL_CLOEXEC = 0x80000
ESR_MAGIC = 0x45535201
EXTPROC = 0x10000
@@ -152,9 +154,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index 28e39afd..9476628f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -78,6 +78,8 @@ const (
ECHOPRT = 0x400
EFD_CLOEXEC = 0x80000
EFD_NONBLOCK = 0x800
+ EPIOCGPARAMS = 0x80088a02
+ EPIOCSPARAMS = 0x40088a01
EPOLL_CLOEXEC = 0x80000
EXTPROC = 0x10000
FF1 = 0x8000
@@ -152,9 +154,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index cd66e92c..b9e85f3c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -78,6 +78,8 @@ const (
ECHOPRT = 0x400
EFD_CLOEXEC = 0x80000
EFD_NONBLOCK = 0x80
+ EPIOCGPARAMS = 0x40088a02
+ EPIOCSPARAMS = 0x80088a01
EPOLL_CLOEXEC = 0x80000
EXTPROC = 0x10000
FF1 = 0x8000
@@ -148,9 +150,14 @@ const (
NFDBITS = 0x20
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index c1595eba..a48b68a7 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -78,6 +78,8 @@ const (
ECHOPRT = 0x400
EFD_CLOEXEC = 0x80000
EFD_NONBLOCK = 0x80
+ EPIOCGPARAMS = 0x40088a02
+ EPIOCSPARAMS = 0x80088a01
EPOLL_CLOEXEC = 0x80000
EXTPROC = 0x10000
FF1 = 0x8000
@@ -148,9 +150,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index ee9456b0..ea00e852 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -78,6 +78,8 @@ const (
ECHOPRT = 0x400
EFD_CLOEXEC = 0x80000
EFD_NONBLOCK = 0x80
+ EPIOCGPARAMS = 0x40088a02
+ EPIOCSPARAMS = 0x80088a01
EPOLL_CLOEXEC = 0x80000
EXTPROC = 0x10000
FF1 = 0x8000
@@ -148,9 +150,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 8cfca81e..91c64687 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -78,6 +78,8 @@ const (
ECHOPRT = 0x400
EFD_CLOEXEC = 0x80000
EFD_NONBLOCK = 0x80
+ EPIOCGPARAMS = 0x40088a02
+ EPIOCSPARAMS = 0x80088a01
EPOLL_CLOEXEC = 0x80000
EXTPROC = 0x10000
FF1 = 0x8000
@@ -148,9 +150,14 @@ const (
NFDBITS = 0x20
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index 60b0deb3..8cbf38d6 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -78,6 +78,8 @@ const (
ECHOPRT = 0x20
EFD_CLOEXEC = 0x80000
EFD_NONBLOCK = 0x800
+ EPIOCGPARAMS = 0x40088a02
+ EPIOCSPARAMS = 0x80088a01
EPOLL_CLOEXEC = 0x80000
EXTPROC = 0x10000000
FF1 = 0x4000
@@ -150,9 +152,14 @@ const (
NL3 = 0x300
NLDLY = 0x300
NOFLSH = 0x80000000
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x4
ONLCR = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index f90aa728..a2df7341 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -78,6 +78,8 @@ const (
ECHOPRT = 0x20
EFD_CLOEXEC = 0x80000
EFD_NONBLOCK = 0x800
+ EPIOCGPARAMS = 0x40088a02
+ EPIOCSPARAMS = 0x80088a01
EPOLL_CLOEXEC = 0x80000
EXTPROC = 0x10000000
FF1 = 0x4000
@@ -150,9 +152,14 @@ const (
NL3 = 0x300
NLDLY = 0x300
NOFLSH = 0x80000000
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x4
ONLCR = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index ba9e0150..24791379 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -78,6 +78,8 @@ const (
ECHOPRT = 0x20
EFD_CLOEXEC = 0x80000
EFD_NONBLOCK = 0x800
+ EPIOCGPARAMS = 0x40088a02
+ EPIOCSPARAMS = 0x80088a01
EPOLL_CLOEXEC = 0x80000
EXTPROC = 0x10000000
FF1 = 0x4000
@@ -150,9 +152,14 @@ const (
NL3 = 0x300
NLDLY = 0x300
NOFLSH = 0x80000000
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x4
ONLCR = 0x2
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index 07cdfd6e..d265f146 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -78,6 +78,8 @@ const (
ECHOPRT = 0x400
EFD_CLOEXEC = 0x80000
EFD_NONBLOCK = 0x800
+ EPIOCGPARAMS = 0x80088a02
+ EPIOCSPARAMS = 0x40088a01
EPOLL_CLOEXEC = 0x80000
EXTPROC = 0x10000
FF1 = 0x8000
@@ -148,9 +150,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 2f1dd214..3f2d6443 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -78,6 +78,8 @@ const (
ECHOPRT = 0x400
EFD_CLOEXEC = 0x80000
EFD_NONBLOCK = 0x800
+ EPIOCGPARAMS = 0x80088a02
+ EPIOCSPARAMS = 0x40088a01
EPOLL_CLOEXEC = 0x80000
EXTPROC = 0x10000
FF1 = 0x8000
@@ -148,9 +150,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x8008b705
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
+ NS_GET_PID_FROM_PIDNS = 0x8004b706
+ NS_GET_PID_IN_PIDNS = 0x8004b708
+ NS_GET_TGID_FROM_PIDNS = 0x8004b707
+ NS_GET_TGID_IN_PIDNS = 0x8004b709
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index f40519d9..5d8b727a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -82,6 +82,8 @@ const (
EFD_CLOEXEC = 0x400000
EFD_NONBLOCK = 0x4000
EMT_TAGOVF = 0x1
+ EPIOCGPARAMS = 0x40088a02
+ EPIOCSPARAMS = 0x80088a01
EPOLL_CLOEXEC = 0x400000
EXTPROC = 0x10000
FF1 = 0x8000
@@ -153,9 +155,14 @@ const (
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
+ NS_GET_MNTNS_ID = 0x4008b705
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
+ NS_GET_PID_FROM_PIDNS = 0x4004b706
+ NS_GET_PID_IN_PIDNS = 0x4004b708
+ NS_GET_TGID_FROM_PIDNS = 0x4004b707
+ NS_GET_TGID_IN_PIDNS = 0x4004b709
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go
index da08b2ab..1ec2b140 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go
@@ -581,6 +581,8 @@ const (
AT_EMPTY_PATH = 0x1000
AT_REMOVEDIR = 0x200
RENAME_NOREPLACE = 1 << 0
+ ST_RDONLY = 1
+ ST_NOSUID = 2
)
const (
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index 07642c30..24b346e1 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func renamexNp(from string, to string, flag uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(from)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(to)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_renamex_np_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(from)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(to)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_renameatx_np_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
var _p0 unsafe.Pointer
if len(mib) > 0 {
@@ -793,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) {
+ var _p0 unsafe.Pointer
+ if len(iov) > 0 {
+ _p0 = unsafe.Pointer(&iov[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_connectx_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
_, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags))
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
index 923e08cb..ebd21310 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
@@ -223,6 +223,16 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB)
+TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_renamex_np(SB)
+GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8
+DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB)
+
+TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_renameatx_np(SB)
+GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8
+DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB)
+
TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sysctl(SB)
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
@@ -238,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8
DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB)
+TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_connectx(SB)
+GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8
+DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB)
+
TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB)
GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index 7d73dda6..824b9c2d 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func renamexNp(from string, to string, flag uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(from)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(to)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_renamex_np_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(from)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(to)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_renameatx_np_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
var _p0 unsafe.Pointer
if len(mib) > 0 {
@@ -793,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) {
+ var _p0 unsafe.Pointer
+ if len(iov) > 0 {
+ _p0 = unsafe.Pointer(&iov[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_connectx_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
_, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags))
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
index 05770011..4f178a22 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
@@ -223,6 +223,16 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB)
+TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_renamex_np(SB)
+GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8
+DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB)
+
+TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_renameatx_np(SB)
+GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8
+DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB)
+
TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sysctl(SB)
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
@@ -238,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8
DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB)
+TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_connectx(SB)
+GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8
+DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB)
+
TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB)
GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index 87d8612a..af30da55 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -971,23 +971,6 @@ func Getpriority(which int, who int) (prio int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Getrandom(buf []byte, flags int) (n int, err error) {
- var _p0 unsafe.Pointer
- if len(buf) > 0 {
- _p0 = unsafe.Pointer(&buf[0])
- } else {
- _p0 = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags))
- n = int(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Getrusage(who int, rusage *Rusage) (err error) {
_, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
if e1 != 0 {
@@ -2229,3 +2212,19 @@ func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint)
}
return
}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mseal(b []byte, flags uint) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MSEAL, uintptr(_p0), uintptr(len(b)), uintptr(flags))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
index 9dc42410..1851df14 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
@@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(fsType)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(dir)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_mount_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mount mount "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
_, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s
index 41b56173..0b43c693 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s
@@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4
DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB)
+TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mount(SB)
+GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4
+DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB)
+
TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_nanosleep(SB)
GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
index 0d3a0751..e1ec0dbe 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
@@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(fsType)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(dir)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_mount_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mount mount "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
_, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s
index 4019a656..880c6d6e 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s
@@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB)
+TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mount(SB)
+GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB)
+
TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_nanosleep(SB)
GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
index c39f7776..7c8452a6 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
@@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(fsType)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(dir)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_mount_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mount mount "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
_, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s
index ac4af24f..b8ef95b0 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s
@@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4
DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB)
+TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mount(SB)
+GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4
+DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB)
+
TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_nanosleep(SB)
GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
index 57571d07..2ffdf861 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
@@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(fsType)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(dir)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_mount_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mount mount "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
_, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s
index f77d5321..2af3b5c7 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s
@@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB)
+TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mount(SB)
+GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB)
+
TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_nanosleep(SB)
GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
index e62963e6..1da08d52 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
@@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(fsType)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(dir)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_mount_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mount mount "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
_, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s
index fae140b6..b7a25135 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s
@@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB)
+TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mount(SB)
+GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB)
+
TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_nanosleep(SB)
GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
index 00831354..6e85b0aa 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
@@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(fsType)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(dir)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_mount_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mount mount "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
_, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s
index 9d1e0ff0..f15dadf0 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s
@@ -555,6 +555,12 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB)
+TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0
+ CALL libc_mount(SB)
+ RET
+GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB)
+
TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0
CALL libc_nanosleep(SB)
RET
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
index 79029ed5..28b487df 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
@@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(fsType)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(dir)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_mount_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mount mount "libc.so"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
_, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s
index da115f9a..1e7f321e 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s
@@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB)
+TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mount(SB)
+GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8
+DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB)
+
TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_nanosleep(SB)
GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index 53aef5dc..524b0820 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -457,4 +457,5 @@ const (
SYS_LSM_GET_SELF_ATTR = 459
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
+ SYS_MSEAL = 462
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index 71d52476..f485dbf4 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -341,6 +341,7 @@ const (
SYS_STATX = 332
SYS_IO_PGETEVENTS = 333
SYS_RSEQ = 334
+ SYS_URETPROBE = 335
SYS_PIDFD_SEND_SIGNAL = 424
SYS_IO_URING_SETUP = 425
SYS_IO_URING_ENTER = 426
@@ -379,4 +380,5 @@ const (
SYS_LSM_GET_SELF_ATTR = 459
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
+ SYS_MSEAL = 462
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index c7477061..70b35bf3 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -421,4 +421,5 @@ const (
SYS_LSM_GET_SELF_ATTR = 459
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
+ SYS_MSEAL = 462
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index f96e214f..1893e2fe 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -85,7 +85,7 @@ const (
SYS_SPLICE = 76
SYS_TEE = 77
SYS_READLINKAT = 78
- SYS_FSTATAT = 79
+ SYS_NEWFSTATAT = 79
SYS_FSTAT = 80
SYS_SYNC = 81
SYS_FSYNC = 82
@@ -324,4 +324,5 @@ const (
SYS_LSM_GET_SELF_ATTR = 459
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
+ SYS_MSEAL = 462
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
index 28425346..16a4017d 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
@@ -84,6 +84,8 @@ const (
SYS_SPLICE = 76
SYS_TEE = 77
SYS_READLINKAT = 78
+ SYS_NEWFSTATAT = 79
+ SYS_FSTAT = 80
SYS_SYNC = 81
SYS_FSYNC = 82
SYS_FDATASYNC = 83
@@ -318,4 +320,5 @@ const (
SYS_LSM_GET_SELF_ATTR = 459
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
+ SYS_MSEAL = 462
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index d0953018..7e567f1e 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -441,4 +441,5 @@ const (
SYS_LSM_GET_SELF_ATTR = 4459
SYS_LSM_SET_SELF_ATTR = 4460
SYS_LSM_LIST_MODULES = 4461
+ SYS_MSEAL = 4462
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 295c7f4b..38ae55e5 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -371,4 +371,5 @@ const (
SYS_LSM_GET_SELF_ATTR = 5459
SYS_LSM_SET_SELF_ATTR = 5460
SYS_LSM_LIST_MODULES = 5461
+ SYS_MSEAL = 5462
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index d1a9eaca..55e92e60 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -371,4 +371,5 @@ const (
SYS_LSM_GET_SELF_ATTR = 5459
SYS_LSM_SET_SELF_ATTR = 5460
SYS_LSM_LIST_MODULES = 5461
+ SYS_MSEAL = 5462
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index bec157c3..60658d6a 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -441,4 +441,5 @@ const (
SYS_LSM_GET_SELF_ATTR = 4459
SYS_LSM_SET_SELF_ATTR = 4460
SYS_LSM_LIST_MODULES = 4461
+ SYS_MSEAL = 4462
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
index 7ee7bdc4..e203e8a7 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
@@ -448,4 +448,5 @@ const (
SYS_LSM_GET_SELF_ATTR = 459
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
+ SYS_MSEAL = 462
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index fad1f25b..5944b97d 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -420,4 +420,5 @@ const (
SYS_LSM_GET_SELF_ATTR = 459
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
+ SYS_MSEAL = 462
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index 7d3e1635..c66d416d 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -420,4 +420,5 @@ const (
SYS_LSM_GET_SELF_ATTR = 459
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
+ SYS_MSEAL = 462
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index 0ed53ad9..a5459e76 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -84,7 +84,7 @@ const (
SYS_SPLICE = 76
SYS_TEE = 77
SYS_READLINKAT = 78
- SYS_FSTATAT = 79
+ SYS_NEWFSTATAT = 79
SYS_FSTAT = 80
SYS_SYNC = 81
SYS_FSYNC = 82
@@ -325,4 +325,5 @@ const (
SYS_LSM_GET_SELF_ATTR = 459
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
+ SYS_MSEAL = 462
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index 2fba04ad..01d86825 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -386,4 +386,5 @@ const (
SYS_LSM_GET_SELF_ATTR = 459
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
+ SYS_MSEAL = 462
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index 621d00d7..7b703e77 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -399,4 +399,5 @@ const (
SYS_LSM_GET_SELF_ATTR = 459
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
+ SYS_MSEAL = 462
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
index 091d107f..d003c3d4 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
@@ -306,6 +306,19 @@ type XVSockPgen struct {
type _Socklen uint32
+type SaeAssocID uint32
+
+type SaeConnID uint32
+
+type SaEndpoints struct {
+ Srcif uint32
+ Srcaddr *RawSockaddr
+ Srcaddrlen uint32
+ Dstaddr *RawSockaddr
+ Dstaddrlen uint32
+ _ [4]byte
+}
+
type Xucred struct {
Version uint32
Uid uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
index 28ff4ef7..0d45a941 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
@@ -306,6 +306,19 @@ type XVSockPgen struct {
type _Socklen uint32
+type SaeAssocID uint32
+
+type SaeConnID uint32
+
+type SaEndpoints struct {
+ Srcif uint32
+ Srcaddr *RawSockaddr
+ Srcaddrlen uint32
+ Dstaddr *RawSockaddr
+ Dstaddrlen uint32
+ _ [4]byte
+}
+
type Xucred struct {
Version uint32
Uid uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
index 6cbd094a..51e13eb0 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
@@ -625,6 +625,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
+ POLLRDHUP = 0x4000
)
type CapRights struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
index 7c03b6ee..d002d8ef 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
@@ -630,6 +630,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
+ POLLRDHUP = 0x4000
)
type CapRights struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
index 422107ee..3f863d89 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
@@ -616,6 +616,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
+ POLLRDHUP = 0x4000
)
type CapRights struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
index 505a12ac..61c72931 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
@@ -610,6 +610,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
+ POLLRDHUP = 0x4000
)
type CapRights struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
index cc986c79..b5d17414 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
@@ -612,6 +612,7 @@ const (
POLLRDNORM = 0x40
POLLWRBAND = 0x100
POLLWRNORM = 0x4
+ POLLRDHUP = 0x4000
)
type CapRights struct {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 4740b834..3a69e454 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -87,30 +87,35 @@ type StatxTimestamp struct {
}
type Statx_t struct {
- Mask uint32
- Blksize uint32
- Attributes uint64
- Nlink uint32
- Uid uint32
- Gid uint32
- Mode uint16
- _ [1]uint16
- Ino uint64
- Size uint64
- Blocks uint64
- Attributes_mask uint64
- Atime StatxTimestamp
- Btime StatxTimestamp
- Ctime StatxTimestamp
- Mtime StatxTimestamp
- Rdev_major uint32
- Rdev_minor uint32
- Dev_major uint32
- Dev_minor uint32
- Mnt_id uint64
- Dio_mem_align uint32
- Dio_offset_align uint32
- _ [12]uint64
+ Mask uint32
+ Blksize uint32
+ Attributes uint64
+ Nlink uint32
+ Uid uint32
+ Gid uint32
+ Mode uint16
+ _ [1]uint16
+ Ino uint64
+ Size uint64
+ Blocks uint64
+ Attributes_mask uint64
+ Atime StatxTimestamp
+ Btime StatxTimestamp
+ Ctime StatxTimestamp
+ Mtime StatxTimestamp
+ Rdev_major uint32
+ Rdev_minor uint32
+ Dev_major uint32
+ Dev_minor uint32
+ Mnt_id uint64
+ Dio_mem_align uint32
+ Dio_offset_align uint32
+ Subvol uint64
+ Atomic_write_unit_min uint32
+ Atomic_write_unit_max uint32
+ Atomic_write_segments_max uint32
+ _ [1]uint32
+ _ [9]uint64
}
type Fsid struct {
@@ -515,6 +520,29 @@ type TCPInfo struct {
Total_rto_time uint32
}
+type TCPVegasInfo struct {
+ Enabled uint32
+ Rttcnt uint32
+ Rtt uint32
+ Minrtt uint32
+}
+
+type TCPDCTCPInfo struct {
+ Enabled uint16
+ Ce_state uint16
+ Alpha uint32
+ Ab_ecn uint32
+ Ab_tot uint32
+}
+
+type TCPBBRInfo struct {
+ Bw_lo uint32
+ Bw_hi uint32
+ Min_rtt uint32
+ Pacing_gain uint32
+ Cwnd_gain uint32
+}
+
type CanFilter struct {
Id uint32
Mask uint32
@@ -556,6 +584,7 @@ const (
SizeofICMPv6Filter = 0x20
SizeofUcred = 0xc
SizeofTCPInfo = 0xf8
+ SizeofTCPCCInfo = 0x14
SizeofCanFilter = 0x8
SizeofTCPRepairOpt = 0x8
)
@@ -2485,7 +2514,7 @@ type XDPMmapOffsets struct {
type XDPUmemReg struct {
Addr uint64
Len uint64
- Chunk_size uint32
+ Size uint32
Headroom uint32
Flags uint32
Tx_metadata_len uint32
@@ -3473,7 +3502,7 @@ const (
DEVLINK_PORT_FN_ATTR_STATE = 0x2
DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3
DEVLINK_PORT_FN_ATTR_CAPS = 0x4
- DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5
+ DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x6
)
type FsverityDigest struct {
@@ -3765,7 +3794,7 @@ const (
ETHTOOL_MSG_PSE_GET = 0x24
ETHTOOL_MSG_PSE_SET = 0x25
ETHTOOL_MSG_RSS_GET = 0x26
- ETHTOOL_MSG_USER_MAX = 0x2b
+ ETHTOOL_MSG_USER_MAX = 0x2c
ETHTOOL_MSG_KERNEL_NONE = 0x0
ETHTOOL_MSG_STRSET_GET_REPLY = 0x1
ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2
@@ -3805,7 +3834,10 @@ const (
ETHTOOL_MSG_MODULE_NTF = 0x24
ETHTOOL_MSG_PSE_GET_REPLY = 0x25
ETHTOOL_MSG_RSS_GET_REPLY = 0x26
- ETHTOOL_MSG_KERNEL_MAX = 0x2b
+ ETHTOOL_MSG_KERNEL_MAX = 0x2c
+ ETHTOOL_FLAG_COMPACT_BITSETS = 0x1
+ ETHTOOL_FLAG_OMIT_REPLY = 0x2
+ ETHTOOL_FLAG_STATS = 0x4
ETHTOOL_A_HEADER_UNSPEC = 0x0
ETHTOOL_A_HEADER_DEV_INDEX = 0x1
ETHTOOL_A_HEADER_DEV_NAME = 0x2
@@ -3947,7 +3979,7 @@ const (
ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17
ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18
ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19
- ETHTOOL_A_COALESCE_MAX = 0x1c
+ ETHTOOL_A_COALESCE_MAX = 0x1e
ETHTOOL_A_PAUSE_UNSPEC = 0x0
ETHTOOL_A_PAUSE_HEADER = 0x1
ETHTOOL_A_PAUSE_AUTONEG = 0x2
@@ -3975,7 +4007,7 @@ const (
ETHTOOL_A_TSINFO_TX_TYPES = 0x3
ETHTOOL_A_TSINFO_RX_FILTERS = 0x4
ETHTOOL_A_TSINFO_PHC_INDEX = 0x5
- ETHTOOL_A_TSINFO_MAX = 0x5
+ ETHTOOL_A_TSINFO_MAX = 0x6
ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0
ETHTOOL_A_CABLE_TEST_HEADER = 0x1
ETHTOOL_A_CABLE_TEST_MAX = 0x1
@@ -4605,7 +4637,7 @@ const (
NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
- NL80211_ATTR_MAX = 0x14a
+ NL80211_ATTR_MAX = 0x14c
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
NL80211_ATTR_MAX_MATCH_SETS = 0x85
@@ -5209,7 +5241,7 @@ const (
NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe
NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf
- NL80211_FREQUENCY_ATTR_MAX = 0x20
+ NL80211_FREQUENCY_ATTR_MAX = 0x21
NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6
NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11
NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index 15adc041..ad05b51a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -727,6 +727,37 @@ const (
RISCV_HWPROBE_EXT_ZBA = 0x8
RISCV_HWPROBE_EXT_ZBB = 0x10
RISCV_HWPROBE_EXT_ZBS = 0x20
+ RISCV_HWPROBE_EXT_ZICBOZ = 0x40
+ RISCV_HWPROBE_EXT_ZBC = 0x80
+ RISCV_HWPROBE_EXT_ZBKB = 0x100
+ RISCV_HWPROBE_EXT_ZBKC = 0x200
+ RISCV_HWPROBE_EXT_ZBKX = 0x400
+ RISCV_HWPROBE_EXT_ZKND = 0x800
+ RISCV_HWPROBE_EXT_ZKNE = 0x1000
+ RISCV_HWPROBE_EXT_ZKNH = 0x2000
+ RISCV_HWPROBE_EXT_ZKSED = 0x4000
+ RISCV_HWPROBE_EXT_ZKSH = 0x8000
+ RISCV_HWPROBE_EXT_ZKT = 0x10000
+ RISCV_HWPROBE_EXT_ZVBB = 0x20000
+ RISCV_HWPROBE_EXT_ZVBC = 0x40000
+ RISCV_HWPROBE_EXT_ZVKB = 0x80000
+ RISCV_HWPROBE_EXT_ZVKG = 0x100000
+ RISCV_HWPROBE_EXT_ZVKNED = 0x200000
+ RISCV_HWPROBE_EXT_ZVKNHA = 0x400000
+ RISCV_HWPROBE_EXT_ZVKNHB = 0x800000
+ RISCV_HWPROBE_EXT_ZVKSED = 0x1000000
+ RISCV_HWPROBE_EXT_ZVKSH = 0x2000000
+ RISCV_HWPROBE_EXT_ZVKT = 0x4000000
+ RISCV_HWPROBE_EXT_ZFH = 0x8000000
+ RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000
+ RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000
+ RISCV_HWPROBE_EXT_ZVFH = 0x40000000
+ RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000
+ RISCV_HWPROBE_EXT_ZFA = 0x100000000
+ RISCV_HWPROBE_EXT_ZTSO = 0x200000000
+ RISCV_HWPROBE_EXT_ZACAS = 0x400000000
+ RISCV_HWPROBE_EXT_ZICOND = 0x800000000
+ RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000
RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5
RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0
RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1
@@ -734,4 +765,6 @@ const (
RISCV_HWPROBE_MISALIGNED_FAST = 0x3
RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4
RISCV_HWPROBE_MISALIGNED_MASK = 0x7
+ RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6
+ RISCV_HWPROBE_WHICH_CPUS = 0x1
)
diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go
index 115341fb..4e613cf6 100644
--- a/vendor/golang.org/x/sys/windows/dll_windows.go
+++ b/vendor/golang.org/x/sys/windows/dll_windows.go
@@ -65,7 +65,7 @@ func LoadDLL(name string) (dll *DLL, err error) {
return d, nil
}
-// MustLoadDLL is like LoadDLL but panics if load operation failes.
+// MustLoadDLL is like LoadDLL but panics if load operation fails.
func MustLoadDLL(name string) *DLL {
d, e := LoadDLL(name)
if e != nil {
diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go
index 97651b5b..b6e1ab76 100644
--- a/vendor/golang.org/x/sys/windows/security_windows.go
+++ b/vendor/golang.org/x/sys/windows/security_windows.go
@@ -1179,7 +1179,7 @@ type OBJECTS_AND_NAME struct {
//sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD
//sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW
-//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) = advapi32.GetAce
+//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) = advapi32.GetAce
// Control returns the security descriptor control bits.
func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) {
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 6525c62f..5cee9a31 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -17,8 +17,10 @@ import (
"unsafe"
)
-type Handle uintptr
-type HWND uintptr
+type (
+ Handle uintptr
+ HWND uintptr
+)
const (
InvalidHandle = ^Handle(0)
@@ -211,6 +213,10 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error)
//sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW
//sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId
+//sys LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) [failretval==0] = user32.LoadKeyboardLayoutW
+//sys UnloadKeyboardLayout(hkl Handle) (err error) = user32.UnloadKeyboardLayout
+//sys GetKeyboardLayout(tid uint32) (hkl Handle) = user32.GetKeyboardLayout
+//sys ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) = user32.ToUnicodeEx
//sys GetShellWindow() (shellWindow HWND) = user32.GetShellWindow
//sys MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW
//sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx
@@ -307,6 +313,10 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode
//sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo
//sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition
+//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP
+//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP
+//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP
+//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP
//sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW
//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW
//sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
@@ -1368,9 +1378,11 @@ func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) {
func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) {
return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4)
}
+
func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) {
return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq)))
}
+
func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) {
return syscall.EWINDOWS
}
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index d8cb71db..7b97a154 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -1060,6 +1060,7 @@ const (
SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6
SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4
SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12
+ SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15
// cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460
@@ -2003,7 +2004,21 @@ const (
MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20
)
-const GAA_FLAG_INCLUDE_PREFIX = 0x00000010
+// Flags for GetAdaptersAddresses, see
+// https://learn.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getadaptersaddresses.
+const (
+ GAA_FLAG_SKIP_UNICAST = 0x1
+ GAA_FLAG_SKIP_ANYCAST = 0x2
+ GAA_FLAG_SKIP_MULTICAST = 0x4
+ GAA_FLAG_SKIP_DNS_SERVER = 0x8
+ GAA_FLAG_INCLUDE_PREFIX = 0x10
+ GAA_FLAG_SKIP_FRIENDLY_NAME = 0x20
+ GAA_FLAG_INCLUDE_WINS_INFO = 0x40
+ GAA_FLAG_INCLUDE_GATEWAYS = 0x80
+ GAA_FLAG_INCLUDE_ALL_INTERFACES = 0x100
+ GAA_FLAG_INCLUDE_ALL_COMPARTMENTS = 0x200
+ GAA_FLAG_INCLUDE_TUNNEL_BINDINGORDER = 0x400
+)
const (
IF_TYPE_OTHER = 1
@@ -2017,6 +2032,50 @@ const (
IF_TYPE_IEEE1394 = 144
)
+// Enum NL_PREFIX_ORIGIN for [IpAdapterUnicastAddress], see
+// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_prefix_origin
+const (
+ IpPrefixOriginOther = 0
+ IpPrefixOriginManual = 1
+ IpPrefixOriginWellKnown = 2
+ IpPrefixOriginDhcp = 3
+ IpPrefixOriginRouterAdvertisement = 4
+ IpPrefixOriginUnchanged = 1 << 4
+)
+
+// Enum NL_SUFFIX_ORIGIN for [IpAdapterUnicastAddress], see
+// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_suffix_origin
+const (
+ NlsoOther = 0
+ NlsoManual = 1
+ NlsoWellKnown = 2
+ NlsoDhcp = 3
+ NlsoLinkLayerAddress = 4
+ NlsoRandom = 5
+ IpSuffixOriginOther = 0
+ IpSuffixOriginManual = 1
+ IpSuffixOriginWellKnown = 2
+ IpSuffixOriginDhcp = 3
+ IpSuffixOriginLinkLayerAddress = 4
+ IpSuffixOriginRandom = 5
+ IpSuffixOriginUnchanged = 1 << 4
+)
+
+// Enum NL_DAD_STATE for [IpAdapterUnicastAddress], see
+// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_dad_state
+const (
+ NldsInvalid = 0
+ NldsTentative = 1
+ NldsDuplicate = 2
+ NldsDeprecated = 3
+ NldsPreferred = 4
+ IpDadStateInvalid = 0
+ IpDadStateTentative = 1
+ IpDadStateDuplicate = 2
+ IpDadStateDeprecated = 3
+ IpDadStatePreferred = 4
+)
+
type SocketAddress struct {
Sockaddr *syscall.RawSockaddrAny
SockaddrLength int32
@@ -3404,3 +3463,14 @@ type DCB struct {
EvtChar byte
wReserved1 uint16
}
+
+// Keyboard Layout Flags.
+// See https://learn.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-loadkeyboardlayoutw
+const (
+ KLF_ACTIVATE = 0x00000001
+ KLF_SUBSTITUTE_OK = 0x00000002
+ KLF_REORDER = 0x00000008
+ KLF_REPLACELANG = 0x00000010
+ KLF_NOTELLSHELL = 0x00000080
+ KLF_SETFORPROCESS = 0x00000100
+)
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index eba76101..4c2e1bdc 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -247,7 +247,9 @@ var (
procGetCommandLineW = modkernel32.NewProc("GetCommandLineW")
procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW")
procGetComputerNameW = modkernel32.NewProc("GetComputerNameW")
+ procGetConsoleCP = modkernel32.NewProc("GetConsoleCP")
procGetConsoleMode = modkernel32.NewProc("GetConsoleMode")
+ procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP")
procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo")
procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW")
procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId")
@@ -347,8 +349,10 @@ var (
procSetCommMask = modkernel32.NewProc("SetCommMask")
procSetCommState = modkernel32.NewProc("SetCommState")
procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts")
+ procSetConsoleCP = modkernel32.NewProc("SetConsoleCP")
procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition")
procSetConsoleMode = modkernel32.NewProc("SetConsoleMode")
+ procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP")
procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW")
procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories")
procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW")
@@ -478,12 +482,16 @@ var (
procGetDesktopWindow = moduser32.NewProc("GetDesktopWindow")
procGetForegroundWindow = moduser32.NewProc("GetForegroundWindow")
procGetGUIThreadInfo = moduser32.NewProc("GetGUIThreadInfo")
+ procGetKeyboardLayout = moduser32.NewProc("GetKeyboardLayout")
procGetShellWindow = moduser32.NewProc("GetShellWindow")
procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId")
procIsWindow = moduser32.NewProc("IsWindow")
procIsWindowUnicode = moduser32.NewProc("IsWindowUnicode")
procIsWindowVisible = moduser32.NewProc("IsWindowVisible")
+ procLoadKeyboardLayoutW = moduser32.NewProc("LoadKeyboardLayoutW")
procMessageBoxW = moduser32.NewProc("MessageBoxW")
+ procToUnicodeEx = moduser32.NewProc("ToUnicodeEx")
+ procUnloadKeyboardLayout = moduser32.NewProc("UnloadKeyboardLayout")
procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock")
procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock")
procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW")
@@ -789,6 +797,14 @@ func FreeSid(sid *SID) (err error) {
return
}
+func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) {
+ r1, _, e1 := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func GetLengthSid(sid *SID) (len uint32) {
r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
len = uint32(r0)
@@ -1225,14 +1241,6 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE
return
}
-func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) {
- r0, _, _ := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce)))
- if r0 == 0 {
- ret = GetLastError()
- }
- return
-}
-
func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) {
r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor)))
if r1 == 0 {
@@ -2158,6 +2166,15 @@ func GetComputerName(buf *uint16, n *uint32) (err error) {
return
}
+func GetConsoleCP() (cp uint32, err error) {
+ r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0)
+ cp = uint32(r0)
+ if cp == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func GetConsoleMode(console Handle, mode *uint32) (err error) {
r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0)
if r1 == 0 {
@@ -2166,6 +2183,15 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) {
return
}
+func GetConsoleOutputCP() (cp uint32, err error) {
+ r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0)
+ cp = uint32(r0)
+ if cp == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) {
r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0)
if r1 == 0 {
@@ -3034,6 +3060,14 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
return
}
+func SetConsoleCP(cp uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func setConsoleCursorPosition(console Handle, position uint32) (err error) {
r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0)
if r1 == 0 {
@@ -3050,6 +3084,14 @@ func SetConsoleMode(console Handle, mode uint32) (err error) {
return
}
+func SetConsoleOutputCP(cp uint32) (err error) {
+ r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func SetCurrentDirectory(path *uint16) (err error) {
r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
if r1 == 0 {
@@ -4082,6 +4124,12 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) {
return
}
+func GetKeyboardLayout(tid uint32) (hkl Handle) {
+ r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0)
+ hkl = Handle(r0)
+ return
+}
+
func GetShellWindow() (shellWindow HWND) {
r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0)
shellWindow = HWND(r0)
@@ -4115,6 +4163,15 @@ func IsWindowVisible(hwnd HWND) (isVisible bool) {
return
}
+func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) {
+ r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0)
+ hkl = Handle(r0)
+ if hkl == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) {
r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0)
ret = int32(r0)
@@ -4124,6 +4181,20 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i
return
}
+func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) {
+ r0, _, _ := syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0)
+ ret = int32(r0)
+ return
+}
+
+func UnloadKeyboardLayout(hkl Handle) (err error) {
+ r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) {
var _p0 uint32
if inheritExisting {
diff --git a/vendor/golang.org/x/text/LICENSE b/vendor/golang.org/x/text/LICENSE
index 6a66aea5..2a7cf70d 100644
--- a/vendor/golang.org/x/text/LICENSE
+++ b/vendor/golang.org/x/text/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE
index 6a66aea5..2a7cf70d 100644
--- a/vendor/golang.org/x/time/LICENSE
+++ b/vendor/golang.org/x/time/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
index 8f6c7f49..93a798ab 100644
--- a/vendor/golang.org/x/time/rate/rate.go
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 {
// bursts of at most b tokens.
func NewLimiter(r Limit, b int) *Limiter {
return &Limiter{
- limit: r,
- burst: b,
+ limit: r,
+ burst: b,
+ tokens: float64(b),
}
}
@@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
tokens: n,
timeToAct: t,
}
- } else if lim.limit == 0 {
- var ok bool
- if lim.burst >= n {
- ok = true
- lim.burst -= n
- }
- return Reservation{
- ok: ok,
- lim: lim,
- tokens: lim.burst,
- timeToAct: t,
- }
}
t, tokens := lim.advance(t)
diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE
index 6a66aea5..2a7cf70d 100644
--- a/vendor/golang.org/x/tools/LICENSE
+++ b/vendor/golang.org/x/tools/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go
index 6bdcf70a..ca71e3e1 100644
--- a/vendor/golang.org/x/tools/go/ast/astutil/util.go
+++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go
@@ -7,13 +7,5 @@ package astutil
import "go/ast"
// Unparen returns e with any enclosing parentheses stripped.
-// TODO(adonovan): use go1.22's ast.Unparen.
-func Unparen(e ast.Expr) ast.Expr {
- for {
- p, ok := e.(*ast.ParenExpr)
- if !ok {
- return e
- }
- e = p.X
- }
-}
+// Deprecated: use [ast.Unparen].
+func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) }
diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go
index 3531ac8f..f1931d10 100644
--- a/vendor/golang.org/x/tools/go/packages/doc.go
+++ b/vendor/golang.org/x/tools/go/packages/doc.go
@@ -64,7 +64,7 @@ graph using the Imports fields.
The Load function can be configured by passing a pointer to a Config as
the first argument. A nil Config is equivalent to the zero Config, which
-causes Load to run in LoadFiles mode, collecting minimal information.
+causes Load to run in [LoadFiles] mode, collecting minimal information.
See the documentation for type Config for details.
As noted earlier, the Config.Mode controls the amount of detail
@@ -72,14 +72,14 @@ reported about the loaded packages. See the documentation for type LoadMode
for details.
Most tools should pass their command-line arguments (after any flags)
-uninterpreted to [Load], so that it can interpret them
+uninterpreted to Load, so that it can interpret them
according to the conventions of the underlying build system.
See the Example function for typical usage.
# The driver protocol
-[Load] may be used to load Go packages even in Go projects that use
+Load may be used to load Go packages even in Go projects that use
alternative build systems, by installing an appropriate "driver"
program for the build system and specifying its location in the
GOPACKAGESDRIVER environment variable.
@@ -97,6 +97,15 @@ JSON-encoded [DriverRequest] message providing additional information
is written to the driver's standard input. The driver must write a
JSON-encoded [DriverResponse] message to its standard output. (This
message differs from the JSON schema produced by 'go list'.)
+
+The value of the PWD environment variable seen by the driver process
+is the preferred name of its working directory. (The working directory
+may have other aliases due to symbolic links; see the comment on the
+Dir field of [exec.Cmd] for related information.)
+When the driver process emits in its response the name of a file
+that is a descendant of this directory, it must use an absolute path
+that has the value of PWD as a prefix, to ensure that the returned
+filenames satisfy the original query.
*/
package packages // import "golang.org/x/tools/go/packages"
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go
index c2b4b711..8f7afcb5 100644
--- a/vendor/golang.org/x/tools/go/packages/external.go
+++ b/vendor/golang.org/x/tools/go/packages/external.go
@@ -82,7 +82,7 @@ type DriverResponse struct {
type driver func(cfg *Config, patterns ...string) (*DriverResponse, error)
// findExternalDriver returns the file path of a tool that supplies
-// the build system package structure, or "" if not found."
+// the build system package structure, or "" if not found.
// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its
// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
func findExternalDriver(cfg *Config) driver {
diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go
index 5c080d21..5fcad6ea 100644
--- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go
+++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go
@@ -9,49 +9,46 @@ import (
"strings"
)
-var allModes = []LoadMode{
- NeedName,
- NeedFiles,
- NeedCompiledGoFiles,
- NeedImports,
- NeedDeps,
- NeedExportFile,
- NeedTypes,
- NeedSyntax,
- NeedTypesInfo,
- NeedTypesSizes,
+var modes = [...]struct {
+ mode LoadMode
+ name string
+}{
+ {NeedName, "NeedName"},
+ {NeedFiles, "NeedFiles"},
+ {NeedCompiledGoFiles, "NeedCompiledGoFiles"},
+ {NeedImports, "NeedImports"},
+ {NeedDeps, "NeedDeps"},
+ {NeedExportFile, "NeedExportFile"},
+ {NeedTypes, "NeedTypes"},
+ {NeedSyntax, "NeedSyntax"},
+ {NeedTypesInfo, "NeedTypesInfo"},
+ {NeedTypesSizes, "NeedTypesSizes"},
+ {NeedModule, "NeedModule"},
+ {NeedEmbedFiles, "NeedEmbedFiles"},
+ {NeedEmbedPatterns, "NeedEmbedPatterns"},
}
-var modeStrings = []string{
- "NeedName",
- "NeedFiles",
- "NeedCompiledGoFiles",
- "NeedImports",
- "NeedDeps",
- "NeedExportFile",
- "NeedTypes",
- "NeedSyntax",
- "NeedTypesInfo",
- "NeedTypesSizes",
-}
-
-func (mod LoadMode) String() string {
- m := mod
- if m == 0 {
+func (mode LoadMode) String() string {
+ if mode == 0 {
return "LoadMode(0)"
}
var out []string
- for i, x := range allModes {
- if x > m {
- break
+ // named bits
+ for _, item := range modes {
+ if (mode & item.mode) != 0 {
+ mode ^= item.mode
+ out = append(out, item.name)
}
- if (m & x) != 0 {
- out = append(out, modeStrings[i])
- m = m ^ x
+ }
+ // unnamed residue
+ if mode != 0 {
+ if out == nil {
+ return fmt.Sprintf("LoadMode(%#x)", int(mode))
}
+ out = append(out, fmt.Sprintf("%#x", int(mode)))
}
- if m != 0 {
- out = append(out, "Unknown")
+ if len(out) == 1 {
+ return out[0]
}
- return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|"))
+ return "(" + strings.Join(out, "|") + ")"
}
diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go
index 34306ddd..f227f1ba 100644
--- a/vendor/golang.org/x/tools/go/packages/packages.go
+++ b/vendor/golang.org/x/tools/go/packages/packages.go
@@ -46,11 +46,10 @@ import (
//
// Unfortunately there are a number of open bugs related to
// interactions among the LoadMode bits:
-// - https://github.com/golang/go/issues/48226
-// - https://github.com/golang/go/issues/56633
-// - https://github.com/golang/go/issues/56677
-// - https://github.com/golang/go/issues/58726
-// - https://github.com/golang/go/issues/63517
+// - https://github.com/golang/go/issues/56633
+// - https://github.com/golang/go/issues/56677
+// - https://github.com/golang/go/issues/58726
+// - https://github.com/golang/go/issues/63517
type LoadMode int
const (
@@ -76,7 +75,7 @@ const (
// NeedTypes adds Types, Fset, and IllTyped.
NeedTypes
- // NeedSyntax adds Syntax.
+ // NeedSyntax adds Syntax and Fset.
NeedSyntax
// NeedTypesInfo adds TypesInfo.
@@ -104,25 +103,37 @@ const (
// NeedEmbedPatterns adds EmbedPatterns.
NeedEmbedPatterns
+
+ // Be sure to update loadmode_string.go when adding new items!
)
const (
+ // LoadFiles loads the name and file names for the initial packages.
+ //
// Deprecated: LoadFiles exists for historical compatibility
// and should not be used. Please directly specify the needed fields using the Need values.
LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles
+ // LoadImports loads the name, file names, and import mapping for the initial packages.
+ //
// Deprecated: LoadImports exists for historical compatibility
// and should not be used. Please directly specify the needed fields using the Need values.
LoadImports = LoadFiles | NeedImports
+ // LoadTypes loads exported type information for the initial packages.
+ //
// Deprecated: LoadTypes exists for historical compatibility
// and should not be used. Please directly specify the needed fields using the Need values.
LoadTypes = LoadImports | NeedTypes | NeedTypesSizes
+ // LoadSyntax loads typed syntax for the initial packages.
+ //
// Deprecated: LoadSyntax exists for historical compatibility
// and should not be used. Please directly specify the needed fields using the Need values.
LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo
+ // LoadAllSyntax loads typed syntax for the initial packages and all dependencies.
+ //
// Deprecated: LoadAllSyntax exists for historical compatibility
// and should not be used. Please directly specify the needed fields using the Need values.
LoadAllSyntax = LoadSyntax | NeedDeps
@@ -237,14 +248,13 @@ type Config struct {
// Load loads and returns the Go packages named by the given patterns.
//
-// Config specifies loading options;
-// nil behaves the same as an empty Config.
+// The cfg parameter specifies loading options; nil behaves the same as an empty [Config].
//
// The [Config.Mode] field is a set of bits that determine what kinds
// of information should be computed and returned. Modes that require
// more information tend to be slower. See [LoadMode] for details
// and important caveats. Its zero value is equivalent to
-// NeedName | NeedFiles | NeedCompiledGoFiles.
+// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles].
//
// Each call to Load returns a new set of [Package] instances.
// The Packages and their Imports form a directed acyclic graph.
@@ -261,7 +271,7 @@ type Config struct {
// Errors associated with a particular package are recorded in the
// corresponding Package's Errors list, and do not cause Load to
// return an error. Clients may need to handle such errors before
-// proceeding with further analysis. The PrintErrors function is
+// proceeding with further analysis. The [PrintErrors] function is
// provided for convenient display of all errors.
func Load(cfg *Config, patterns ...string) ([]*Package, error) {
ld := newLoader(cfg)
@@ -764,6 +774,7 @@ func newLoader(cfg *Config) *loader {
// because we load source if export data is missing.
if ld.ParseFile == nil {
ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
+ // We implicitly promise to keep doing ast.Object resolution. :(
const mode = parser.AllErrors | parser.ParseComments
return parser.ParseFile(fset, filename, src, mode)
}
@@ -961,12 +972,14 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
}
if ld.requestedMode&NeedTypes == 0 {
ld.pkgs[i].Types = nil
- ld.pkgs[i].Fset = nil
ld.pkgs[i].IllTyped = false
}
if ld.requestedMode&NeedSyntax == 0 {
ld.pkgs[i].Syntax = nil
}
+ if ld.requestedMode&NeedTypes == 0 && ld.requestedMode&NeedSyntax == 0 {
+ ld.pkgs[i].Fset = nil
+ }
if ld.requestedMode&NeedTypesInfo == 0 {
ld.pkgs[i].TypesInfo = nil
}
@@ -1499,6 +1512,10 @@ func impliedLoadMode(loadMode LoadMode) LoadMode {
// All these things require knowing the import graph.
loadMode |= NeedImports
}
+ if loadMode&NeedTypes != 0 {
+ // Types require the GoVersion from Module.
+ loadMode |= NeedModule
+ }
return loadMode
}
diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go
index a1dcc40b..df14ffd9 100644
--- a/vendor/golang.org/x/tools/go/packages/visit.go
+++ b/vendor/golang.org/x/tools/go/packages/visit.go
@@ -49,11 +49,20 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
// PrintErrors returns the number of errors printed.
func PrintErrors(pkgs []*Package) int {
var n int
+ errModules := make(map[*Module]bool)
Visit(pkgs, nil, func(pkg *Package) {
for _, err := range pkg.Errors {
fmt.Fprintln(os.Stderr, err)
n++
}
+
+ // Print pkg.Module.Error once if present.
+ mod := pkg.Module
+ if mod != nil && mod.Error != nil && !errModules[mod] {
+ errModules[mod] = true
+ fmt.Fprintln(os.Stderr, mod.Error.Err)
+ n++
+ }
})
return n
}
diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
index d648c3d0..a70b727f 100644
--- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
+++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
@@ -51,7 +51,7 @@ type Path string
//
// PO package->object Package.Scope.Lookup
// OT object->type Object.Type
-// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying} [EKPRUTrC]
+// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa]
// TO type->object Type.{At,Field,Method,Obj} [AFMO]
//
// All valid paths start with a package and end at an object
@@ -63,7 +63,7 @@ type Path string
// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
// - The only OT operator is Object.Type,
// which we encode as '.' because dot cannot appear in an identifier.
-// - The TT operators are encoded as [EKPRUTrC];
+// - The TT operators are encoded as [EKPRUTrCa];
// two of these ({,Recv}TypeParams) require an integer operand,
// which is encoded as a string of decimal digits.
// - The TO operators are encoded as [AFMO];
@@ -106,6 +106,7 @@ const (
opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature)
opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature)
opConstraint = 'C' // .Constraint() (TypeParam)
+ opRhs = 'a' // .Rhs() (Alias)
// type->object operators
opAt = 'A' // .At(i) (Tuple)
@@ -227,7 +228,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
// Reject obviously non-viable cases.
switch obj := obj.(type) {
case *types.TypeName:
- if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok {
+ if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok {
// With the exception of type parameters, only package-level type names
// have a path.
return "", fmt.Errorf("no path for %v", obj)
@@ -279,21 +280,26 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
path = append(path, opType)
T := o.Type()
+ if alias, ok := T.(*types.Alias); ok {
+ if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam, nil); r != nil {
+ return Path(r), nil
+ }
+ if r := find(obj, aliases.Rhs(alias), append(path, opRhs), nil); r != nil {
+ return Path(r), nil
+ }
- if tname.IsAlias() {
- // type alias
+ } else if tname.IsAlias() {
+ // legacy alias
if r := find(obj, T, path, nil); r != nil {
return Path(r), nil
}
- } else {
- if named, _ := T.(*types.Named); named != nil {
- if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam, nil); r != nil {
- // generic named type
- return Path(r), nil
- }
- }
+
+ } else if named, ok := T.(*types.Named); ok {
// defined (named) type
- if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil {
+ if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam, nil); r != nil {
+ return Path(r), nil
+ }
+ if r := find(obj, named.Underlying(), append(path, opUnderlying), nil); r != nil {
return Path(r), nil
}
}
@@ -314,7 +320,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) {
}
// Inspect declared methods of defined types.
- if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok {
+ if T, ok := types.Unalias(o.Type()).(*types.Named); ok {
path = append(path, opType)
// The method index here is always with respect
// to the underlying go/types data structures,
@@ -443,8 +449,8 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) {
// nil, it will be allocated as necessary.
func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte {
switch T := T.(type) {
- case *aliases.Alias:
- return find(obj, aliases.Unalias(T), path, seen)
+ case *types.Alias:
+ return find(obj, types.Unalias(T), path, seen)
case *types.Basic, *types.Named:
// Named types belonging to pkg were handled already,
// so T must belong to another package. No path.
@@ -620,7 +626,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
// Inv: t != nil, obj == nil
- t = aliases.Unalias(t)
+ t = types.Unalias(t)
switch code {
case opElem:
hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
@@ -657,6 +663,16 @@ func Object(pkg *types.Package, p Path) (types.Object, error) {
}
t = named.Underlying()
+ case opRhs:
+ if alias, ok := t.(*types.Alias); ok {
+ t = aliases.Rhs(alias)
+ } else if false && aliases.Enabled() {
+ // The Enabled check is too expensive, so for now we
+ // simply assume that aliases are not enabled.
+ // TODO(adonovan): replace with "if true {" when go1.24 is assured.
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t)
+ }
+
case opTypeParam:
hasTypeParams, ok := t.(hasTypeParams) // Named, Signature
if !ok {
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go
new file mode 100644
index 00000000..75438035
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+import (
+ "go/ast"
+ "go/types"
+
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// Callee returns the named target of a function call, if any:
+// a function, method, builtin, or variable.
+//
+// Functions and methods may potentially have type parameters.
+func Callee(info *types.Info, call *ast.CallExpr) types.Object {
+ fun := ast.Unparen(call.Fun)
+
+ // Look through type instantiation if necessary.
+ isInstance := false
+ switch fun.(type) {
+ case *ast.IndexExpr, *ast.IndexListExpr:
+ // When extracting the callee from an *IndexExpr, we need to check that
+ // it is a *types.Func and not a *types.Var.
+ // Example: Don't match a slice m within the expression `m[0]()`.
+ isInstance = true
+ fun, _, _, _ = typeparams.UnpackIndexExpr(fun)
+ }
+
+ var obj types.Object
+ switch fun := fun.(type) {
+ case *ast.Ident:
+ obj = info.Uses[fun] // type, var, builtin, or declared func
+ case *ast.SelectorExpr:
+ if sel, ok := info.Selections[fun]; ok {
+ obj = sel.Obj() // method or field
+ } else {
+ obj = info.Uses[fun.Sel] // qualified identifier?
+ }
+ }
+ if _, ok := obj.(*types.TypeName); ok {
+ return nil // T(x) is a conversion, not a call
+ }
+ // A Func is required to match instantiations.
+ if _, ok := obj.(*types.Func); isInstance && !ok {
+ return nil // Was not a Func.
+ }
+ return obj
+}
+
+// StaticCallee returns the target (function or method) of a static function
+// call, if any. It returns nil for calls to builtins.
+//
+// Note: for calls of instantiated functions and methods, StaticCallee returns
+// the corresponding generic function or method on the generic type.
+func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
+ if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
+ return f
+ }
+ return nil
+}
+
+func interfaceMethod(f *types.Func) bool {
+ recv := f.Type().(*types.Signature).Recv()
+ return recv != nil && types.IsInterface(recv.Type())
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go
new file mode 100644
index 00000000..b81ce0c3
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/imports.go
@@ -0,0 +1,30 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+import "go/types"
+
+// Dependencies returns all dependencies of the specified packages.
+//
+// Dependent packages appear in topological order: if package P imports
+// package Q, Q appears earlier than P in the result.
+// The algorithm follows import statements in the order they
+// appear in the source code, so the result is a total order.
+func Dependencies(pkgs ...*types.Package) []*types.Package {
+ var result []*types.Package
+ seen := make(map[*types.Package]bool)
+ var visit func(pkgs []*types.Package)
+ visit = func(pkgs []*types.Package) {
+ for _, p := range pkgs {
+ if !seen[p] {
+ seen[p] = true
+ visit(p.Imports())
+ result = append(result, p)
+ }
+ }
+ }
+ visit(pkgs)
+ return result
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go
new file mode 100644
index 00000000..8d824f71
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go
@@ -0,0 +1,517 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeutil defines various utilities for types, such as Map,
+// a mapping from types.Type to any values.
+package typeutil // import "golang.org/x/tools/go/types/typeutil"
+
+import (
+ "bytes"
+ "fmt"
+ "go/types"
+ "reflect"
+
+ "golang.org/x/tools/internal/typeparams"
+)
+
+// Map is a hash-table-based mapping from types (types.Type) to
+// arbitrary any values. The concrete types that implement
+// the Type interface are pointers. Since they are not canonicalized,
+// == cannot be used to check for equivalence, and thus we cannot
+// simply use a Go map.
+//
+// Just as with map[K]V, a nil *Map is a valid empty map.
+//
+// Not thread-safe.
+type Map struct {
+ hasher Hasher // shared by many Maps
+ table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
+ length int // number of map entries
+}
+
+// entry is an entry (key/value association) in a hash bucket.
+type entry struct {
+ key types.Type
+ value any
+}
+
+// SetHasher sets the hasher used by Map.
+//
+// All Hashers are functionally equivalent but contain internal state
+// used to cache the results of hashing previously seen types.
+//
+// A single Hasher created by MakeHasher() may be shared among many
+// Maps. This is recommended if the instances have many keys in
+// common, as it will amortize the cost of hash computation.
+//
+// A Hasher may grow without bound as new types are seen. Even when a
+// type is deleted from the map, the Hasher never shrinks, since other
+// types in the map may reference the deleted type indirectly.
+//
+// Hashers are not thread-safe, and read-only operations such as
+// Map.Lookup require updates to the hasher, so a full Mutex lock (not a
+// read-lock) is require around all Map operations if a shared
+// hasher is accessed from multiple threads.
+//
+// If SetHasher is not called, the Map will create a private hasher at
+// the first call to Insert.
+func (m *Map) SetHasher(hasher Hasher) {
+ m.hasher = hasher
+}
+
+// Delete removes the entry with the given key, if any.
+// It returns true if the entry was found.
+func (m *Map) Delete(key types.Type) bool {
+ if m != nil && m.table != nil {
+ hash := m.hasher.Hash(key)
+ bucket := m.table[hash]
+ for i, e := range bucket {
+ if e.key != nil && types.Identical(key, e.key) {
+ // We can't compact the bucket as it
+ // would disturb iterators.
+ bucket[i] = entry{}
+ m.length--
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// At returns the map entry for the given key.
+// The result is nil if the entry is not present.
+func (m *Map) At(key types.Type) any {
+ if m != nil && m.table != nil {
+ for _, e := range m.table[m.hasher.Hash(key)] {
+ if e.key != nil && types.Identical(key, e.key) {
+ return e.value
+ }
+ }
+ }
+ return nil
+}
+
+// Set sets the map entry for key to val,
+// and returns the previous entry, if any.
+func (m *Map) Set(key types.Type, value any) (prev any) {
+ if m.table != nil {
+ hash := m.hasher.Hash(key)
+ bucket := m.table[hash]
+ var hole *entry
+ for i, e := range bucket {
+ if e.key == nil {
+ hole = &bucket[i]
+ } else if types.Identical(key, e.key) {
+ prev = e.value
+ bucket[i].value = value
+ return
+ }
+ }
+
+ if hole != nil {
+ *hole = entry{key, value} // overwrite deleted entry
+ } else {
+ m.table[hash] = append(bucket, entry{key, value})
+ }
+ } else {
+ if m.hasher.memo == nil {
+ m.hasher = MakeHasher()
+ }
+ hash := m.hasher.Hash(key)
+ m.table = map[uint32][]entry{hash: {entry{key, value}}}
+ }
+
+ m.length++
+ return
+}
+
+// Len returns the number of map entries.
+func (m *Map) Len() int {
+ if m != nil {
+ return m.length
+ }
+ return 0
+}
+
+// Iterate calls function f on each entry in the map in unspecified order.
+//
+// If f should mutate the map, Iterate provides the same guarantees as
+// Go maps: if f deletes a map entry that Iterate has not yet reached,
+// f will not be invoked for it, but if f inserts a map entry that
+// Iterate has not yet reached, whether or not f will be invoked for
+// it is unspecified.
+func (m *Map) Iterate(f func(key types.Type, value any)) {
+ if m != nil {
+ for _, bucket := range m.table {
+ for _, e := range bucket {
+ if e.key != nil {
+ f(e.key, e.value)
+ }
+ }
+ }
+ }
+}
+
+// Keys returns a new slice containing the set of map keys.
+// The order is unspecified.
+func (m *Map) Keys() []types.Type {
+ keys := make([]types.Type, 0, m.Len())
+ m.Iterate(func(key types.Type, _ any) {
+ keys = append(keys, key)
+ })
+ return keys
+}
+
+func (m *Map) toString(values bool) string {
+ if m == nil {
+ return "{}"
+ }
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, "{")
+ sep := ""
+ m.Iterate(func(key types.Type, value any) {
+ fmt.Fprint(&buf, sep)
+ sep = ", "
+ fmt.Fprint(&buf, key)
+ if values {
+ fmt.Fprintf(&buf, ": %q", value)
+ }
+ })
+ fmt.Fprint(&buf, "}")
+ return buf.String()
+}
+
+// String returns a string representation of the map's entries.
+// Values are printed using fmt.Sprintf("%v", v).
+// Order is unspecified.
+func (m *Map) String() string {
+ return m.toString(true)
+}
+
+// KeysString returns a string representation of the map's key set.
+// Order is unspecified.
+func (m *Map) KeysString() string {
+ return m.toString(false)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Hasher
+
+// A Hasher maps each type to its hash value.
+// For efficiency, a hasher uses memoization; thus its memory
+// footprint grows monotonically over time.
+// Hashers are not thread-safe.
+// Hashers have reference semantics.
+// Call MakeHasher to create a Hasher.
+type Hasher struct {
+ memo map[types.Type]uint32
+
+ // ptrMap records pointer identity.
+ ptrMap map[any]uint32
+
+ // sigTParams holds type parameters from the signature being hashed.
+ // Signatures are considered identical modulo renaming of type parameters, so
+ // within the scope of a signature type the identity of the signature's type
+ // parameters is just their index.
+ //
+ // Since the language does not currently support referring to uninstantiated
+ // generic types or functions, and instantiated signatures do not have type
+ // parameter lists, we should never encounter a second non-empty type
+ // parameter list when hashing a generic signature.
+ sigTParams *types.TypeParamList
+}
+
+// MakeHasher returns a new Hasher instance.
+func MakeHasher() Hasher {
+ return Hasher{
+ memo: make(map[types.Type]uint32),
+ ptrMap: make(map[any]uint32),
+ sigTParams: nil,
+ }
+}
+
+// Hash computes a hash value for the given type t such that
+// Identical(t, t') => Hash(t) == Hash(t').
+func (h Hasher) Hash(t types.Type) uint32 {
+ hash, ok := h.memo[t]
+ if !ok {
+ hash = h.hashFor(t)
+ h.memo[t] = hash
+ }
+ return hash
+}
+
+// hashString computes the Fowler–Noll–Vo hash of s.
+func hashString(s string) uint32 {
+ var h uint32
+ for i := 0; i < len(s); i++ {
+ h ^= uint32(s[i])
+ h *= 16777619
+ }
+ return h
+}
+
+// hashFor computes the hash of t.
+func (h Hasher) hashFor(t types.Type) uint32 {
+ // See Identical for rationale.
+ switch t := t.(type) {
+ case *types.Basic:
+ return uint32(t.Kind())
+
+ case *types.Alias:
+ return h.Hash(types.Unalias(t))
+
+ case *types.Array:
+ return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
+
+ case *types.Slice:
+ return 9049 + 2*h.Hash(t.Elem())
+
+ case *types.Struct:
+ var hash uint32 = 9059
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ f := t.Field(i)
+ if f.Anonymous() {
+ hash += 8861
+ }
+ hash += hashString(t.Tag(i))
+ hash += hashString(f.Name()) // (ignore f.Pkg)
+ hash += h.Hash(f.Type())
+ }
+ return hash
+
+ case *types.Pointer:
+ return 9067 + 2*h.Hash(t.Elem())
+
+ case *types.Signature:
+ var hash uint32 = 9091
+ if t.Variadic() {
+ hash *= 8863
+ }
+
+ // Use a separate hasher for types inside of the signature, where type
+ // parameter identity is modified to be (index, constraint). We must use a
+ // new memo for this hasher as type identity may be affected by this
+ // masking. For example, in func[T any](*T), the identity of *T depends on
+ // whether we are mapping the argument in isolation, or recursively as part
+ // of hashing the signature.
+ //
+ // We should never encounter a generic signature while hashing another
+ // generic signature, but defensively set sigTParams only if h.mask is
+ // unset.
+ tparams := t.TypeParams()
+ if h.sigTParams == nil && tparams.Len() != 0 {
+ h = Hasher{
+ // There may be something more efficient than discarding the existing
+ // memo, but it would require detecting whether types are 'tainted' by
+ // references to type parameters.
+ memo: make(map[types.Type]uint32),
+ // Re-using ptrMap ensures that pointer identity is preserved in this
+ // hasher.
+ ptrMap: h.ptrMap,
+ sigTParams: tparams,
+ }
+ }
+
+ for i := 0; i < tparams.Len(); i++ {
+ tparam := tparams.At(i)
+ hash += 7 * h.Hash(tparam.Constraint())
+ }
+
+ return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
+
+ case *types.Union:
+ return h.hashUnion(t)
+
+ case *types.Interface:
+ // Interfaces are identical if they have the same set of methods, with
+ // identical names and types, and they have the same set of type
+ // restrictions. See go/types.identical for more details.
+ var hash uint32 = 9103
+
+ // Hash methods.
+ for i, n := 0, t.NumMethods(); i < n; i++ {
+ // Method order is not significant.
+ // Ignore m.Pkg().
+ m := t.Method(i)
+ // Use shallow hash on method signature to
+ // avoid anonymous interface cycles.
+ hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type())
+ }
+
+ // Hash type restrictions.
+ terms, err := typeparams.InterfaceTermSet(t)
+ // if err != nil t has invalid type restrictions.
+ if err == nil {
+ hash += h.hashTermSet(terms)
+ }
+
+ return hash
+
+ case *types.Map:
+ return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
+
+ case *types.Chan:
+ return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
+
+ case *types.Named:
+ hash := h.hashPtr(t.Obj())
+ targs := t.TypeArgs()
+ for i := 0; i < targs.Len(); i++ {
+ targ := targs.At(i)
+ hash += 2 * h.Hash(targ)
+ }
+ return hash
+
+ case *types.TypeParam:
+ return h.hashTypeParam(t)
+
+ case *types.Tuple:
+ return h.hashTuple(t)
+ }
+
+ panic(fmt.Sprintf("%T: %v", t, t))
+}
+
+func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
+ // See go/types.identicalTypes for rationale.
+ n := tuple.Len()
+ hash := 9137 + 2*uint32(n)
+ for i := 0; i < n; i++ {
+ hash += 3 * h.Hash(tuple.At(i).Type())
+ }
+ return hash
+}
+
+func (h Hasher) hashUnion(t *types.Union) uint32 {
+ // Hash type restrictions.
+ terms, err := typeparams.UnionTermSet(t)
+ // if err != nil t has invalid type restrictions. Fall back on a non-zero
+ // hash.
+ if err != nil {
+ return 9151
+ }
+ return h.hashTermSet(terms)
+}
+
+func (h Hasher) hashTermSet(terms []*types.Term) uint32 {
+ hash := 9157 + 2*uint32(len(terms))
+ for _, term := range terms {
+ // term order is not significant.
+ termHash := h.Hash(term.Type())
+ if term.Tilde() {
+ termHash *= 9161
+ }
+ hash += 3 * termHash
+ }
+ return hash
+}
+
+// hashTypeParam returns a hash of the type parameter t, with a hash value
+// depending on whether t is contained in h.sigTParams.
+//
+// If h.sigTParams is set and contains t, then we are in the process of hashing
+// a signature, and the hash value of t must depend only on t's index and
+// constraint: signatures are considered identical modulo type parameter
+// renaming. To avoid infinite recursion, we only hash the type parameter
+// index, and rely on types.Identical to handle signatures where constraints
+// are not identical.
+//
+// Otherwise the hash of t depends only on t's pointer identity.
+func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 {
+ if h.sigTParams != nil {
+ i := t.Index()
+ if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) {
+ return 9173 + 3*uint32(i)
+ }
+ }
+ return h.hashPtr(t.Obj())
+}
+
+// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that
+// pointer values are not dependent on the GC.
+func (h Hasher) hashPtr(ptr any) uint32 {
+ if hash, ok := h.ptrMap[ptr]; ok {
+ return hash
+ }
+ hash := uint32(reflect.ValueOf(ptr).Pointer())
+ h.ptrMap[ptr] = hash
+ return hash
+}
+
+// shallowHash computes a hash of t without looking at any of its
+// element Types, to avoid potential anonymous cycles in the types of
+// interface methods.
+//
+// When an unnamed non-empty interface type appears anywhere among the
+// arguments or results of an interface method, there is a potential
+// for endless recursion. Consider:
+//
+// type X interface { m() []*interface { X } }
+//
+// The problem is that the Methods of the interface in m's result type
+// include m itself; there is no mention of the named type X that
+// might help us break the cycle.
+// (See comment in go/types.identical, case *Interface, for more.)
+func (h Hasher) shallowHash(t types.Type) uint32 {
+ // t is the type of an interface method (Signature),
+ // its params or results (Tuples), or their immediate
+ // elements (mostly Slice, Pointer, Basic, Named),
+ // so there's no need to optimize anything else.
+ switch t := t.(type) {
+ case *types.Alias:
+ return h.shallowHash(types.Unalias(t))
+
+ case *types.Signature:
+ var hash uint32 = 604171
+ if t.Variadic() {
+ hash *= 971767
+ }
+ // The Signature/Tuple recursion is always finite
+ // and invariably shallow.
+ return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results())
+
+ case *types.Tuple:
+ n := t.Len()
+ hash := 9137 + 2*uint32(n)
+ for i := 0; i < n; i++ {
+ hash += 53471161 * h.shallowHash(t.At(i).Type())
+ }
+ return hash
+
+ case *types.Basic:
+ return 45212177 * uint32(t.Kind())
+
+ case *types.Array:
+ return 1524181 + 2*uint32(t.Len())
+
+ case *types.Slice:
+ return 2690201
+
+ case *types.Struct:
+ return 3326489
+
+ case *types.Pointer:
+ return 4393139
+
+ case *types.Union:
+ return 562448657
+
+ case *types.Interface:
+ return 2124679 // no recursion here
+
+ case *types.Map:
+ return 9109
+
+ case *types.Chan:
+ return 9127
+
+ case *types.Named:
+ return h.hashPtr(t.Obj())
+
+ case *types.TypeParam:
+ return h.hashPtr(t.Obj())
+ }
+ panic(fmt.Sprintf("shallowHash: %T: %v", t, t))
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
new file mode 100644
index 00000000..f7666028
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
@@ -0,0 +1,71 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a cache of method sets.
+
+package typeutil
+
+import (
+ "go/types"
+ "sync"
+)
+
+// A MethodSetCache records the method set of each type T for which
+// MethodSet(T) is called so that repeat queries are fast.
+// The zero value is a ready-to-use cache instance.
+type MethodSetCache struct {
+ mu sync.Mutex
+ named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
+ others map[types.Type]*types.MethodSet // all other types
+}
+
+// MethodSet returns the method set of type T. It is thread-safe.
+//
+// If cache is nil, this function is equivalent to types.NewMethodSet(T).
+// Utility functions can thus expose an optional *MethodSetCache
+// parameter to clients that care about performance.
+func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
+ if cache == nil {
+ return types.NewMethodSet(T)
+ }
+ cache.mu.Lock()
+ defer cache.mu.Unlock()
+
+ switch T := types.Unalias(T).(type) {
+ case *types.Named:
+ return cache.lookupNamed(T).value
+
+ case *types.Pointer:
+ if N, ok := types.Unalias(T.Elem()).(*types.Named); ok {
+ return cache.lookupNamed(N).pointer
+ }
+ }
+
+ // all other types
+ // (The map uses pointer equivalence, not type identity.)
+ mset := cache.others[T]
+ if mset == nil {
+ mset = types.NewMethodSet(T)
+ if cache.others == nil {
+ cache.others = make(map[types.Type]*types.MethodSet)
+ }
+ cache.others[T] = mset
+ }
+ return mset
+}
+
+func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
+ if cache.named == nil {
+ cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
+ }
+ // Avoid recomputing mset(*T) for each distinct Pointer
+ // instance whose underlying type is a named type.
+ msets, ok := cache.named[named]
+ if !ok {
+ msets.value = types.NewMethodSet(named)
+ msets.pointer = types.NewMethodSet(types.NewPointer(named))
+ cache.named[named] = msets
+ }
+ return msets
+}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go
new file mode 100644
index 00000000..9dda6a25
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go
@@ -0,0 +1,53 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeutil
+
+// This file defines utilities for user interfaces that display types.
+
+import (
+ "go/types"
+)
+
+// IntuitiveMethodSet returns the intuitive method set of a type T,
+// which is the set of methods you can call on an addressable value of
+// that type.
+//
+// The result always contains MethodSet(T), and is exactly MethodSet(T)
+// for interface types and for pointer-to-concrete types.
+// For all other concrete types T, the result additionally
+// contains each method belonging to *T if there is no identically
+// named method on T itself.
+//
+// This corresponds to user intuition about method sets;
+// this function is intended only for user interfaces.
+//
+// The order of the result is as for types.MethodSet(T).
+func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
+ isPointerToConcrete := func(T types.Type) bool {
+ ptr, ok := types.Unalias(T).(*types.Pointer)
+ return ok && !types.IsInterface(ptr.Elem())
+ }
+
+ var result []*types.Selection
+ mset := msets.MethodSet(T)
+ if types.IsInterface(T) || isPointerToConcrete(T) {
+ for i, n := 0, mset.Len(); i < n; i++ {
+ result = append(result, mset.At(i))
+ }
+ } else {
+ // T is some other concrete type.
+ // Report methods of T and *T, preferring those of T.
+ pmset := msets.MethodSet(types.NewPointer(T))
+ for i, n := 0, pmset.Len(); i < n; i++ {
+ meth := pmset.At(i)
+ if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
+ meth = m
+ }
+ result = append(result, meth)
+ }
+
+ }
+ return result
+}
diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go
index c24c2eee..b9425f5a 100644
--- a/vendor/golang.org/x/tools/internal/aliases/aliases.go
+++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go
@@ -22,11 +22,17 @@ import (
// GODEBUG=gotypesalias=... by invoking the type checker. The Enabled
// function is expensive and should be called once per task (e.g.
// package import), not once per call to NewAlias.
-func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName {
+//
+// Precondition: enabled || len(tparams)==0.
+// If materialized aliases are disabled, there must not be any type parameters.
+func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName {
if enabled {
tname := types.NewTypeName(pos, pkg, name, nil)
- newAlias(tname, rhs)
+ SetTypeParams(types.NewAlias(tname, rhs), tparams)
return tname
}
+ if len(tparams) > 0 {
+ panic("cannot create an alias with type parameters when gotypesalias is not enabled")
+ }
return types.NewTypeName(pos, pkg, name, rhs)
}
diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go
deleted file mode 100644
index c027b9f3..00000000
--- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.22
-// +build !go1.22
-
-package aliases
-
-import (
- "go/types"
-)
-
-// Alias is a placeholder for a go/types.Alias for <=1.21.
-// It will never be created by go/types.
-type Alias struct{}
-
-func (*Alias) String() string { panic("unreachable") }
-func (*Alias) Underlying() types.Type { panic("unreachable") }
-func (*Alias) Obj() *types.TypeName { panic("unreachable") }
-func Rhs(alias *Alias) types.Type { panic("unreachable") }
-
-// Unalias returns the type t for go <=1.21.
-func Unalias(t types.Type) types.Type { return t }
-
-func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") }
-
-// Enabled reports whether [NewAlias] should create [types.Alias] types.
-//
-// Before go1.22, this function always returns false.
-func Enabled() bool { return false }
diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
index b3299548..7716a333 100644
--- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
+++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build go1.22
-// +build go1.22
-
package aliases
import (
@@ -14,31 +11,51 @@ import (
"go/types"
)
-// Alias is an alias of types.Alias.
-type Alias = types.Alias
-
// Rhs returns the type on the right-hand side of the alias declaration.
-func Rhs(alias *Alias) types.Type {
+func Rhs(alias *types.Alias) types.Type {
if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok {
return alias.Rhs() // go1.23+
}
// go1.22's Alias didn't have the Rhs method,
// so Unalias is the best we can do.
- return Unalias(alias)
+ return types.Unalias(alias)
+}
+
+// TypeParams returns the type parameter list of the alias.
+func TypeParams(alias *types.Alias) *types.TypeParamList {
+ if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok {
+ return alias.TypeParams() // go1.23+
+ }
+ return nil
+}
+
+// SetTypeParams sets the type parameters of the alias type.
+func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) {
+ if alias, ok := any(alias).(interface {
+ SetTypeParams(tparams []*types.TypeParam)
+ }); ok {
+ alias.SetTypeParams(tparams) // go1.23+
+ } else if len(tparams) > 0 {
+ panic("cannot set type parameters of an Alias type in go1.22")
+ }
+}
+
+// TypeArgs returns the type arguments used to instantiate the Alias type.
+func TypeArgs(alias *types.Alias) *types.TypeList {
+ if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok {
+ return alias.TypeArgs() // go1.23+
+ }
+ return nil // empty (go1.22)
}
-// Unalias is a wrapper of types.Unalias.
-func Unalias(t types.Type) types.Type { return types.Unalias(t) }
-
-// newAlias is an internal alias around types.NewAlias.
-// Direct usage is discouraged as the moment.
-// Try to use NewAlias instead.
-func newAlias(tname *types.TypeName, rhs types.Type) *Alias {
- a := types.NewAlias(tname, rhs)
- // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect.
- Unalias(a)
- return a
+// Origin returns the generic Alias type of which alias is an instance.
+// If alias is not an instance of a generic alias, Origin returns alias.
+func Origin(alias *types.Alias) *types.Alias {
+ if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok {
+ return alias.Origin() // go1.23+
+ }
+ return alias // not an instance of a generic alias (go1.22)
}
// Enabled reports whether [NewAlias] should create [types.Alias] types.
@@ -56,7 +73,7 @@ func Enabled() bool {
// many tests. Therefore any attempt to cache the result
// is just incorrect.
fset := token.NewFileSet()
- f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0)
+ f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution)
pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
_, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias)
return enabled
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
index d98b0db2..d79a605e 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
@@ -87,64 +87,3 @@ func chanDir(d int) types.ChanDir {
return 0
}
}
-
-var predeclOnce sync.Once
-var predecl []types.Type // initialized lazily
-
-func predeclared() []types.Type {
- predeclOnce.Do(func() {
- // initialize lazily to be sure that all
- // elements have been initialized before
- predecl = []types.Type{ // basic types
- types.Typ[types.Bool],
- types.Typ[types.Int],
- types.Typ[types.Int8],
- types.Typ[types.Int16],
- types.Typ[types.Int32],
- types.Typ[types.Int64],
- types.Typ[types.Uint],
- types.Typ[types.Uint8],
- types.Typ[types.Uint16],
- types.Typ[types.Uint32],
- types.Typ[types.Uint64],
- types.Typ[types.Uintptr],
- types.Typ[types.Float32],
- types.Typ[types.Float64],
- types.Typ[types.Complex64],
- types.Typ[types.Complex128],
- types.Typ[types.String],
-
- // basic type aliases
- types.Universe.Lookup("byte").Type(),
- types.Universe.Lookup("rune").Type(),
-
- // error
- types.Universe.Lookup("error").Type(),
-
- // untyped types
- types.Typ[types.UntypedBool],
- types.Typ[types.UntypedInt],
- types.Typ[types.UntypedRune],
- types.Typ[types.UntypedFloat],
- types.Typ[types.UntypedComplex],
- types.Typ[types.UntypedString],
- types.Typ[types.UntypedNil],
-
- // package unsafe
- types.Typ[types.UnsafePointer],
-
- // invalid type
- types.Typ[types.Invalid], // only appears in packages with errors
-
- // used internally by gc; never used by this package or in .a files
- anyType{},
- }
- predecl = append(predecl, additionalPredeclared()...)
- })
- return predecl
-}
-
-type anyType struct{}
-
-func (t anyType) Underlying() types.Type { return t }
-func (t anyType) String() string { return "any" }
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
index 39df9112..e6c5d51f 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
@@ -232,14 +232,19 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
// Select appropriate importer.
if len(data) > 0 {
switch data[0] {
- case 'v', 'c', 'd': // binary, till go1.10
+ case 'v', 'c', 'd':
+ // binary: emitted by cmd/compile till go1.10; obsolete.
return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
- case 'i': // indexed, till go1.19
+ case 'i':
+ // indexed: emitted by cmd/compile till go1.19;
+ // now used only for serializing go/types.
+ // See https://github.com/golang/go/issues/69491.
_, pkg, err := IImportData(fset, packages, data[1:], id)
return pkg, err
- case 'u': // unified, from go1.20
+ case 'u':
+ // unified: emitted by cmd/compile since go1.20.
_, pkg, err := UImportData(fset, packages, data[1:size], id)
return pkg, err
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
index deeb67f3..1e19fbed 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -2,9 +2,227 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Indexed binary package export.
-// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
-// see that file for specification of the format.
+// Indexed package export.
+//
+// The indexed export data format is an evolution of the previous
+// binary export data format. Its chief contribution is introducing an
+// index table, which allows efficient random access of individual
+// declarations and inline function bodies. In turn, this allows
+// avoiding unnecessary work for compilation units that import large
+// packages.
+//
+//
+// The top-level data format is structured as:
+//
+// Header struct {
+// Tag byte // 'i'
+// Version uvarint
+// StringSize uvarint
+// DataSize uvarint
+// }
+//
+// Strings [StringSize]byte
+// Data [DataSize]byte
+//
+// MainIndex []struct{
+// PkgPath stringOff
+// PkgName stringOff
+// PkgHeight uvarint
+//
+// Decls []struct{
+// Name stringOff
+// Offset declOff
+// }
+// }
+//
+// Fingerprint [8]byte
+//
+// uvarint means a uint64 written out using uvarint encoding.
+//
+// []T means a uvarint followed by that many T objects. In other
+// words:
+//
+// Len uvarint
+// Elems [Len]T
+//
+// stringOff means a uvarint that indicates an offset within the
+// Strings section. At that offset is another uvarint, followed by
+// that many bytes, which form the string value.
+//
+// declOff means a uvarint that indicates an offset within the Data
+// section where the associated declaration can be found.
+//
+//
+// There are five kinds of declarations, distinguished by their first
+// byte:
+//
+// type Var struct {
+// Tag byte // 'V'
+// Pos Pos
+// Type typeOff
+// }
+//
+// type Func struct {
+// Tag byte // 'F' or 'G'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'G'
+// Signature Signature
+// }
+//
+// type Const struct {
+// Tag byte // 'C'
+// Pos Pos
+// Value Value
+// }
+//
+// type Type struct {
+// Tag byte // 'T' or 'U'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'U'
+// Underlying typeOff
+//
+// Methods []struct{ // omitted if Underlying is an interface type
+// Pos Pos
+// Name stringOff
+// Recv Param
+// Signature Signature
+// }
+// }
+//
+// type Alias struct {
+// Tag byte // 'A' or 'B'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'B'
+// Type typeOff
+// }
+//
+// // "Automatic" declaration of each typeparam
+// type TypeParam struct {
+// Tag byte // 'P'
+// Pos Pos
+// Implicit bool
+// Constraint typeOff
+// }
+//
+// typeOff means a uvarint that either indicates a predeclared type,
+// or an offset into the Data section. If the uvarint is less than
+// predeclReserved, then it indicates the index into the predeclared
+// types list (see predeclared in bexport.go for order). Otherwise,
+// subtracting predeclReserved yields the offset of a type descriptor.
+//
+// Value means a type, kind, and type-specific value. See
+// (*exportWriter).value for details.
+//
+//
+// There are twelve kinds of type descriptors, distinguished by an itag:
+//
+// type DefinedType struct {
+// Tag itag // definedType
+// Name stringOff
+// PkgPath stringOff
+// }
+//
+// type PointerType struct {
+// Tag itag // pointerType
+// Elem typeOff
+// }
+//
+// type SliceType struct {
+// Tag itag // sliceType
+// Elem typeOff
+// }
+//
+// type ArrayType struct {
+// Tag itag // arrayType
+// Len uint64
+// Elem typeOff
+// }
+//
+// type ChanType struct {
+// Tag itag // chanType
+// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv
+// Elem typeOff
+// }
+//
+// type MapType struct {
+// Tag itag // mapType
+// Key typeOff
+// Elem typeOff
+// }
+//
+// type FuncType struct {
+// Tag itag // signatureType
+// PkgPath stringOff
+// Signature Signature
+// }
+//
+// type StructType struct {
+// Tag itag // structType
+// PkgPath stringOff
+// Fields []struct {
+// Pos Pos
+// Name stringOff
+// Type typeOff
+// Embedded bool
+// Note stringOff
+// }
+// }
+//
+// type InterfaceType struct {
+// Tag itag // interfaceType
+// PkgPath stringOff
+// Embeddeds []struct {
+// Pos Pos
+// Type typeOff
+// }
+// Methods []struct {
+// Pos Pos
+// Name stringOff
+// Signature Signature
+// }
+// }
+//
+// // Reference to a type param declaration
+// type TypeParamType struct {
+// Tag itag // typeParamType
+// Name stringOff
+// PkgPath stringOff
+// }
+//
+// // Instantiation of a generic type (like List[T2] or List[int])
+// type InstanceType struct {
+// Tag itag // instanceType
+// Pos pos
+// TypeArgs []typeOff
+// BaseType typeOff
+// }
+//
+// type UnionType struct {
+// Tag itag // interfaceType
+// Terms []struct {
+// tilde bool
+// Type typeOff
+// }
+// }
+//
+//
+//
+// type Signature struct {
+// Params []Param
+// Results []Param
+// Variadic bool // omitted if Results is empty
+// }
+//
+// type Param struct {
+// Pos Pos
+// Name stringOff
+// Type typOff
+// }
+//
+//
+// Pos encodes a file:line:column triple, incorporating a simple delta
+// encoding scheme within a data object. See exportWriter.pos for
+// details.
package gcimporter
@@ -24,7 +242,6 @@ import (
"golang.org/x/tools/go/types/objectpath"
"golang.org/x/tools/internal/aliases"
- "golang.org/x/tools/internal/tokeninternal"
)
// IExportShallow encodes "shallow" export data for the specified package.
@@ -223,7 +440,7 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64)
// Sort the set of needed offsets. Duplicates are harmless.
sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })
- lines := tokeninternal.GetLines(file) // byte offset of each line start
+ lines := file.Lines() // byte offset of each line start
w.uint64(uint64(len(lines)))
// Rather than record the entire array of line start offsets,
@@ -507,13 +724,13 @@ func (p *iexporter) doDecl(obj types.Object) {
case *types.TypeName:
t := obj.Type()
- if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok {
+ if tparam, ok := types.Unalias(t).(*types.TypeParam); ok {
w.tag(typeParamTag)
w.pos(obj.Pos())
constraint := tparam.Constraint()
if p.version >= iexportVersionGo1_18 {
implicit := false
- if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil {
+ if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil {
implicit = iface.IsImplicit()
}
w.bool(implicit)
@@ -523,9 +740,22 @@ func (p *iexporter) doDecl(obj types.Object) {
}
if obj.IsAlias() {
- w.tag(aliasTag)
+ alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled
+
+ var tparams *types.TypeParamList
+ if materialized {
+ tparams = aliases.TypeParams(alias)
+ }
+ if tparams.Len() == 0 {
+ w.tag(aliasTag)
+ } else {
+ w.tag(genericAliasTag)
+ }
w.pos(obj.Pos())
- if alias, ok := t.(*aliases.Alias); ok {
+ if tparams.Len() > 0 {
+ w.tparamList(obj.Name(), tparams, obj.Pkg())
+ }
+ if materialized {
// Preserve materialized aliases,
// even of non-exported types.
t = aliases.Rhs(alias)
@@ -744,8 +974,14 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
}()
}
switch t := t.(type) {
- case *aliases.Alias:
- // TODO(adonovan): support parameterized aliases, following *types.Named.
+ case *types.Alias:
+ if targs := aliases.TypeArgs(t); targs.Len() > 0 {
+ w.startType(instanceType)
+ w.pos(t.Obj().Pos())
+ w.typeList(targs, pkg)
+ w.typ(aliases.Origin(t), pkg)
+ return
+ }
w.startType(aliasType)
w.qualifiedType(t.Obj())
@@ -854,7 +1090,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
for i := 0; i < n; i++ {
ft := t.EmbeddedType(i)
tPkg := pkg
- if named, _ := aliases.Unalias(ft).(*types.Named); named != nil {
+ if named, _ := types.Unalias(ft).(*types.Named); named != nil {
w.pos(named.Obj().Pos())
} else {
w.pos(token.NoPos)
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
index 136aa036..21908a15 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Indexed package import.
-// See cmd/compile/internal/gc/iexport.go for the export data format.
+// See iexport.go for the export data format.
// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
@@ -53,6 +53,7 @@ const (
iexportVersionPosCol = 1
iexportVersionGo1_18 = 2
iexportVersionGenerics = 2
+ iexportVersion = iexportVersionGenerics
iexportVersionCurrent = 2
)
@@ -540,7 +541,7 @@ func canReuse(def *types.Named, rhs types.Type) bool {
if def == nil {
return true
}
- iface, _ := aliases.Unalias(rhs).(*types.Interface)
+ iface, _ := types.Unalias(rhs).(*types.Interface)
if iface == nil {
return true
}
@@ -562,14 +563,14 @@ func (r *importReader) obj(name string) {
pos := r.pos()
switch tag {
- case aliasTag:
+ case aliasTag, genericAliasTag:
+ var tparams []*types.TypeParam
+ if tag == genericAliasTag {
+ tparams = r.tparamList()
+ }
typ := r.typ()
- // TODO(adonovan): support generic aliases:
- // if tag == genericAliasTag {
- // tparams := r.tparamList()
- // alias.SetTypeParams(tparams)
- // }
- r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ))
+ obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams)
+ r.declare(obj)
case constTag:
typ, val := r.value()
@@ -615,7 +616,7 @@ func (r *importReader) obj(name string) {
if targs.Len() > 0 {
rparams = make([]*types.TypeParam, targs.Len())
for i := range rparams {
- rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam)
+ rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam)
}
}
msig := r.signature(recv, rparams, nil)
@@ -645,7 +646,7 @@ func (r *importReader) obj(name string) {
}
constraint := r.typ()
if implicit {
- iface, _ := aliases.Unalias(constraint).(*types.Interface)
+ iface, _ := types.Unalias(constraint).(*types.Interface)
if iface == nil {
errorf("non-interface constraint marked implicit")
}
@@ -852,7 +853,7 @@ func (r *importReader) typ() types.Type {
}
func isInterface(t types.Type) bool {
- _, ok := aliases.Unalias(t).(*types.Interface)
+ _, ok := types.Unalias(t).(*types.Interface)
return ok
}
@@ -862,7 +863,7 @@ func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
func (r *importReader) doType(base *types.Named) (res types.Type) {
k := r.kind()
if debug {
- r.p.trace("importing type %d (base: %s)", k, base)
+ r.p.trace("importing type %d (base: %v)", k, base)
r.p.indent++
defer func() {
r.p.indent--
@@ -959,7 +960,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) {
methods[i] = method
}
- typ := newInterface(methods, embeddeds)
+ typ := types.NewInterfaceType(methods, embeddeds)
r.p.interfaceList = append(r.p.interfaceList, typ)
return typ
@@ -1051,7 +1052,7 @@ func (r *importReader) tparamList() []*types.TypeParam {
for i := range xs {
// Note: the standard library importer is tolerant of nil types here,
// though would panic in SetTypeParams.
- xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam)
+ xs[i] = types.Unalias(r.typ()).(*types.TypeParam)
}
return xs
}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go
deleted file mode 100644
index 8b163e3d..00000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.11
-// +build !go1.11
-
-package gcimporter
-
-import "go/types"
-
-func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
- named := make([]*types.Named, len(embeddeds))
- for i, e := range embeddeds {
- var ok bool
- named[i], ok = e.(*types.Named)
- if !ok {
- panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
- }
- }
- return types.NewInterface(methods, named)
-}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go
deleted file mode 100644
index 49984f40..00000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.11
-// +build go1.11
-
-package gcimporter
-
-import "go/types"
-
-func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
- return types.NewInterfaceType(methods, embeddeds)
-}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go
new file mode 100644
index 00000000..907c8557
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go
@@ -0,0 +1,91 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcimporter
+
+import (
+ "go/types"
+ "sync"
+)
+
+// predecl is a cache for the predeclared types in types.Universe.
+//
+// Cache a distinct result based on the runtime value of any.
+// The pointer value of the any type varies based on GODEBUG settings.
+var predeclMu sync.Mutex
+var predecl map[types.Type][]types.Type
+
+func predeclared() []types.Type {
+ anyt := types.Universe.Lookup("any").Type()
+
+ predeclMu.Lock()
+ defer predeclMu.Unlock()
+
+ if pre, ok := predecl[anyt]; ok {
+ return pre
+ }
+
+ if predecl == nil {
+ predecl = make(map[types.Type][]types.Type)
+ }
+
+ decls := []types.Type{ // basic types
+ types.Typ[types.Bool],
+ types.Typ[types.Int],
+ types.Typ[types.Int8],
+ types.Typ[types.Int16],
+ types.Typ[types.Int32],
+ types.Typ[types.Int64],
+ types.Typ[types.Uint],
+ types.Typ[types.Uint8],
+ types.Typ[types.Uint16],
+ types.Typ[types.Uint32],
+ types.Typ[types.Uint64],
+ types.Typ[types.Uintptr],
+ types.Typ[types.Float32],
+ types.Typ[types.Float64],
+ types.Typ[types.Complex64],
+ types.Typ[types.Complex128],
+ types.Typ[types.String],
+
+ // basic type aliases
+ types.Universe.Lookup("byte").Type(),
+ types.Universe.Lookup("rune").Type(),
+
+ // error
+ types.Universe.Lookup("error").Type(),
+
+ // untyped types
+ types.Typ[types.UntypedBool],
+ types.Typ[types.UntypedInt],
+ types.Typ[types.UntypedRune],
+ types.Typ[types.UntypedFloat],
+ types.Typ[types.UntypedComplex],
+ types.Typ[types.UntypedString],
+ types.Typ[types.UntypedNil],
+
+ // package unsafe
+ types.Typ[types.UnsafePointer],
+
+ // invalid type
+ types.Typ[types.Invalid], // only appears in packages with errors
+
+ // used internally by gc; never used by this package or in .a files
+ anyType{},
+
+ // comparable
+ types.Universe.Lookup("comparable").Type(),
+
+ // any
+ anyt,
+ }
+
+ predecl[anyt] = decls
+ return decls
+}
+
+type anyType struct{}
+
+func (t anyType) Underlying() types.Type { return t }
+func (t anyType) String() string { return "any" }
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
deleted file mode 100644
index 0cd3b91b..00000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package gcimporter
-
-import "go/types"
-
-const iexportVersion = iexportVersionGenerics
-
-// additionalPredeclared returns additional predeclared types in go.1.18.
-func additionalPredeclared() []types.Type {
- return []types.Type{
- // comparable
- types.Universe.Lookup("comparable").Type(),
-
- // any
- types.Universe.Lookup("any").Type(),
- }
-}
-
-// See cmd/compile/internal/types.SplitVargenSuffix.
-func splitVargenSuffix(name string) (base, suffix string) {
- i := len(name)
- for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
- i--
- }
- const dot = "·"
- if i >= len(dot) && name[i-len(dot):i] == dot {
- i -= len(dot)
- return name[:i], name[i:]
- }
- return name, ""
-}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
deleted file mode 100644
index 38b624ca..00000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.unified
-// +build !goexperiment.unified
-
-package gcimporter
-
-const unifiedIR = false
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
deleted file mode 100644
index b5118d0b..00000000
--- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.unified
-// +build goexperiment.unified
-
-package gcimporter
-
-const unifiedIR = true
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
index 2c077068..1db40861 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
@@ -52,8 +52,7 @@ func (pr *pkgReader) later(fn func()) {
// See cmd/compile/internal/noder.derivedInfo.
type derivedInfo struct {
- idx pkgbits.Index
- needed bool
+ idx pkgbits.Index
}
// See cmd/compile/internal/noder.typeInfo.
@@ -110,13 +109,17 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st
r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic)
pkg := r.pkg()
- r.Bool() // has init
+ if r.Version().Has(pkgbits.HasInit) {
+ r.Bool()
+ }
for i, n := 0, r.Len(); i < n; i++ {
// As if r.obj(), but avoiding the Scope.Lookup call,
// to avoid eager loading of imports.
r.Sync(pkgbits.SyncObject)
- assert(!r.Bool())
+ if r.Version().Has(pkgbits.DerivedFuncInstance) {
+ assert(!r.Bool())
+ }
r.p.objIdx(r.Reloc(pkgbits.RelocObj))
assert(r.Len() == 0)
}
@@ -165,7 +168,7 @@ type readerDict struct {
// tparams is a slice of the constructed TypeParams for the element.
tparams []*types.TypeParam
- // devived is a slice of types derived from tparams, which may be
+ // derived is a slice of types derived from tparams, which may be
// instantiated while reading the current element.
derived []derivedInfo
derivedTypes []types.Type // lazily instantiated from derived
@@ -471,7 +474,9 @@ func (r *reader) param() *types.Var {
func (r *reader) obj() (types.Object, []types.Type) {
r.Sync(pkgbits.SyncObject)
- assert(!r.Bool())
+ if r.Version().Has(pkgbits.DerivedFuncInstance) {
+ assert(!r.Bool())
+ }
pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj))
obj := pkgScope(pkg).Lookup(name)
@@ -525,8 +530,12 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
case pkgbits.ObjAlias:
pos := r.pos()
+ var tparams []*types.TypeParam
+ if r.Version().Has(pkgbits.AliasTypeParamNames) {
+ tparams = r.typeParamNames()
+ }
typ := r.typ()
- declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ))
+ declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams))
case pkgbits.ObjConst:
pos := r.pos()
@@ -553,7 +562,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
// If the underlying type is an interface, we need to
// duplicate its methods so we can replace the receiver
// parameter's type (#49906).
- if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
+ if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
methods := make([]*types.Func, iface.NumExplicitMethods())
for i := range methods {
fn := iface.ExplicitMethod(i)
@@ -632,7 +641,10 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict {
dict.derived = make([]derivedInfo, r.Len())
dict.derivedTypes = make([]types.Type, len(dict.derived))
for i := range dict.derived {
- dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()}
+ dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)}
+ if r.Version().Has(pkgbits.DerivedInfoNeeded) {
+ assert(!r.Bool())
+ }
}
pr.retireReader(r)
@@ -726,3 +738,17 @@ func pkgScope(pkg *types.Package) *types.Scope {
}
return types.Universe
}
+
+// See cmd/compile/internal/types.SplitVargenSuffix.
+func splitVargenSuffix(name string) (base, suffix string) {
+ i := len(name)
+ for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
+ i--
+ }
+ const dot = "·"
+ if i >= len(dot) && name[i-len(dot):i] == dot {
+ i -= len(dot)
+ return name[:i], name[i:]
+ }
+ return name, ""
+}
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
index 2e59ff85..e333efc8 100644
--- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -16,7 +16,6 @@ import (
"os"
"os/exec"
"path/filepath"
- "reflect"
"regexp"
"runtime"
"strconv"
@@ -250,16 +249,13 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
cmd.Stdout = stdout
cmd.Stderr = stderr
- // cmd.WaitDelay was added only in go1.20 (see #50436).
- if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
- // https://go.dev/issue/59541: don't wait forever copying stderr
- // after the command has exited.
- // After CL 484741 we copy stdout manually, so we we'll stop reading that as
- // soon as ctx is done. However, we also don't want to wait around forever
- // for stderr. Give a much-longer-than-reasonable delay and then assume that
- // something has wedged in the kernel or runtime.
- waitDelay.Set(reflect.ValueOf(30 * time.Second))
- }
+ // https://go.dev/issue/59541: don't wait forever copying stderr
+ // after the command has exited.
+ // After CL 484741 we copy stdout manually, so we we'll stop reading that as
+ // soon as ctx is done. However, we also don't want to wait around forever
+ // for stderr. Give a much-longer-than-reasonable delay and then assume that
+ // something has wedged in the kernel or runtime.
+ cmd.WaitDelay = 30 * time.Second
// The cwd gets resolved to the real path. On Darwin, where
// /tmp is a symlink, this breaks anything that expects the
diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go
index dc7d50a7..c1510817 100644
--- a/vendor/golang.org/x/tools/internal/imports/fix.go
+++ b/vendor/golang.org/x/tools/internal/imports/fix.go
@@ -131,7 +131,7 @@ func parseOtherFiles(ctx context.Context, fset *token.FileSet, srcDir, filename
continue
}
- f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, 0)
+ f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, parser.SkipObjectResolution)
if err != nil {
continue
}
@@ -1620,6 +1620,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
}
fullFile := filepath.Join(dir, fi.Name())
+ // Legacy ast.Object resolution is needed here.
f, err := parser.ParseFile(fset, fullFile, nil, 0)
if err != nil {
env.logf("error parsing %v: %v", fullFile, err)
diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go
index f8346552..ff6b59a5 100644
--- a/vendor/golang.org/x/tools/internal/imports/imports.go
+++ b/vendor/golang.org/x/tools/internal/imports/imports.go
@@ -86,7 +86,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
// Don't use parse() -- we don't care about fragments or statement lists
// here, and we need to work with unparseable files.
fileSet := token.NewFileSet()
- parserMode := parser.Mode(0)
+ parserMode := parser.SkipObjectResolution
if opt.Comments {
parserMode |= parser.ParseComments
}
@@ -165,7 +165,7 @@ func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(ori
// parse parses src, which was read from filename,
// as a Go source file or statement list.
func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
- parserMode := parser.Mode(0)
+ var parserMode parser.Mode // legacy ast.Object resolution is required here
if opt.Comments {
parserMode |= parser.ParseComments
}
diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go
index 91221fda..8555e3f8 100644
--- a/vendor/golang.org/x/tools/internal/imports/mod.go
+++ b/vendor/golang.org/x/tools/internal/imports/mod.go
@@ -245,7 +245,10 @@ func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleRe
// 2. Use this to separate module cache scanning from other scanning.
func gomodcacheForEnv(goenv map[string]string) string {
if gmc := goenv["GOMODCACHE"]; gmc != "" {
- return gmc
+ // golang/go#67156: ensure that the module cache is clean, since it is
+ // assumed as a prefix to directories scanned by gopathwalk, which are
+ // themselves clean.
+ return filepath.Clean(gmc)
}
gopaths := filepath.SplitList(goenv["GOPATH"])
if len(gopaths) == 0 {
@@ -740,8 +743,8 @@ func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest
func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo {
subdir := ""
- if dir != root.Path {
- subdir = dir[len(root.Path)+len("/"):]
+ if prefix := root.Path + string(filepath.Separator); strings.HasPrefix(dir, prefix) {
+ subdir = dir[len(prefix):]
}
importPath := filepath.ToSlash(subdir)
if strings.HasPrefix(importPath, "vendor/") {
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
index 2acd8585..f6cb37c5 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
@@ -21,10 +21,7 @@ import (
// export data.
type PkgDecoder struct {
// version is the file format version.
- version uint32
-
- // aliases determines whether types.Aliases should be created
- aliases bool
+ version Version
// sync indicates whether the file uses sync markers.
sync bool
@@ -71,12 +68,9 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync }
// NewPkgDecoder returns a PkgDecoder initialized to read the Unified
// IR export data from input. pkgPath is the package path for the
// compilation unit that produced the export data.
-//
-// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014.
func NewPkgDecoder(pkgPath, input string) PkgDecoder {
pr := PkgDecoder{
pkgPath: pkgPath,
- //aliases: aliases.Enabled(),
}
// TODO(mdempsky): Implement direct indexing of input string to
@@ -84,14 +78,15 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
r := strings.NewReader(input)
- assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil)
+ var ver uint32
+ assert(binary.Read(r, binary.LittleEndian, &ver) == nil)
+ pr.version = Version(ver)
- switch pr.version {
- default:
- panic(fmt.Errorf("unsupported version: %v", pr.version))
- case 0:
- // no flags
- case 1:
+ if pr.version >= numVersions {
+ panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1))
+ }
+
+ if pr.version.Has(Flags) {
var flags uint32
assert(binary.Read(r, binary.LittleEndian, &flags) == nil)
pr.sync = flags&flagSyncMarkers != 0
@@ -106,7 +101,9 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder {
assert(err == nil)
pr.elemData = input[pos:]
- assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1]))
+
+ const fingerprintSize = 8
+ assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1]))
return pr
}
@@ -140,7 +137,7 @@ func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int {
absIdx += int(pr.elemEndsEnds[k-1])
}
if absIdx >= int(pr.elemEndsEnds[k]) {
- errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
+ panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
}
return absIdx
}
@@ -197,9 +194,7 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
Idx: idx,
}
- // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
- r.Data = *strings.NewReader(pr.DataIdx(k, idx))
-
+ r.Data.Reset(pr.DataIdx(k, idx))
r.Sync(SyncRelocs)
r.Relocs = make([]RelocEnt, r.Len())
for i := range r.Relocs {
@@ -248,7 +243,7 @@ type Decoder struct {
func (r *Decoder) checkErr(err error) {
if err != nil {
- errorf("unexpected decoding error: %w", err)
+ panicf("unexpected decoding error: %w", err)
}
}
@@ -519,3 +514,6 @@ func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) {
return path, name, tag
}
+
+// Version reports the version of the bitstream.
+func (w *Decoder) Version() Version { return w.common.version }
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
index 6482617a..c17a1239 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go
@@ -12,18 +12,15 @@ import (
"io"
"math/big"
"runtime"
+ "strings"
)
-// currentVersion is the current version number.
-//
-// - v0: initial prototype
-//
-// - v1: adds the flags uint32 word
-const currentVersion uint32 = 1
-
// A PkgEncoder provides methods for encoding a package's Unified IR
// export data.
type PkgEncoder struct {
+ // version of the bitstream.
+ version Version
+
// elems holds the bitstream for previously encoded elements.
elems [numRelocs][]string
@@ -47,8 +44,9 @@ func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 }
// export data files, but can help diagnosing desync errors in
// higher-level Unified IR reader/writer code. If syncFrames is
// negative, then sync markers are omitted entirely.
-func NewPkgEncoder(syncFrames int) PkgEncoder {
+func NewPkgEncoder(version Version, syncFrames int) PkgEncoder {
return PkgEncoder{
+ version: version,
stringsIdx: make(map[string]Index),
syncFrames: syncFrames,
}
@@ -64,13 +62,15 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) {
assert(binary.Write(out, binary.LittleEndian, x) == nil)
}
- writeUint32(currentVersion)
+ writeUint32(uint32(pw.version))
- var flags uint32
- if pw.SyncMarkers() {
- flags |= flagSyncMarkers
+ if pw.version.Has(Flags) {
+ var flags uint32
+ if pw.SyncMarkers() {
+ flags |= flagSyncMarkers
+ }
+ writeUint32(flags)
}
- writeUint32(flags)
// Write elemEndsEnds.
var sum uint32
@@ -159,7 +159,7 @@ type Encoder struct {
// Flush finalizes the element's bitstream and returns its Index.
func (w *Encoder) Flush() Index {
- var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
+ var sb strings.Builder
// Backup the data so we write the relocations at the front.
var tmp bytes.Buffer
@@ -189,7 +189,7 @@ func (w *Encoder) Flush() Index {
func (w *Encoder) checkErr(err error) {
if err != nil {
- errorf("unexpected encoding error: %v", err)
+ panicf("unexpected encoding error: %v", err)
}
}
@@ -320,8 +320,14 @@ func (w *Encoder) Code(c Code) {
// section (if not already present), and then writing a relocation
// into the element bitstream.
func (w *Encoder) String(s string) {
+ w.StringRef(w.p.StringIdx(s))
+}
+
+// StringRef writes a reference to the given index, which must be a
+// previously encoded string value.
+func (w *Encoder) StringRef(idx Index) {
w.Sync(SyncString)
- w.Reloc(RelocString, w.p.StringIdx(s))
+ w.Reloc(RelocString, idx)
}
// Strings encodes and writes a variable-length slice of strings into
@@ -348,7 +354,7 @@ func (w *Encoder) Value(val constant.Value) {
func (w *Encoder) scalar(val constant.Value) {
switch v := constant.Val(val).(type) {
default:
- errorf("unhandled %v (%v)", val, val.Kind())
+ panicf("unhandled %v (%v)", val, val.Kind())
case bool:
w.Code(ValBool)
w.Bool(v)
@@ -381,3 +387,6 @@ func (w *Encoder) bigFloat(v *big.Float) {
b := v.Append(nil, 'p', -1)
w.String(string(b)) // TODO: More efficient encoding.
}
+
+// Version reports the version of the bitstream.
+func (w *Encoder) Version() Version { return w.p.version }
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
deleted file mode 100644
index 5294f6a6..00000000
--- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.7
-// +build !go1.7
-
-// TODO(mdempsky): Remove after #44505 is resolved
-
-package pkgbits
-
-import "runtime"
-
-func walkFrames(pcs []uintptr, visit frameVisitor) {
- for _, pc := range pcs {
- fn := runtime.FuncForPC(pc)
- file, line := fn.FileLine(pc)
-
- visit(file, line, fn.Name(), pc-fn.Entry())
- }
-}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
deleted file mode 100644
index 2324ae7a..00000000
--- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.7
-// +build go1.7
-
-package pkgbits
-
-import "runtime"
-
-// walkFrames calls visit for each call frame represented by pcs.
-//
-// pcs should be a slice of PCs, as returned by runtime.Callers.
-func walkFrames(pcs []uintptr, visit frameVisitor) {
- if len(pcs) == 0 {
- return
- }
-
- frames := runtime.CallersFrames(pcs)
- for {
- frame, more := frames.Next()
- visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
- if !more {
- return
- }
- }
-}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go
index ad26d3b2..50534a29 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/support.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go
@@ -12,6 +12,6 @@ func assert(b bool) {
}
}
-func errorf(format string, args ...interface{}) {
+func panicf(format string, args ...any) {
panic(fmt.Errorf(format, args...))
}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
index 5bd51ef7..1520b73a 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/sync.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go
@@ -6,6 +6,7 @@ package pkgbits
import (
"fmt"
+ "runtime"
"strings"
)
@@ -23,6 +24,24 @@ func fmtFrames(pcs ...uintptr) []string {
type frameVisitor func(file string, line int, name string, offset uintptr)
+// walkFrames calls visit for each call frame represented by pcs.
+//
+// pcs should be a slice of PCs, as returned by runtime.Callers.
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+ if len(pcs) == 0 {
+ return
+ }
+
+ frames := runtime.CallersFrames(pcs)
+ for {
+ frame, more := frames.Next()
+ visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
+ if !more {
+ return
+ }
+ }
+}
+
// SyncMarker is an enum type that represents markers that may be
// written to export data to ensure the reader and writer stay
// synchronized.
@@ -110,4 +129,8 @@ const (
SyncStmtsEnd
SyncLabel
SyncOptLabel
+
+ SyncMultiExpr
+ SyncRType
+ SyncConvRTTI
)
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
index 4a5b0ca5..582ad56d 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go
@@ -74,11 +74,14 @@ func _() {
_ = x[SyncStmtsEnd-64]
_ = x[SyncLabel-65]
_ = x[SyncOptLabel-66]
+ _ = x[SyncMultiExpr-67]
+ _ = x[SyncRType-68]
+ _ = x[SyncConvRTTI-69]
}
-const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel"
+const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI"
-var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458}
+var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480}
func (i SyncMarker) String() string {
i -= 1
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/version.go b/vendor/golang.org/x/tools/internal/pkgbits/version.go
new file mode 100644
index 00000000..53af9df2
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/pkgbits/version.go
@@ -0,0 +1,85 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkgbits
+
+// Version indicates a version of a unified IR bitstream.
+// Each Version indicates the addition, removal, or change of
+// new data in the bitstream.
+//
+// These are serialized to disk and the interpretation remains fixed.
+type Version uint32
+
+const (
+ // V0: initial prototype.
+ //
+ // All data that is not assigned a Field is in version V0
+ // and has not been deprecated.
+ V0 Version = iota
+
+ // V1: adds the Flags uint32 word
+ V1
+
+ // V2: removes unused legacy fields and supports type parameters for aliases.
+ // - remove the legacy "has init" bool from the public root
+ // - remove obj's "derived func instance" bool
+ // - add a TypeParamNames field to ObjAlias
+ // - remove derived info "needed" bool
+ V2
+
+ numVersions = iota
+)
+
+// Field denotes a unit of data in the serialized unified IR bitstream.
+// It is conceptually a like field in a structure.
+//
+// We only really need Fields when the data may or may not be present
+// in a stream based on the Version of the bitstream.
+//
+// Unlike much of pkgbits, Fields are not serialized and
+// can change values as needed.
+type Field int
+
+const (
+ // Flags in a uint32 in the header of a bitstream
+ // that is used to indicate whether optional features are enabled.
+ Flags Field = iota
+
+ // Deprecated: HasInit was a bool indicating whether a package
+ // has any init functions.
+ HasInit
+
+ // Deprecated: DerivedFuncInstance was a bool indicating
+ // whether an object was a function instance.
+ DerivedFuncInstance
+
+ // ObjAlias has a list of TypeParamNames.
+ AliasTypeParamNames
+
+ // Deprecated: DerivedInfoNeeded was a bool indicating
+ // whether a type was a derived type.
+ DerivedInfoNeeded
+
+ numFields = iota
+)
+
+// introduced is the version a field was added.
+var introduced = [numFields]Version{
+ Flags: V1,
+ AliasTypeParamNames: V2,
+}
+
+// removed is the version a field was removed in or 0 for fields
+// that have not yet been deprecated.
+// (So removed[f]-1 is the last version it is included in.)
+var removed = [numFields]Version{
+ HasInit: V2,
+ DerivedFuncInstance: V2,
+ DerivedInfoNeeded: V2,
+}
+
+// Has reports whether field f is present in a bitstream at version v.
+func (v Version) Has(f Field) bool {
+ return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
+}
diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
index a928acf2..cdaac9ab 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
@@ -951,7 +951,7 @@ var PackageSymbols = map[string][]Symbol{
{"ParseSessionState", Func, 21},
{"QUICClient", Func, 21},
{"QUICConfig", Type, 21},
- {"QUICConfig.EnableStoreSessionEvent", Field, 23},
+ {"QUICConfig.EnableSessionEvents", Field, 23},
{"QUICConfig.TLSConfig", Field, 21},
{"QUICConn", Type, 21},
{"QUICEncryptionLevel", Type, 21},
diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
deleted file mode 100644
index ff9437a3..00000000
--- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// package tokeninternal provides access to some internal features of the token
-// package.
-package tokeninternal
-
-import (
- "fmt"
- "go/token"
- "sort"
- "sync"
- "unsafe"
-)
-
-// GetLines returns the table of line-start offsets from a token.File.
-func GetLines(file *token.File) []int {
- // token.File has a Lines method on Go 1.21 and later.
- if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
- return file.Lines()
- }
-
- // This declaration must match that of token.File.
- // This creates a risk of dependency skew.
- // For now we check that the size of the two
- // declarations is the same, on the (fragile) assumption
- // that future changes would add fields.
- type tokenFile119 struct {
- _ string
- _ int
- _ int
- mu sync.Mutex // we're not complete monsters
- lines []int
- _ []struct{}
- }
-
- if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) {
- panic("unexpected token.File size")
- }
- var ptr *tokenFile119
- type uP = unsafe.Pointer
- *(*uP)(uP(&ptr)) = uP(file)
- ptr.mu.Lock()
- defer ptr.mu.Unlock()
- return ptr.lines
-}
-
-// AddExistingFiles adds the specified files to the FileSet if they
-// are not already present. It panics if any pair of files in the
-// resulting FileSet would overlap.
-func AddExistingFiles(fset *token.FileSet, files []*token.File) {
- // Punch through the FileSet encapsulation.
- type tokenFileSet struct {
- // This type remained essentially consistent from go1.16 to go1.21.
- mutex sync.RWMutex
- base int
- files []*token.File
- _ *token.File // changed to atomic.Pointer[token.File] in go1.19
- }
-
- // If the size of token.FileSet changes, this will fail to compile.
- const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{}))
- var _ [-delta * delta]int
-
- type uP = unsafe.Pointer
- var ptr *tokenFileSet
- *(*uP)(uP(&ptr)) = uP(fset)
- ptr.mutex.Lock()
- defer ptr.mutex.Unlock()
-
- // Merge and sort.
- newFiles := append(ptr.files, files...)
- sort.Slice(newFiles, func(i, j int) bool {
- return newFiles[i].Base() < newFiles[j].Base()
- })
-
- // Reject overlapping files.
- // Discard adjacent identical files.
- out := newFiles[:0]
- for i, file := range newFiles {
- if i > 0 {
- prev := newFiles[i-1]
- if file == prev {
- continue
- }
- if prev.Base()+prev.Size()+1 > file.Base() {
- panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
- prev.Name(), prev.Base(), prev.Base()+prev.Size(),
- file.Name(), file.Base(), file.Base()+file.Size()))
- }
- }
- out = append(out, file)
- }
- newFiles = out
-
- ptr.files = newFiles
-
- // Advance FileSet.Base().
- if len(newFiles) > 0 {
- last := newFiles[len(newFiles)-1]
- newBase := last.Base() + last.Size() + 1
- if ptr.base < newBase {
- ptr.base = newBase
- }
- }
-}
-
-// FileSetFor returns a new FileSet containing a sequence of new Files with
-// the same base, size, and line as the input files, for use in APIs that
-// require a FileSet.
-//
-// Precondition: the input files must be non-overlapping, and sorted in order
-// of their Base.
-func FileSetFor(files ...*token.File) *token.FileSet {
- fset := token.NewFileSet()
- for _, f := range files {
- f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
- lines := GetLines(f)
- f2.SetLines(lines)
- }
- return fset
-}
-
-// CloneFileSet creates a new FileSet holding all files in fset. It does not
-// create copies of the token.Files in fset: they are added to the resulting
-// FileSet unmodified.
-func CloneFileSet(fset *token.FileSet) *token.FileSet {
- var files []*token.File
- fset.Iterate(func(f *token.File) bool {
- files = append(files, f)
- return true
- })
- newFileSet := token.NewFileSet()
- AddExistingFiles(newFileSet, files)
- return newFileSet
-}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go
new file mode 100644
index 00000000..0b84acc5
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/common.go
@@ -0,0 +1,140 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package typeparams contains common utilities for writing tools that
+// interact with generic Go code, as introduced with Go 1.18. It
+// supplements the standard library APIs. Notably, the StructuralTerms
+// API computes a minimal representation of the structural
+// restrictions on a type parameter.
+//
+// An external version of these APIs is available in the
+// golang.org/x/exp/typeparams module.
+package typeparams
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// UnpackIndexExpr extracts data from AST nodes that represent index
+// expressions.
+//
+// For an ast.IndexExpr, the resulting indices slice will contain exactly one
+// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
+// number of index expressions.
+//
+// For nodes that don't represent index expressions, the first return value of
+// UnpackIndexExpr will be nil.
+func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
+ switch e := n.(type) {
+ case *ast.IndexExpr:
+ return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
+ case *ast.IndexListExpr:
+ return e.X, e.Lbrack, e.Indices, e.Rbrack
+ }
+ return nil, token.NoPos, nil, token.NoPos
+}
+
+// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
+// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
+// will panic.
+func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
+ switch len(indices) {
+ case 0:
+ panic("empty indices")
+ case 1:
+ return &ast.IndexExpr{
+ X: x,
+ Lbrack: lbrack,
+ Index: indices[0],
+ Rbrack: rbrack,
+ }
+ default:
+ return &ast.IndexListExpr{
+ X: x,
+ Lbrack: lbrack,
+ Indices: indices,
+ Rbrack: rbrack,
+ }
+ }
+}
+
+// IsTypeParam reports whether t is a type parameter (or an alias of one).
+func IsTypeParam(t types.Type) bool {
+ _, ok := types.Unalias(t).(*types.TypeParam)
+ return ok
+}
+
+// GenericAssignableTo is a generalization of types.AssignableTo that
+// implements the following rule for uninstantiated generic types:
+//
+// If V and T are generic named types, then V is considered assignable to T if,
+// for every possible instantiation of V[A_1, ..., A_N], the instantiation
+// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N].
+//
+// If T has structural constraints, they must be satisfied by V.
+//
+// For example, consider the following type declarations:
+//
+// type Interface[T any] interface {
+// Accept(T)
+// }
+//
+// type Container[T any] struct {
+// Element T
+// }
+//
+// func (c Container[T]) Accept(t T) { c.Element = t }
+//
+// In this case, GenericAssignableTo reports that instantiations of Container
+// are assignable to the corresponding instantiation of Interface.
+func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool {
+ V = types.Unalias(V)
+ T = types.Unalias(T)
+
+ // If V and T are not both named, or do not have matching non-empty type
+ // parameter lists, fall back on types.AssignableTo.
+
+ VN, Vnamed := V.(*types.Named)
+ TN, Tnamed := T.(*types.Named)
+ if !Vnamed || !Tnamed {
+ return types.AssignableTo(V, T)
+ }
+
+ vtparams := VN.TypeParams()
+ ttparams := TN.TypeParams()
+ if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 {
+ return types.AssignableTo(V, T)
+ }
+
+ // V and T have the same (non-zero) number of type params. Instantiate both
+ // with the type parameters of V. This must always succeed for V, and will
+ // succeed for T if and only if the type set of each type parameter of V is a
+ // subset of the type set of the corresponding type parameter of T, meaning
+ // that every instantiation of V corresponds to a valid instantiation of T.
+
+ // Minor optimization: ensure we share a context across the two
+ // instantiations below.
+ if ctxt == nil {
+ ctxt = types.NewContext()
+ }
+
+ var targs []types.Type
+ for i := 0; i < vtparams.Len(); i++ {
+ targs = append(targs, vtparams.At(i))
+ }
+
+ vinst, err := types.Instantiate(ctxt, V, targs, true)
+ if err != nil {
+ panic("type parameters should satisfy their own constraints")
+ }
+
+ tinst, err := types.Instantiate(ctxt, T, targs, true)
+ if err != nil {
+ return false
+ }
+
+ return types.AssignableTo(vinst, tinst)
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
new file mode 100644
index 00000000..6e83c6fb
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
@@ -0,0 +1,150 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "fmt"
+ "go/types"
+)
+
+// CoreType returns the core type of T or nil if T does not have a core type.
+//
+// See https://go.dev/ref/spec#Core_types for the definition of a core type.
+func CoreType(T types.Type) types.Type {
+ U := T.Underlying()
+ if _, ok := U.(*types.Interface); !ok {
+ return U // for non-interface types,
+ }
+
+ terms, err := NormalTerms(U)
+ if len(terms) == 0 || err != nil {
+ // len(terms) -> empty type set of interface.
+ // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set.
+ return nil // no core type.
+ }
+
+ U = terms[0].Type().Underlying()
+ var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying())
+ for identical = 1; identical < len(terms); identical++ {
+ if !types.Identical(U, terms[identical].Type().Underlying()) {
+ break
+ }
+ }
+
+ if identical == len(terms) {
+ // https://go.dev/ref/spec#Core_types
+ // "There is a single type U which is the underlying type of all types in the type set of T"
+ return U
+ }
+ ch, ok := U.(*types.Chan)
+ if !ok {
+ return nil // no core type as identical < len(terms) and U is not a channel.
+ }
+ // https://go.dev/ref/spec#Core_types
+ // "the type chan E if T contains only bidirectional channels, or the type chan<- E or
+ // <-chan E depending on the direction of the directional channels present."
+ for chans := identical; chans < len(terms); chans++ {
+ curr, ok := terms[chans].Type().Underlying().(*types.Chan)
+ if !ok {
+ return nil
+ }
+ if !types.Identical(ch.Elem(), curr.Elem()) {
+ return nil // channel elements are not identical.
+ }
+ if ch.Dir() == types.SendRecv {
+ // ch is bidirectional. We can safely always use curr's direction.
+ ch = curr
+ } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() {
+ // ch and curr are not bidirectional and not the same direction.
+ return nil
+ }
+ }
+ return ch
+}
+
+// NormalTerms returns a slice of terms representing the normalized structural
+// type restrictions of a type, if any.
+//
+// For all types other than *types.TypeParam, *types.Interface, and
+// *types.Union, this is just a single term with Tilde() == false and
+// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see
+// below.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration type
+// T[P interface{~int; m()}] int the structural restriction of the type
+// parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+// type A interface{ ~string|~[]byte }
+//
+// type B interface{ int|string }
+//
+// type C interface { ~string|~int }
+//
+// type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// NormalTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, NormalTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the type is
+// invalid, exceeds complexity bounds, or has an empty type set. In the latter
+// case, NormalTerms returns ErrEmptyTypeSet.
+//
+// NormalTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func NormalTerms(typ types.Type) ([]*types.Term, error) {
+ switch typ := typ.Underlying().(type) {
+ case *types.TypeParam:
+ return StructuralTerms(typ)
+ case *types.Union:
+ return UnionTermSet(typ)
+ case *types.Interface:
+ return InterfaceTermSet(typ)
+ default:
+ return []*types.Term{types.NewTerm(false, typ)}, nil
+ }
+}
+
+// Deref returns the type of the variable pointed to by t,
+// if t's core type is a pointer; otherwise it returns t.
+//
+// Do not assume that Deref(T)==T implies T is not a pointer:
+// consider "type T *T", for example.
+//
+// TODO(adonovan): ideally this would live in typesinternal, but that
+// creates an import cycle. Move there when we melt this package down.
+func Deref(t types.Type) types.Type {
+ if ptr, ok := CoreType(t).(*types.Pointer); ok {
+ return ptr.Elem()
+ }
+ return t
+}
+
+// MustDeref returns the type of the variable pointed to by t.
+// It panics if t's core type is not a pointer.
+//
+// TODO(adonovan): ideally this would live in typesinternal, but that
+// creates an import cycle. Move there when we melt this package down.
+func MustDeref(t types.Type) types.Type {
+ if ptr, ok := CoreType(t).(*types.Pointer); ok {
+ return ptr.Elem()
+ }
+ panic(fmt.Sprintf("%v is not a pointer", t))
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go
new file mode 100644
index 00000000..35810826
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/free.go
@@ -0,0 +1,118 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "go/types"
+)
+
+// Free is a memoization of the set of free type parameters within a
+// type. It makes a sequence of calls to [Free.Has] for overlapping
+// types more efficient. The zero value is ready for use.
+//
+// NOTE: Adapted from go/types/infer.go. If it is later exported, factor.
+type Free struct {
+ seen map[types.Type]bool
+}
+
+// Has reports whether the specified type has a free type parameter.
+func (w *Free) Has(typ types.Type) (res bool) {
+ // detect cycles
+ if x, ok := w.seen[typ]; ok {
+ return x
+ }
+ if w.seen == nil {
+ w.seen = make(map[types.Type]bool)
+ }
+ w.seen[typ] = false
+ defer func() {
+ w.seen[typ] = res
+ }()
+
+ switch t := typ.(type) {
+ case nil, *types.Basic: // TODO(gri) should nil be handled here?
+ break
+
+ case *types.Alias:
+ return w.Has(types.Unalias(t))
+
+ case *types.Array:
+ return w.Has(t.Elem())
+
+ case *types.Slice:
+ return w.Has(t.Elem())
+
+ case *types.Struct:
+ for i, n := 0, t.NumFields(); i < n; i++ {
+ if w.Has(t.Field(i).Type()) {
+ return true
+ }
+ }
+
+ case *types.Pointer:
+ return w.Has(t.Elem())
+
+ case *types.Tuple:
+ n := t.Len()
+ for i := 0; i < n; i++ {
+ if w.Has(t.At(i).Type()) {
+ return true
+ }
+ }
+
+ case *types.Signature:
+ // t.tparams may not be nil if we are looking at a signature
+ // of a generic function type (or an interface method) that is
+ // part of the type we're testing. We don't care about these type
+ // parameters.
+ // Similarly, the receiver of a method may declare (rather than
+ // use) type parameters, we don't care about those either.
+ // Thus, we only need to look at the input and result parameters.
+ return w.Has(t.Params()) || w.Has(t.Results())
+
+ case *types.Interface:
+ for i, n := 0, t.NumMethods(); i < n; i++ {
+ if w.Has(t.Method(i).Type()) {
+ return true
+ }
+ }
+ terms, err := InterfaceTermSet(t)
+ if err != nil {
+ return false // ill typed
+ }
+ for _, term := range terms {
+ if w.Has(term.Type()) {
+ return true
+ }
+ }
+
+ case *types.Map:
+ return w.Has(t.Key()) || w.Has(t.Elem())
+
+ case *types.Chan:
+ return w.Has(t.Elem())
+
+ case *types.Named:
+ args := t.TypeArgs()
+ // TODO(taking): this does not match go/types/infer.go. Check with rfindley.
+ if params := t.TypeParams(); params.Len() > args.Len() {
+ return true
+ }
+ for i, n := 0, args.Len(); i < n; i++ {
+ if w.Has(args.At(i)) {
+ return true
+ }
+ }
+ return w.Has(t.Underlying()) // recurse for types local to parameterized functions
+
+ case *types.TypeParam:
+ return true
+
+ default:
+ panic(t) // unreachable
+ }
+
+ return false
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
new file mode 100644
index 00000000..93c80fdc
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
@@ -0,0 +1,218 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeparams
+
+import (
+ "errors"
+ "fmt"
+ "go/types"
+ "os"
+ "strings"
+)
+
+//go:generate go run copytermlist.go
+
+const debug = false
+
+var ErrEmptyTypeSet = errors.New("empty type set")
+
+// StructuralTerms returns a slice of terms representing the normalized
+// structural type restrictions of a type parameter, if any.
+//
+// Structural type restrictions of a type parameter are created via
+// non-interface types embedded in its constraint interface (directly, or via a
+// chain of interface embeddings). For example, in the declaration
+//
+// type T[P interface{~int; m()}] int
+//
+// the structural restriction of the type parameter P is ~int.
+//
+// With interface embedding and unions, the specification of structural type
+// restrictions may be arbitrarily complex. For example, consider the
+// following:
+//
+// type A interface{ ~string|~[]byte }
+//
+// type B interface{ int|string }
+//
+// type C interface { ~string|~int }
+//
+// type T[P interface{ A|B; C }] int
+//
+// In this example, the structural type restriction of P is ~string|int: A|B
+// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int,
+// which when intersected with C (~string|~int) yields ~string|int.
+//
+// StructuralTerms computes these expansions and reductions, producing a
+// "normalized" form of the embeddings. A structural restriction is normalized
+// if it is a single union containing no interface terms, and is minimal in the
+// sense that removing any term changes the set of types satisfying the
+// constraint. It is left as a proof for the reader that, modulo sorting, there
+// is exactly one such normalized form.
+//
+// Because the minimal representation always takes this form, StructuralTerms
+// returns a slice of tilde terms corresponding to the terms of the union in
+// the normalized structural restriction. An error is returned if the
+// constraint interface is invalid, exceeds complexity bounds, or has an empty
+// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet.
+//
+// StructuralTerms makes no guarantees about the order of terms, except that it
+// is deterministic.
+func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) {
+ constraint := tparam.Constraint()
+ if constraint == nil {
+ return nil, fmt.Errorf("%s has nil constraint", tparam)
+ }
+ iface, _ := constraint.Underlying().(*types.Interface)
+ if iface == nil {
+ return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying())
+ }
+ return InterfaceTermSet(iface)
+}
+
+// InterfaceTermSet computes the normalized terms for a constraint interface,
+// returning an error if the term set cannot be computed or is empty. In the
+// latter case, the error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) {
+ return computeTermSet(iface)
+}
+
+// UnionTermSet computes the normalized terms for a union, returning an error
+// if the term set cannot be computed or is empty. In the latter case, the
+// error will be ErrEmptyTypeSet.
+//
+// See the documentation of StructuralTerms for more information on
+// normalization.
+func UnionTermSet(union *types.Union) ([]*types.Term, error) {
+ return computeTermSet(union)
+}
+
+func computeTermSet(typ types.Type) ([]*types.Term, error) {
+ tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0)
+ if err != nil {
+ return nil, err
+ }
+ if tset.terms.isEmpty() {
+ return nil, ErrEmptyTypeSet
+ }
+ if tset.terms.isAll() {
+ return nil, nil
+ }
+ var terms []*types.Term
+ for _, term := range tset.terms {
+ terms = append(terms, types.NewTerm(term.tilde, term.typ))
+ }
+ return terms, nil
+}
+
+// A termSet holds the normalized set of terms for a given type.
+//
+// The name termSet is intentionally distinct from 'type set': a type set is
+// all types that implement a type (and includes method restrictions), whereas
+// a term set just represents the structural restrictions on a type.
+type termSet struct {
+ complete bool
+ terms termlist
+}
+
+func indentf(depth int, format string, args ...interface{}) {
+ fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
+}
+
+func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) {
+ if t == nil {
+ panic("nil type")
+ }
+
+ if debug {
+ indentf(depth, "%s", t.String())
+ defer func() {
+ if err != nil {
+ indentf(depth, "=> %s", err)
+ } else {
+ indentf(depth, "=> %s", res.terms.String())
+ }
+ }()
+ }
+
+ const maxTermCount = 100
+ if tset, ok := seen[t]; ok {
+ if !tset.complete {
+ return nil, fmt.Errorf("cycle detected in the declaration of %s", t)
+ }
+ return tset, nil
+ }
+
+ // Mark the current type as seen to avoid infinite recursion.
+ tset := new(termSet)
+ defer func() {
+ tset.complete = true
+ }()
+ seen[t] = tset
+
+ switch u := t.Underlying().(type) {
+ case *types.Interface:
+ // The term set of an interface is the intersection of the term sets of its
+ // embedded types.
+ tset.terms = allTermlist
+ for i := 0; i < u.NumEmbeddeds(); i++ {
+ embedded := u.EmbeddedType(i)
+ if _, ok := embedded.Underlying().(*types.TypeParam); ok {
+ return nil, fmt.Errorf("invalid embedded type %T", embedded)
+ }
+ tset2, err := computeTermSetInternal(embedded, seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ tset.terms = tset.terms.intersect(tset2.terms)
+ }
+ case *types.Union:
+ // The term set of a union is the union of term sets of its terms.
+ tset.terms = nil
+ for i := 0; i < u.Len(); i++ {
+ t := u.Term(i)
+ var terms termlist
+ switch t.Type().Underlying().(type) {
+ case *types.Interface:
+ tset2, err := computeTermSetInternal(t.Type(), seen, depth+1)
+ if err != nil {
+ return nil, err
+ }
+ terms = tset2.terms
+ case *types.TypeParam, *types.Union:
+ // A stand-alone type parameter or union is not permitted as union
+ // term.
+ return nil, fmt.Errorf("invalid union term %T", t)
+ default:
+ if t.Type() == types.Typ[types.Invalid] {
+ continue
+ }
+ terms = termlist{{t.Tilde(), t.Type()}}
+ }
+ tset.terms = tset.terms.union(terms)
+ if len(tset.terms) > maxTermCount {
+ return nil, fmt.Errorf("exceeded max term count %d", maxTermCount)
+ }
+ }
+ case *types.TypeParam:
+ panic("unreachable")
+ default:
+ // For all other types, the term set is just a single non-tilde term
+ // holding the type itself.
+ if u != types.Typ[types.Invalid] {
+ tset.terms = termlist{{false, t}}
+ }
+ }
+ return tset, nil
+}
+
+// under is a facade for the go/types internal function of the same name. It is
+// used by typeterm.go.
+func under(t types.Type) types.Type {
+ return t.Underlying()
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
new file mode 100644
index 00000000..cbd12f80
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
@@ -0,0 +1,163 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import (
+ "bytes"
+ "go/types"
+)
+
+// A termlist represents the type set represented by the union
+// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+ if len(xl) == 0 {
+ return "∅"
+ }
+ var buf bytes.Buffer
+ for i, x := range xl {
+ if i > 0 {
+ buf.WriteString(" | ")
+ }
+ buf.WriteString(x.String())
+ }
+ return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+ // If there's a non-nil term, the entire list is not empty.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil {
+ return false
+ }
+ }
+ return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+ // If there's a 𝓤 term, the entire list is 𝓤.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil && x.typ == nil {
+ return true
+ }
+ }
+ return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ used := make([]bool, len(xl))
+ var rl termlist
+ for i, xi := range xl {
+ if xi == nil || used[i] {
+ continue
+ }
+ for j := i + 1; j < len(xl); j++ {
+ xj := xl[j]
+ if xj == nil || used[j] {
+ continue
+ }
+ if u1, u2 := xi.union(xj); u2 == nil {
+ // If we encounter a 𝓤 term, the entire list is 𝓤.
+ // Exit early.
+ // (Note that this is not just an optimization;
+ // if we continue, we may end up with a 𝓤 term
+ // and other terms and the result would not be
+ // in normal form.)
+ if u1.typ == nil {
+ return allTermlist
+ }
+ xi = u1
+ used[j] = true // xj is now unioned into xi - ignore it in future iterations
+ }
+ }
+ rl = append(rl, xi)
+ }
+ return rl
+}
+
+// union returns the union xl ∪ yl.
+func (xl termlist) union(yl termlist) termlist {
+ return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+ if xl.isEmpty() || yl.isEmpty() {
+ return nil
+ }
+
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ var rl termlist
+ for _, x := range xl {
+ for _, y := range yl {
+ if r := x.intersect(y); r != nil {
+ rl = append(rl, r)
+ }
+ }
+ }
+ return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+ // TODO(gri) this should be more efficient
+ return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t types.Type) bool {
+ for _, x := range xl {
+ if x.includes(t) {
+ return true
+ }
+ }
+ return false
+}
+
+// supersetOf reports whether y ⊆ xl.
+func (xl termlist) supersetOf(y *term) bool {
+ for _, x := range xl {
+ if y.subsetOf(x) {
+ return true
+ }
+ }
+ return false
+}
+
+// subsetOf reports whether xl ⊆ yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+ if yl.isEmpty() {
+ return xl.isEmpty()
+ }
+
+ // each term x of xl must be a subset of yl
+ for _, x := range xl {
+ if !yl.supersetOf(x) {
+ return false // x is not a subset yl
+ }
+ }
+ return true
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
new file mode 100644
index 00000000..7350bb70
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
@@ -0,0 +1,169 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import "go/types"
+
+// A term describes elementary type sets:
+//
+// ∅: (*term)(nil) == ∅ // set of no types (empty set)
+// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
+// T: &term{false, T} == {T} // set of type T
+// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
+type term struct {
+ tilde bool // valid if typ != nil
+ typ types.Type
+}
+
+func (x *term) String() string {
+ switch {
+ case x == nil:
+ return "∅"
+ case x.typ == nil:
+ return "𝓤"
+ case x.tilde:
+ return "~" + x.typ.String()
+ default:
+ return x.typ.String()
+ }
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return x == y
+ case x.typ == nil || y.typ == nil:
+ return x.typ == y.typ
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
+}
+
+// union returns the union x ∪ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+ // easy cases
+ switch {
+ case x == nil && y == nil:
+ return nil, nil // ∅ ∪ ∅ == ∅
+ case x == nil:
+ return y, nil // ∅ ∪ y == y
+ case y == nil:
+ return x, nil // x ∪ ∅ == x
+ case x.typ == nil:
+ return x, nil // 𝓤 ∪ y == 𝓤
+ case y.typ == nil:
+ return y, nil // x ∪ 𝓤 == 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return x, y // x ∪ y == (x, y) if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∪ ~t == ~t
+ // ~t ∪ T == ~t
+ // T ∪ ~t == ~t
+ // T ∪ T == T
+ if x.tilde || !y.tilde {
+ return x, nil
+ }
+ return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅
+ case x.typ == nil:
+ return y // 𝓤 ∩ y == y
+ case y.typ == nil:
+ return x // x ∩ 𝓤 == x
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return nil // x ∩ y == ∅ if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∩ ~t == ~t
+ // ~t ∩ T == T
+ // T ∩ ~t == T
+ // T ∩ T == T
+ if !x.tilde || y.tilde {
+ return x
+ }
+ return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t types.Type) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return false // t ∈ ∅ == false
+ case x.typ == nil:
+ return true // t ∈ 𝓤 == true
+ }
+ // ∅ ⊂ x ⊂ 𝓤
+
+ u := t
+ if x.tilde {
+ u = under(u)
+ }
+ return types.Identical(x.typ, u)
+}
+
+// subsetOf reports whether x ⊆ y.
+func (x *term) subsetOf(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return true // ∅ ⊆ y == true
+ case y == nil:
+ return false // x ⊆ ∅ == false since x != ∅
+ case y.typ == nil:
+ return true // x ⊆ 𝓤 == true
+ case x.typ == nil:
+ return false // 𝓤 ⊆ y == false since y != 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return false // x ⊆ y == false if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ⊆ ~t == true
+ // ~t ⊆ T == false
+ // T ⊆ ~t == true
+ // T ⊆ T == true
+ return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == ∅.
+// x.typ and y.typ must not be nil.
+func (x *term) disjoint(y *term) bool {
+ if debug && (x.typ == nil || y.typ == nil) {
+ panic("invalid argument(s)")
+ }
+ ux := x.typ
+ if y.tilde {
+ ux = under(ux)
+ }
+ uy := y.typ
+ if x.tilde {
+ uy = under(uy)
+ }
+ return !types.Identical(ux, uy)
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go
new file mode 100644
index 00000000..4957f021
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/element.go
@@ -0,0 +1,133 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "fmt"
+ "go/types"
+
+ "golang.org/x/tools/go/types/typeutil"
+)
+
+// ForEachElement calls f for type T and each type reachable from its
+// type through reflection. It does this by recursively stripping off
+// type constructors; in addition, for each named type N, the type *N
+// is added to the result as it may have additional methods.
+//
+// The caller must provide an initially empty set used to de-duplicate
+// identical types, potentially across multiple calls to ForEachElement.
+// (Its final value holds all the elements seen, matching the arguments
+// passed to f.)
+//
+// TODO(adonovan): share/harmonize with go/callgraph/rta.
+func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) {
+ var visit func(T types.Type, skip bool)
+ visit = func(T types.Type, skip bool) {
+ if !skip {
+ if seen, _ := rtypes.Set(T, true).(bool); seen {
+ return // de-dup
+ }
+
+ f(T) // notify caller of new element type
+ }
+
+ // Recursion over signatures of each method.
+ tmset := msets.MethodSet(T)
+ for i := 0; i < tmset.Len(); i++ {
+ sig := tmset.At(i).Type().(*types.Signature)
+ // It is tempting to call visit(sig, false)
+ // but, as noted in golang.org/cl/65450043,
+ // the Signature.Recv field is ignored by
+ // types.Identical and typeutil.Map, which
+ // is confusing at best.
+ //
+ // More importantly, the true signature rtype
+ // reachable from a method using reflection
+ // has no receiver but an extra ordinary parameter.
+ // For the Read method of io.Reader we want:
+ // func(Reader, []byte) (int, error)
+ // but here sig is:
+ // func([]byte) (int, error)
+ // with .Recv = Reader (though it is hard to
+ // notice because it doesn't affect Signature.String
+ // or types.Identical).
+ //
+ // TODO(adonovan): construct and visit the correct
+ // non-method signature with an extra parameter
+ // (though since unnamed func types have no methods
+ // there is essentially no actual demand for this).
+ //
+ // TODO(adonovan): document whether or not it is
+ // safe to skip non-exported methods (as RTA does).
+ visit(sig.Params(), true) // skip the Tuple
+ visit(sig.Results(), true) // skip the Tuple
+ }
+
+ switch T := T.(type) {
+ case *types.Alias:
+ visit(types.Unalias(T), skip) // emulates the pre-Alias behavior
+
+ case *types.Basic:
+ // nop
+
+ case *types.Interface:
+ // nop---handled by recursion over method set.
+
+ case *types.Pointer:
+ visit(T.Elem(), false)
+
+ case *types.Slice:
+ visit(T.Elem(), false)
+
+ case *types.Chan:
+ visit(T.Elem(), false)
+
+ case *types.Map:
+ visit(T.Key(), false)
+ visit(T.Elem(), false)
+
+ case *types.Signature:
+ if T.Recv() != nil {
+ panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv()))
+ }
+ visit(T.Params(), true) // skip the Tuple
+ visit(T.Results(), true) // skip the Tuple
+
+ case *types.Named:
+ // A pointer-to-named type can be derived from a named
+ // type via reflection. It may have methods too.
+ visit(types.NewPointer(T), false)
+
+ // Consider 'type T struct{S}' where S has methods.
+ // Reflection provides no way to get from T to struct{S},
+ // only to S, so the method set of struct{S} is unwanted,
+ // so set 'skip' flag during recursion.
+ visit(T.Underlying(), true) // skip the unnamed type
+
+ case *types.Array:
+ visit(T.Elem(), false)
+
+ case *types.Struct:
+ for i, n := 0, T.NumFields(); i < n; i++ {
+ // TODO(adonovan): document whether or not
+ // it is safe to skip non-exported fields.
+ visit(T.Field(i).Type(), false)
+ }
+
+ case *types.Tuple:
+ for i, n := 0, T.Len(); i < n; i++ {
+ visit(T.At(i).Type(), false)
+ }
+
+ case *types.TypeParam, *types.Union:
+ // forEachReachable must not be called on parameterized types.
+ panic(T)
+
+ default:
+ panic(T)
+ }
+ }
+ visit(T, false)
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
index 834e0538..131caab2 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
@@ -838,7 +838,7 @@ const (
// InvalidCap occurs when an argument to the cap built-in function is not of
// supported type.
//
- // See https://golang.org/ref/spec#Lengthand_capacity for information on
+ // See https://golang.org/ref/spec#Length_and_capacity for information on
// which underlying types are supported as arguments to cap and len.
//
// Example:
@@ -859,7 +859,7 @@ const (
// InvalidCopy occurs when the arguments are not of slice type or do not
// have compatible type.
//
- // See https://golang.org/ref/spec#Appendingand_copying_slices for more
+ // See https://golang.org/ref/spec#Appending_and_copying_slices for more
// information on the type requirements for the copy built-in.
//
// Example:
@@ -897,7 +897,7 @@ const (
// InvalidLen occurs when an argument to the len built-in function is not of
// supported type.
//
- // See https://golang.org/ref/spec#Lengthand_capacity for information on
+ // See https://golang.org/ref/spec#Length_and_capacity for information on
// which underlying types are supported as arguments to cap and len.
//
// Example:
@@ -914,7 +914,7 @@ const (
// InvalidMake occurs when make is called with an unsupported type argument.
//
- // See https://golang.org/ref/spec#Makingslices_maps_and_channels for
+ // See https://golang.org/ref/spec#Making_slices_maps_and_channels for
// information on the types that may be created using make.
//
// Example:
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
index fea7c8b7..ba6f4f4e 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
@@ -6,8 +6,6 @@ package typesinternal
import (
"go/types"
-
- "golang.org/x/tools/internal/aliases"
)
// ReceiverNamed returns the named type (if any) associated with the
@@ -15,11 +13,11 @@ import (
// It also reports whether a Pointer was present.
func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
t := recv.Type()
- if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
+ if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
isPtr = true
t = ptr.Elem()
}
- named, _ = aliases.Unalias(t).(*types.Named)
+ named, _ = types.Unalias(t).(*types.Named)
return
}
@@ -36,7 +34,7 @@ func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
// indirection from the type, regardless of named types (analogous to
// a LOAD instruction).
func Unpointer(t types.Type) types.Type {
- if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
+ if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
return ptr.Elem()
}
return t
diff --git a/vendor/golang.org/x/tools/internal/versions/constraint.go b/vendor/golang.org/x/tools/internal/versions/constraint.go
new file mode 100644
index 00000000..179063d4
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/constraint.go
@@ -0,0 +1,13 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+import "go/build/constraint"
+
+// ConstraintGoVersion is constraint.GoVersion (if built with go1.21+).
+// Otherwise nil.
+//
+// Deprecate once x/tools is after go1.21.
+var ConstraintGoVersion func(x constraint.Expr) string
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go
similarity index 75%
rename from vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
rename to vendor/golang.org/x/tools/internal/versions/constraint_go121.go
index b7ef216d..38011407 100644
--- a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go
+++ b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go
@@ -7,8 +7,8 @@
package versions
+import "go/build/constraint"
+
func init() {
- if Compare(toolchain, Go1_21) < 0 {
- toolchain = Go1_21
- }
+ ConstraintGoVersion = constraint.GoVersion
}
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go
deleted file mode 100644
index 377bf7a5..00000000
--- a/vendor/golang.org/x/tools/internal/versions/toolchain.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package versions
-
-// toolchain is maximum version (<1.22) that the go toolchain used
-// to build the current tool is known to support.
-//
-// When a tool is built with >=1.22, the value of toolchain is unused.
-//
-// x/tools does not support building with go <1.18. So we take this
-// as the minimum possible maximum.
-var toolchain string = Go1_18
diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
deleted file mode 100644
index f65beed9..00000000
--- a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.19
-// +build go1.19
-
-package versions
-
-func init() {
- if Compare(toolchain, Go1_19) < 0 {
- toolchain = Go1_19
- }
-}
diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go
index 562eef21..f0bb0d15 100644
--- a/vendor/golang.org/x/tools/internal/versions/types.go
+++ b/vendor/golang.org/x/tools/internal/versions/types.go
@@ -5,15 +5,34 @@
package versions
import (
+ "go/ast"
"go/types"
)
-// GoVersion returns the Go version of the type package.
-// It returns zero if no version can be determined.
-func GoVersion(pkg *types.Package) string {
- // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25.
- if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok {
- return pkg.GoVersion()
+// FileVersion returns a file's Go version.
+// The reported version is an unknown Future version if a
+// version cannot be determined.
+func FileVersion(info *types.Info, file *ast.File) string {
+ // In tools built with Go >= 1.22, the Go version of a file
+ // follow a cascades of sources:
+ // 1) types.Info.FileVersion, which follows the cascade:
+ // 1.a) file version (ast.File.GoVersion),
+ // 1.b) the package version (types.Config.GoVersion), or
+ // 2) is some unknown Future version.
+ //
+ // File versions require a valid package version to be provided to types
+ // in Config.GoVersion. Config.GoVersion is either from the package's module
+ // or the toolchain (go run). This value should be provided by go/packages
+ // or unitchecker.Config.GoVersion.
+ if v := info.FileVersions[file]; IsValid(v) {
+ return v
}
- return ""
+ // Note: we could instead return runtime.Version() [if valid].
+ // This would act as a max version on what a tool can support.
+ return Future
+}
+
+// InitFileVersions initializes info to record Go versions for Go files.
+func InitFileVersions(info *types.Info) {
+ info.FileVersions = make(map[*ast.File]string)
}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go
deleted file mode 100644
index b4345d33..00000000
--- a/vendor/golang.org/x/tools/internal/versions/types_go121.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.22
-// +build !go1.22
-
-package versions
-
-import (
- "go/ast"
- "go/types"
-)
-
-// FileVersion returns a language version (<=1.21) derived from runtime.Version()
-// or an unknown future version.
-func FileVersion(info *types.Info, file *ast.File) string {
- // In x/tools built with Go <= 1.21, we do not have Info.FileVersions
- // available. We use a go version derived from the toolchain used to
- // compile the tool by default.
- // This will be <= go1.21. We take this as the maximum version that
- // this tool can support.
- //
- // There are no features currently in x/tools that need to tell fine grained
- // differences for versions <1.22.
- return toolchain
-}
-
-// InitFileVersions is a noop when compiled with this Go version.
-func InitFileVersions(*types.Info) {}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go
deleted file mode 100644
index aac5db62..00000000
--- a/vendor/golang.org/x/tools/internal/versions/types_go122.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.22
-// +build go1.22
-
-package versions
-
-import (
- "go/ast"
- "go/types"
-)
-
-// FileVersion returns a file's Go version.
-// The reported version is an unknown Future version if a
-// version cannot be determined.
-func FileVersion(info *types.Info, file *ast.File) string {
- // In tools built with Go >= 1.22, the Go version of a file
- // follow a cascades of sources:
- // 1) types.Info.FileVersion, which follows the cascade:
- // 1.a) file version (ast.File.GoVersion),
- // 1.b) the package version (types.Config.GoVersion), or
- // 2) is some unknown Future version.
- //
- // File versions require a valid package version to be provided to types
- // in Config.GoVersion. Config.GoVersion is either from the package's module
- // or the toolchain (go run). This value should be provided by go/packages
- // or unitchecker.Config.GoVersion.
- if v := info.FileVersions[file]; IsValid(v) {
- return v
- }
- // Note: we could instead return runtime.Version() [if valid].
- // This would act as a max version on what a tool can support.
- return Future
-}
-
-// InitFileVersions initializes info to record Go versions for Go files.
-func InitFileVersions(info *types.Info) {
- info.FileVersions = make(map[*ast.File]string)
-}
diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go
index 8401be8c..024ffebd 100644
--- a/vendor/google.golang.org/protobuf/internal/descopts/options.go
+++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go
@@ -9,7 +9,7 @@
// dependency on the descriptor proto package).
package descopts
-import pref "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
// These variables are set by the init function in descriptor.pb.go via logic
// in internal/filetype. In other words, so long as the descriptor proto package
@@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect"
//
// Each variable is populated with a nil pointer to the options struct.
var (
- File pref.ProtoMessage
- Enum pref.ProtoMessage
- EnumValue pref.ProtoMessage
- Message pref.ProtoMessage
- Field pref.ProtoMessage
- Oneof pref.ProtoMessage
- ExtensionRange pref.ProtoMessage
- Service pref.ProtoMessage
- Method pref.ProtoMessage
+ File protoreflect.ProtoMessage
+ Enum protoreflect.ProtoMessage
+ EnumValue protoreflect.ProtoMessage
+ Message protoreflect.ProtoMessage
+ Field protoreflect.ProtoMessage
+ Oneof protoreflect.ProtoMessage
+ ExtensionRange protoreflect.ProtoMessage
+ Service protoreflect.ProtoMessage
+ Method protoreflect.ProtoMessage
)
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index df53ff40..fa790e0f 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -258,6 +258,7 @@ type (
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
IsWeak bool // promoted from google.protobuf.FieldOptions
+ IsLazy bool // promoted from google.protobuf.FieldOptions
Default defaultValue
ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
Enum protoreflect.EnumDescriptor
@@ -351,6 +352,7 @@ func (fd *Field) IsPacked() bool {
}
func (fd *Field) IsExtension() bool { return false }
func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
+func (fd *Field) IsLazy() bool { return fd.L1.IsLazy }
func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() }
func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() }
func (fd *Field) MapKey() protoreflect.FieldDescriptor {
@@ -425,6 +427,7 @@ type (
Extendee protoreflect.MessageDescriptor
Cardinality protoreflect.Cardinality
Kind protoreflect.Kind
+ IsLazy bool
EditionFeatures EditionFeatures
}
ExtensionL2 struct {
@@ -465,6 +468,7 @@ func (xd *Extension) IsPacked() bool {
}
func (xd *Extension) IsExtension() bool { return true }
func (xd *Extension) IsWeak() bool { return false }
+func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy }
func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated }
func (xd *Extension) IsMap() bool { return false }
func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil }
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index 8a57d60b..d2f54949 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) {
switch num {
case genid.FieldOptions_Packed_field_number:
xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
+ case genid.FieldOptions_Lazy_field_number:
+ xd.L1.IsLazy = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index e56c91a8..67a51b32 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -504,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) {
fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
case genid.FieldOptions_Weak_field_number:
fd.L1.IsWeak = protowire.DecodeBool(v)
+ case genid.FieldOptions_Lazy_field_number:
+ fd.L1.IsLazy = protowire.DecodeBool(v)
case FieldOptions_EnforceUTF8:
fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v)
}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index 11f5f356..fd4d0c83 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -68,7 +68,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number:
+ case genid.FeatureSet_Go_ext_number:
parent = unmarshalGoFeature(v, parent)
}
}
diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go
index 45ccd012..d9b9d916 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/doc.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go
@@ -6,6 +6,6 @@
// and the well-known types.
package genid
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
const GoogleProtobuf_package protoreflect.FullName = "google.protobuf"
diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
index 9a652a2b..7f67cbb6 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
@@ -12,20 +12,25 @@ import (
const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto"
-// Names for google.protobuf.GoFeatures.
+// Names for pb.GoFeatures.
const (
GoFeatures_message_name protoreflect.Name = "GoFeatures"
- GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures"
+ GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures"
)
-// Field names for google.protobuf.GoFeatures.
+// Field names for pb.GoFeatures.
const (
GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum"
- GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum"
+ GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum"
)
-// Field numbers for google.protobuf.GoFeatures.
+// Field numbers for pb.GoFeatures.
const (
GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1
)
+
+// Extension numbers
+const (
+ FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
index 8f9ea02f..bef5a25f 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
@@ -4,7 +4,7 @@
package genid
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
// Generic field names and numbers for synthetic map entry messages.
const (
diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
index 429384b8..9404270d 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
@@ -4,7 +4,7 @@
package genid
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
// Generic field name and number for messages in wrappers.proto.
const (
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
index 4bb0a7a2..0d5b546e 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
@@ -67,7 +67,6 @@ type lazyExtensionValue struct {
xi *extensionFieldInfo
value protoreflect.Value
b []byte
- fn func() protoreflect.Value
}
type ExtensionField struct {
@@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() {
}
f.lazy.value = val
} else {
- f.lazy.value = f.lazy.fn()
+ panic("No support for lazy fns for ExtensionField")
}
f.lazy.xi = nil
- f.lazy.fn = nil
f.lazy.b = nil
atomic.StoreUint32(&f.lazy.atomicOnce, 1)
}
@@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value)
f.lazy = nil
}
-// SetLazy sets the type and a value that is to be lazily evaluated upon first use.
-// This must not be called concurrently.
-func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) {
- f.typ = t
- f.lazy = &lazyExtensionValue{fn: fn}
-}
-
// Value returns the value of the extension field.
// This may be called concurrently.
func (f *ExtensionField) Value() protoreflect.Value {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
index 78ee47e4..7c1f66c8 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si
if err != nil {
return out, err
}
+ if cf.funcs.isInit == nil {
+ out.initialized = true
+ }
vi.Set(vw)
return out, nil
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
index 6b2fdbb7..78be9df3 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -189,6 +189,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
if mi.methods.Merge == nil {
mi.methods.Merge = mi.merge
}
+ if mi.methods.Equal == nil {
+ mi.methods.Equal = equal
+ }
}
// getUnknownBytes returns a *[]byte for the unknown fields.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
deleted file mode 100644
index 145c577b..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package impl
-
-import (
- "reflect"
-
- "google.golang.org/protobuf/encoding/protowire"
-)
-
-func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
- v := p.v.Elem().Int()
- return f.tagsize + protowire.SizeVarint(uint64(v))
-}
-
-func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- v := p.v.Elem().Int()
- b = protowire.AppendVarint(b, f.wiretag)
- b = protowire.AppendVarint(b, uint64(v))
- return b, nil
-}
-
-func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
- if wtyp != protowire.VarintType {
- return out, errUnknown
- }
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return out, errDecode
- }
- p.v.Elem().SetInt(int64(v))
- out.n = n
- return out, nil
-}
-
-func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- dst.v.Elem().Set(src.v.Elem())
-}
-
-var coderEnum = pointerCoderFuncs{
- size: sizeEnum,
- marshal: appendEnum,
- unmarshal: consumeEnum,
- merge: mergeEnum,
-}
-
-func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- if p.v.Elem().Int() == 0 {
- return 0
- }
- return sizeEnum(p, f, opts)
-}
-
-func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- if p.v.Elem().Int() == 0 {
- return b, nil
- }
- return appendEnum(b, p, f, opts)
-}
-
-func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- if src.v.Elem().Int() != 0 {
- dst.v.Elem().Set(src.v.Elem())
- }
-}
-
-var coderEnumNoZero = pointerCoderFuncs{
- size: sizeEnumNoZero,
- marshal: appendEnumNoZero,
- unmarshal: consumeEnum,
- merge: mergeEnumNoZero,
-}
-
-func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- return sizeEnum(pointer{p.v.Elem()}, f, opts)
-}
-
-func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- return appendEnum(b, pointer{p.v.Elem()}, f, opts)
-}
-
-func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
- if wtyp != protowire.VarintType {
- return out, errUnknown
- }
- if p.v.Elem().IsNil() {
- p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem()))
- }
- return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts)
-}
-
-func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- if !src.v.Elem().IsNil() {
- v := reflect.New(dst.v.Type().Elem().Elem())
- v.Elem().Set(src.v.Elem().Elem())
- dst.v.Elem().Set(v)
- }
-}
-
-var coderEnumPtr = pointerCoderFuncs{
- size: sizeEnumPtr,
- marshal: appendEnumPtr,
- unmarshal: consumeEnumPtr,
- merge: mergeEnumPtr,
-}
-
-func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- s := p.v.Elem()
- for i, llen := 0, s.Len(); i < llen; i++ {
- size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize
- }
- return size
-}
-
-func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- s := p.v.Elem()
- for i, llen := 0, s.Len(); i < llen; i++ {
- b = protowire.AppendVarint(b, f.wiretag)
- b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
- }
- return b, nil
-}
-
-func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
- s := p.v.Elem()
- if wtyp == protowire.BytesType {
- b, n := protowire.ConsumeBytes(b)
- if n < 0 {
- return out, errDecode
- }
- for len(b) > 0 {
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return out, errDecode
- }
- rv := reflect.New(s.Type().Elem()).Elem()
- rv.SetInt(int64(v))
- s.Set(reflect.Append(s, rv))
- b = b[n:]
- }
- out.n = n
- return out, nil
- }
- if wtyp != protowire.VarintType {
- return out, errUnknown
- }
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return out, errDecode
- }
- rv := reflect.New(s.Type().Elem()).Elem()
- rv.SetInt(int64(v))
- s.Set(reflect.Append(s, rv))
- out.n = n
- return out, nil
-}
-
-func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem()))
-}
-
-var coderEnumSlice = pointerCoderFuncs{
- size: sizeEnumSlice,
- marshal: appendEnumSlice,
- unmarshal: consumeEnumSlice,
- merge: mergeEnumSlice,
-}
-
-func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- s := p.v.Elem()
- llen := s.Len()
- if llen == 0 {
- return 0
- }
- n := 0
- for i := 0; i < llen; i++ {
- n += protowire.SizeVarint(uint64(s.Index(i).Int()))
- }
- return f.tagsize + protowire.SizeBytes(n)
-}
-
-func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- s := p.v.Elem()
- llen := s.Len()
- if llen == 0 {
- return b, nil
- }
- b = protowire.AppendVarint(b, f.wiretag)
- n := 0
- for i := 0; i < llen; i++ {
- n += protowire.SizeVarint(uint64(s.Index(i).Int()))
- }
- b = protowire.AppendVarint(b, uint64(n))
- for i := 0; i < llen; i++ {
- b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
- }
- return b, nil
-}
-
-var coderEnumPackedSlice = pointerCoderFuncs{
- size: sizeEnumPackedSlice,
- marshal: appendEnumPackedSlice,
- unmarshal: consumeEnumSlice,
- merge: mergeEnumSlice,
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
index 757642e2..077712c2 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine
-// +build !purego,!appengine
-
package impl
// When using unsafe pointers, we can just treat enum values as int32s.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
index e06ece55..f72ddd88 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value {
return protoreflect.ValueOfString(v.Convert(stringType).String())
}
func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value {
- // pref.Value.String never panics, so we go through an interface
+ // protoreflect.Value.String never panics, so we go through an interface
// conversion here to check the type.
s := v.Interface().(string)
if c.goType.Kind() == reflect.Slice && s == "" {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go
index febd2122..6254f5de 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/encode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go
@@ -10,7 +10,7 @@ import (
"sync/atomic"
"google.golang.org/protobuf/internal/flags"
- proto "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/proto"
piface "google.golang.org/protobuf/runtime/protoiface"
)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go
new file mode 100644
index 00000000..9f6c32a7
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go
@@ -0,0 +1,224 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "bytes"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func equal(in protoiface.EqualInput) protoiface.EqualOutput {
+ return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)}
+}
+
+// equalMessage is a fast-path variant of protoreflect.equalMessage.
+// It takes advantage of the internal messageState type to avoid
+// unnecessary allocations, type assertions.
+func equalMessage(mx, my protoreflect.Message) bool {
+ if mx == nil || my == nil {
+ return mx == my
+ }
+ if mx.Descriptor() != my.Descriptor() {
+ return false
+ }
+
+ msx, ok := mx.(*messageState)
+ if !ok {
+ return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+ }
+ msy, ok := my.(*messageState)
+ if !ok {
+ return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+ }
+
+ mi := msx.messageInfo()
+ miy := msy.messageInfo()
+ if mi != miy {
+ return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+ }
+ mi.init()
+ // Compares regular fields
+ // Modified Message.Range code that compares two messages of the same type
+ // while going over the fields.
+ for _, ri := range mi.rangeInfos {
+ var fd protoreflect.FieldDescriptor
+ var vx, vy protoreflect.Value
+
+ switch ri := ri.(type) {
+ case *fieldInfo:
+ hx := ri.has(msx.pointer())
+ hy := ri.has(msy.pointer())
+ if hx != hy {
+ return false
+ }
+ if !hx {
+ continue
+ }
+ fd = ri.fieldDesc
+ vx = ri.get(msx.pointer())
+ vy = ri.get(msy.pointer())
+ case *oneofInfo:
+ fnx := ri.which(msx.pointer())
+ fny := ri.which(msy.pointer())
+ if fnx != fny {
+ return false
+ }
+ if fnx <= 0 {
+ continue
+ }
+ fi := mi.fields[fnx]
+ fd = fi.fieldDesc
+ vx = fi.get(msx.pointer())
+ vy = fi.get(msy.pointer())
+ }
+
+ if !equalValue(fd, vx, vy) {
+ return false
+ }
+ }
+
+ // Compare extensions.
+ // This is more complicated because mx or my could have empty/nil extension maps,
+ // however some populated extension map values are equal to nil extension maps.
+ emx := mi.extensionMap(msx.pointer())
+ emy := mi.extensionMap(msy.pointer())
+ if emx != nil {
+ for k, x := range *emx {
+ xd := x.Type().TypeDescriptor()
+ xv := x.Value()
+ var y ExtensionField
+ ok := false
+ if emy != nil {
+ y, ok = (*emy)[k]
+ }
+ // We need to treat empty lists as equal to nil values
+ if emy == nil || !ok {
+ if xd.IsList() && xv.List().Len() == 0 {
+ continue
+ }
+ return false
+ }
+
+ if !equalValue(xd, xv, y.Value()) {
+ return false
+ }
+ }
+ }
+ if emy != nil {
+ // emy may have extensions emx does not have, need to check them as well
+ for k, y := range *emy {
+ if emx != nil {
+ // emx has the field, so we already checked it
+ if _, ok := (*emx)[k]; ok {
+ continue
+ }
+ }
+ // Empty lists are equal to nil
+ if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 {
+ continue
+ }
+
+ // Cant be equal if the extension is populated
+ return false
+ }
+ }
+
+ return equalUnknown(mx.GetUnknown(), my.GetUnknown())
+}
+
+func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool {
+ // slow path
+ if fd.Kind() != protoreflect.MessageKind {
+ return vx.Equal(vy)
+ }
+
+ // fast path special cases
+ if fd.IsMap() {
+ if fd.MapValue().Kind() == protoreflect.MessageKind {
+ return equalMessageMap(vx.Map(), vy.Map())
+ }
+ return vx.Equal(vy)
+ }
+
+ if fd.IsList() {
+ return equalMessageList(vx.List(), vy.List())
+ }
+
+ return equalMessage(vx.Message(), vy.Message())
+}
+
+// Mostly copied from protoreflect.equalMap.
+// This variant only works for messages as map types.
+// All other map types should be handled via Value.Equal.
+func equalMessageMap(mx, my protoreflect.Map) bool {
+ if mx.Len() != my.Len() {
+ return false
+ }
+ equal := true
+ mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
+ if !my.Has(k) {
+ equal = false
+ return false
+ }
+ vy := my.Get(k)
+ equal = equalMessage(vx.Message(), vy.Message())
+ return equal
+ })
+ return equal
+}
+
+// Mostly copied from protoreflect.equalList.
+// The only change is the usage of equalImpl instead of protoreflect.equalValue.
+func equalMessageList(lx, ly protoreflect.List) bool {
+ if lx.Len() != ly.Len() {
+ return false
+ }
+ for i := 0; i < lx.Len(); i++ {
+ // We only operate on messages here since equalImpl will not call us in any other case.
+ if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) {
+ return false
+ }
+ }
+ return true
+}
+
+// equalUnknown compares unknown fields by direct comparison on the raw bytes
+// of each individual field number.
+// Copied from protoreflect.equalUnknown.
+func equalUnknown(x, y protoreflect.RawFields) bool {
+ if len(x) != len(y) {
+ return false
+ }
+ if bytes.Equal([]byte(x), []byte(y)) {
+ return true
+ }
+
+ mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+ my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+ for len(x) > 0 {
+ fnum, _, n := protowire.ConsumeField(x)
+ mx[fnum] = append(mx[fnum], x[:n]...)
+ x = x[n:]
+ }
+ for len(y) > 0 {
+ fnum, _, n := protowire.ConsumeField(y)
+ my[fnum] = append(my[fnum], y[:n]...)
+ y = y[n:]
+ }
+ if len(mx) != len(my) {
+ return false
+ }
+
+ for k, v1 := range mx {
+ if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
index 6e8677ee..b6849d66 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
@@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool
func (x placeholderExtension) HasOptionalKeyword() bool { return false }
func (x placeholderExtension) IsExtension() bool { return true }
func (x placeholderExtension) IsWeak() bool { return false }
+func (x placeholderExtension) IsLazy() bool { return false }
func (x placeholderExtension) IsPacked() bool { return false }
func (x placeholderExtension) IsList() bool { return false }
func (x placeholderExtension) IsMap() bool { return false }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index 019399d4..741b5ed2 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -30,8 +30,8 @@ type MessageInfo struct {
// Desc is the underlying message descriptor type and must be populated.
Desc protoreflect.MessageDescriptor
- // Exporter must be provided in a purego environment in order to provide
- // access to unexported fields.
+ // Deprecated: Exporter will be removed the next time we bump
+ // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640
Exporter exporter
// OneofWrappers is list of pointers to oneof wrapper struct types.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
deleted file mode 100644
index da685e8a..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package impl
-
-import (
- "fmt"
- "reflect"
- "sync"
-)
-
-const UnsafeEnabled = false
-
-// Pointer is an opaque pointer type.
-type Pointer any
-
-// offset represents the offset to a struct field, accessible from a pointer.
-// The offset is the field index into a struct.
-type offset struct {
- index int
- export exporter
-}
-
-// offsetOf returns a field offset for the struct field.
-func offsetOf(f reflect.StructField, x exporter) offset {
- if len(f.Index) != 1 {
- panic("embedded structs are not supported")
- }
- if f.PkgPath == "" {
- return offset{index: f.Index[0]} // field is already exported
- }
- if x == nil {
- panic("exporter must be provided for unexported field")
- }
- return offset{index: f.Index[0], export: x}
-}
-
-// IsValid reports whether the offset is valid.
-func (f offset) IsValid() bool { return f.index >= 0 }
-
-// invalidOffset is an invalid field offset.
-var invalidOffset = offset{index: -1}
-
-// zeroOffset is a noop when calling pointer.Apply.
-var zeroOffset = offset{index: 0}
-
-// pointer is an abstract representation of a pointer to a struct or field.
-type pointer struct{ v reflect.Value }
-
-// pointerOf returns p as a pointer.
-func pointerOf(p Pointer) pointer {
- return pointerOfIface(p)
-}
-
-// pointerOfValue returns v as a pointer.
-func pointerOfValue(v reflect.Value) pointer {
- return pointer{v: v}
-}
-
-// pointerOfIface returns the pointer portion of an interface.
-func pointerOfIface(v any) pointer {
- return pointer{v: reflect.ValueOf(v)}
-}
-
-// IsNil reports whether the pointer is nil.
-func (p pointer) IsNil() bool {
- return p.v.IsNil()
-}
-
-// Apply adds an offset to the pointer to derive a new pointer
-// to a specified field. The current pointer must be pointing at a struct.
-func (p pointer) Apply(f offset) pointer {
- if f.export != nil {
- if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() {
- return pointer{v: v}
- }
- }
- return pointer{v: p.v.Elem().Field(f.index).Addr()}
-}
-
-// AsValueOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
-func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
- if got := p.v.Type().Elem(); got != t {
- panic(fmt.Sprintf("invalid type: got %v, want %v", got, t))
- }
- return p.v
-}
-
-// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to p.AsValueOf(t).Interface()
-func (p pointer) AsIfaceOf(t reflect.Type) any {
- return p.AsValueOf(t).Interface()
-}
-
-func (p pointer) Bool() *bool { return p.v.Interface().(*bool) }
-func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) }
-func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) }
-func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) }
-func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) }
-func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) }
-func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) }
-func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) }
-func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) }
-func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) }
-func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) }
-func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) }
-func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) }
-func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) }
-func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) }
-func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) }
-func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) }
-func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) }
-func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) }
-func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) }
-func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) }
-func (p pointer) String() *string { return p.v.Interface().(*string) }
-func (p pointer) StringPtr() **string { return p.v.Interface().(**string) }
-func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) }
-func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) }
-func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) }
-func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) }
-func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) }
-func (p pointer) Extensions() *map[int32]ExtensionField {
- return p.v.Interface().(*map[int32]ExtensionField)
-}
-
-func (p pointer) Elem() pointer {
- return pointer{v: p.v.Elem()}
-}
-
-// PointerSlice copies []*T from p as a new []pointer.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) PointerSlice() []pointer {
- // TODO: reconsider this
- if p.v.IsNil() {
- return nil
- }
- n := p.v.Elem().Len()
- s := make([]pointer, n)
- for i := 0; i < n; i++ {
- s[i] = pointer{v: p.v.Elem().Index(i)}
- }
- return s
-}
-
-// AppendPointerSlice appends v to p, which must be a []*T.
-func (p pointer) AppendPointerSlice(v pointer) {
- sp := p.v.Elem()
- sp.Set(reflect.Append(sp, v.v))
-}
-
-// SetPointer sets *p to v.
-func (p pointer) SetPointer(v pointer) {
- p.v.Elem().Set(v.v)
-}
-
-func growSlice(p pointer, addCap int) {
- // TODO: Once we only support Go 1.20 and newer, use reflect.Grow.
- in := p.v.Elem()
- out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap)
- reflect.Copy(out, in)
- p.v.Elem().Set(out)
-}
-
-func (p pointer) growBoolSlice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growInt32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growUint32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growInt64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growUint64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growFloat64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growFloat32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") }
-func (ms *messageState) pointer() pointer { panic("not supported") }
-func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") }
-func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") }
-func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") }
-
-type atomicNilMessage struct {
- once sync.Once
- m messageReflectWrapper
-}
-
-func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
- m.once.Do(func() {
- m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface())
- m.m.mi = mi
- })
- return &m.m
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index 5f20ca5d..79e18666 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine
-// +build !purego,!appengine
-
package impl
import (
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
deleted file mode 100644
index a1f6f333..00000000
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package strs
-
-import pref "google.golang.org/protobuf/reflect/protoreflect"
-
-func UnsafeString(b []byte) string {
- return string(b)
-}
-
-func UnsafeBytes(s string) []byte {
- return []byte(s)
-}
-
-type Builder struct{}
-
-func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName {
- return prefix.Append(name)
-}
-
-func (*Builder) MakeString(b []byte) string {
- return string(b)
-}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
index a008acd0..832a7988 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
+//go:build !go1.21
package strs
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
index 60166f2b..1ffddf68 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
+//go:build go1.21
package strs
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index dbbf1f68..fb8e15e8 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -51,8 +51,8 @@ import (
// 10. Send out the CL for review and submit it.
const (
Major = 1
- Minor = 34
- Patch = 2
+ Minor = 35
+ Patch = 1
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go
index 1a0be1b0..c36d4a9c 100644
--- a/vendor/google.golang.org/protobuf/proto/equal.go
+++ b/vendor/google.golang.org/protobuf/proto/equal.go
@@ -8,6 +8,7 @@ import (
"reflect"
"google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
)
// Equal reports whether two messages are equal,
@@ -51,6 +52,14 @@ func Equal(x, y Message) bool {
if mx.IsValid() != my.IsValid() {
return false
}
+
+ // Only one of the messages needs to implement the fast-path for it to work.
+ pmx := protoMethods(mx)
+ pmy := protoMethods(my)
+ if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil {
+ return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal
+ }
+
vx := protoreflect.ValueOfMessage(mx)
vy := protoreflect.ValueOfMessage(my)
return vx.Equal(vy)
diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
index d248f292..78445d11 100644
--- a/vendor/google.golang.org/protobuf/proto/extension.go
+++ b/vendor/google.golang.org/protobuf/proto/extension.go
@@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) {
// If the field is unpopulated, it returns the default value for
// scalars and an immutable, empty value for lists or messages.
// It panics if xt does not extend m.
+//
+// The type of the value is dependent on the field type of the extension.
+// For extensions generated by protoc-gen-go, the Go type is as follows:
+//
+// ╔═══════════════════╤═════════════════════════╗
+// ║ Go type │ Protobuf kind ║
+// ╠═══════════════════╪═════════════════════════╣
+// ║ bool │ bool ║
+// ║ int32 │ int32, sint32, sfixed32 ║
+// ║ int64 │ int64, sint64, sfixed64 ║
+// ║ uint32 │ uint32, fixed32 ║
+// ║ uint64 │ uint64, fixed64 ║
+// ║ float32 │ float ║
+// ║ float64 │ double ║
+// ║ string │ string ║
+// ║ []byte │ bytes ║
+// ║ protoreflect.Enum │ enum ║
+// ║ proto.Message │ message, group ║
+// ╚═══════════════════╧═════════════════════════╝
+//
+// The protoreflect.Enum and proto.Message types are the concrete Go type
+// associated with the named enum or message. Repeated fields are represented
+// using a Go slice of the base element type.
+//
+// If a generated extension descriptor variable is directly passed to
+// GetExtension, then the call should be followed immediately by a
+// type assertion to the expected output value. For example:
+//
+// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage)
+//
+// This pattern enables static analysis tools to verify that the asserted type
+// matches the Go type associated with the extension field and
+// also enables a possible future migration to a type-safe extension API.
+//
+// Since singular messages are the most common extension type, the pattern of
+// calling HasExtension followed by GetExtension may be simplified to:
+//
+// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil {
+// ... // make use of mm
+// }
+//
+// The mm variable is non-nil if and only if HasExtension reports true.
func GetExtension(m Message, xt protoreflect.ExtensionType) any {
// Treat nil message interface as an empty message; return the default.
if m == nil {
@@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any {
// SetExtension stores the value of an extension field.
// It panics if m is invalid, xt does not extend m, or if type of v
// is invalid for the specified extension field.
+//
+// The type of the value is dependent on the field type of the extension.
+// For extensions generated by protoc-gen-go, the Go type is as follows:
+//
+// ╔═══════════════════╤═════════════════════════╗
+// ║ Go type │ Protobuf kind ║
+// ╠═══════════════════╪═════════════════════════╣
+// ║ bool │ bool ║
+// ║ int32 │ int32, sint32, sfixed32 ║
+// ║ int64 │ int64, sint64, sfixed64 ║
+// ║ uint32 │ uint32, fixed32 ║
+// ║ uint64 │ uint64, fixed64 ║
+// ║ float32 │ float ║
+// ║ float64 │ double ║
+// ║ string │ string ║
+// ║ []byte │ bytes ║
+// ║ protoreflect.Enum │ enum ║
+// ║ proto.Message │ message, group ║
+// ╚═══════════════════╧═════════════════════════╝
+//
+// The protoreflect.Enum and proto.Message types are the concrete Go type
+// associated with the named enum or message. Repeated fields are represented
+// using a Go slice of the base element type.
+//
+// If a generated extension descriptor variable is directly passed to
+// SetExtension (e.g., foopb.E_MyExtension), then the value should be a
+// concrete type that matches the expected Go type for the extension descriptor
+// so that static analysis tools can verify type correctness.
+// This also enables a possible future migration to a type-safe extension API.
func SetExtension(m Message, xt protoreflect.ExtensionType, v any) {
xd := xt.TypeDescriptor()
pv := xt.ValueOf(v)
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
index d5d5af6e..742cb518 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
@@ -23,6 +23,7 @@ type (
Unmarshal func(unmarshalInput) (unmarshalOutput, error)
Merge func(mergeInput) mergeOutput
CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error)
+ Equal func(equalInput) equalOutput
}
supportFlags = uint64
sizeInput = struct {
@@ -75,4 +76,13 @@ type (
checkInitializedOutput = struct {
pragma.NoUnkeyedLiterals
}
+ equalInput = struct {
+ pragma.NoUnkeyedLiterals
+ MessageA Message
+ MessageB Message
+ }
+ equalOutput = struct {
+ pragma.NoUnkeyedLiterals
+ Equal bool
+ }
)
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
deleted file mode 100644
index 75f83a2a..00000000
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package protoreflect
-
-import "google.golang.org/protobuf/internal/pragma"
-
-type valueType int
-
-const (
- nilType valueType = iota
- boolType
- int32Type
- int64Type
- uint32Type
- uint64Type
- float32Type
- float64Type
- stringType
- bytesType
- enumType
- ifaceType
-)
-
-// value is a union where only one type can be represented at a time.
-// This uses a distinct field for each type. This is type safe in Go, but
-// occupies more memory than necessary (72B).
-type value struct {
- pragma.DoNotCompare // 0B
-
- typ valueType // 8B
- num uint64 // 8B
- str string // 16B
- bin []byte // 24B
- iface any // 16B
-}
-
-func valueOfString(v string) Value {
- return Value{typ: stringType, str: v}
-}
-func valueOfBytes(v []byte) Value {
- return Value{typ: bytesType, bin: v}
-}
-func valueOfIface(v any) Value {
- return Value{typ: ifaceType, iface: v}
-}
-
-func (v Value) getString() string {
- return v.str
-}
-func (v Value) getBytes() []byte {
- return v.bin
-}
-func (v Value) getIface() any {
- return v.iface
-}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
index 7f3583ea..0015fcb3 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
+//go:build !go1.21
package protoreflect
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
index f7d38699..479527b5 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
+//go:build go1.21
package protoreflect
diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
index 44cf467d..24615656 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
@@ -39,6 +39,9 @@ type Methods = struct {
// CheckInitialized returns an error if any required fields in the message are not set.
CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error)
+
+ // Equal compares two messages and returns EqualOutput.Equal == true if they are equal.
+ Equal func(EqualInput) EqualOutput
}
// SupportFlags indicate support for optional features.
@@ -166,3 +169,18 @@ type CheckInitializedInput = struct {
type CheckInitializedOutput = struct {
pragma.NoUnkeyedLiterals
}
+
+// EqualInput is input to the Equal method.
+type EqualInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ MessageA protoreflect.Message
+ MessageB protoreflect.Message
+}
+
+// EqualOutput is output from the Equal method.
+type EqualOutput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Equal bool
+}
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 83a5a645..0d20722d 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -254,11 +254,9 @@ func (x *Timestamp) check() uint {
func (x *Timestamp) Reset() {
*x = Timestamp{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Timestamp) String() string {
@@ -269,7 +267,7 @@ func (*Timestamp) ProtoMessage() {}
func (x *Timestamp) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -348,20 +346,6 @@ func file_google_protobuf_timestamp_proto_init() {
if File_google_protobuf_timestamp_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Timestamp); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ff1e8cfe..792d3807 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,5 +1,5 @@
-# github.com/99designs/gqlgen v0.17.49
-## explicit; go 1.20
+# github.com/99designs/gqlgen v0.17.55
+## explicit; go 1.22.5
github.com/99designs/gqlgen
github.com/99designs/gqlgen/api
github.com/99designs/gqlgen/codegen
@@ -27,14 +27,14 @@ github.com/99designs/gqlgen/plugin/servergen
# github.com/KyleBanks/depth v1.2.1
## explicit
github.com/KyleBanks/depth
-# github.com/Masterminds/semver/v3 v3.2.1
-## explicit; go 1.18
+# github.com/Masterminds/semver/v3 v3.3.0
+## explicit; go 1.21
github.com/Masterminds/semver/v3
-# github.com/adhocore/gronx v1.8.1
+# github.com/adhocore/gronx v1.19.1
## explicit; go 1.13
github.com/adhocore/gronx
-# github.com/agnivade/levenshtein v1.1.1
-## explicit; go 1.13
+# github.com/agnivade/levenshtein v1.2.0
+## explicit; go 1.21
github.com/agnivade/levenshtein
# github.com/andybalholm/brotli v1.1.0
## explicit; go 1.13
@@ -55,9 +55,10 @@ github.com/beorn7/perks/quantile
# github.com/boltdb/bolt v1.3.1
## explicit
github.com/boltdb/bolt
-# github.com/caddyserver/certmagic v0.21.3
+# github.com/caddyserver/certmagic v0.21.4
## explicit; go 1.21.0
github.com/caddyserver/certmagic
+github.com/caddyserver/certmagic/internal/atomicfile
# github.com/caddyserver/zerossl v0.1.3
## explicit; go 1.21.0
github.com/caddyserver/zerossl
@@ -67,7 +68,7 @@ github.com/cespare/xxhash/v2
# github.com/cpuguy83/go-md2man/v2 v2.0.4
## explicit; go 1.11
github.com/cpuguy83/go-md2man/v2/md2man
-# github.com/datarhei/gosrt v0.6.0
+# github.com/datarhei/gosrt v0.7.0
## explicit; go 1.20
github.com/datarhei/gosrt
github.com/datarhei/gosrt/circular
@@ -106,6 +107,13 @@ github.com/datarhei/joy4/utils/bits/pio
# github.com/davecgh/go-spew v1.1.1
## explicit
github.com/davecgh/go-spew/spew
+# github.com/dolthub/maphash v0.1.0
+## explicit; go 1.18
+github.com/dolthub/maphash
+# github.com/dolthub/swiss v0.2.1
+## explicit; go 1.18
+github.com/dolthub/swiss
+github.com/dolthub/swiss/simd
# github.com/dustin/go-humanize v1.0.1
## explicit; go 1.16
github.com/dustin/go-humanize
@@ -115,7 +123,7 @@ github.com/fatih/color
# github.com/fujiwara/shapeio v1.0.0
## explicit; go 1.16
github.com/fujiwara/shapeio
-# github.com/gabriel-vasile/mimetype v1.4.4
+# github.com/gabriel-vasile/mimetype v1.4.5
## explicit; go 1.20
github.com/gabriel-vasile/mimetype
github.com/gabriel-vasile/mimetype/internal/charset
@@ -151,7 +159,7 @@ github.com/go-playground/locales/currency
# github.com/go-playground/universal-translator v0.18.1
## explicit; go 1.18
github.com/go-playground/universal-translator
-# github.com/go-playground/validator/v10 v10.22.0
+# github.com/go-playground/validator/v10 v10.22.1
## explicit; go 1.18
github.com/go-playground/validator/v10
# github.com/gobwas/glob v0.2.3
@@ -212,7 +220,7 @@ github.com/hashicorp/golang-lru/simplelru
github.com/hashicorp/golang-lru/v2
github.com/hashicorp/golang-lru/v2/internal
github.com/hashicorp/golang-lru/v2/simplelru
-# github.com/hashicorp/raft v1.7.0
+# github.com/hashicorp/raft v1.7.1
## explicit; go 1.20
github.com/hashicorp/raft
# github.com/hashicorp/raft-boltdb/v2 v2.3.0
@@ -231,8 +239,8 @@ github.com/joho/godotenv/autoload
# github.com/josharian/intern v1.0.0
## explicit; go 1.5
github.com/josharian/intern
-# github.com/klauspost/compress v1.17.9
-## explicit; go 1.20
+# github.com/klauspost/compress v1.17.10
+## explicit; go 1.21
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
@@ -260,17 +268,16 @@ github.com/labstack/gommon/log
## explicit; go 1.18
github.com/leodido/go-urn
github.com/leodido/go-urn/scim/schema
-# github.com/lestrrat-go/strftime v1.0.6
-## explicit; go 1.13
+# github.com/lestrrat-go/strftime v1.1.0
+## explicit; go 1.21
github.com/lestrrat-go/strftime
-github.com/lestrrat-go/strftime/internal/errors
# github.com/libdns/libdns v0.2.2
## explicit; go 1.18
github.com/libdns/libdns
# github.com/lithammer/shortuuid/v4 v4.0.0
## explicit; go 1.13
github.com/lithammer/shortuuid/v4
-# github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae
+# github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683
## explicit; go 1.16
github.com/lufia/plan9stats
# github.com/mailru/easyjson v0.7.7
@@ -284,19 +291,20 @@ github.com/mattn/go-colorable
# github.com/mattn/go-isatty v0.0.20
## explicit; go 1.15
github.com/mattn/go-isatty
-# github.com/mholt/acmez/v2 v2.0.1
-## explicit; go 1.20
+# github.com/mholt/acmez/v2 v2.0.3
+## explicit; go 1.21.0
github.com/mholt/acmez/v2
github.com/mholt/acmez/v2/acme
-# github.com/miekg/dns v1.1.61
+# github.com/miekg/dns v1.1.62
## explicit; go 1.19
github.com/miekg/dns
# github.com/minio/md5-simd v1.1.2
## explicit; go 1.14
github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.74
+# github.com/minio/minio-go/v7 v7.0.77
## explicit; go 1.21
github.com/minio/minio-go/v7
+github.com/minio/minio-go/v7/pkg/cors
github.com/minio/minio-go/v7/pkg/credentials
github.com/minio/minio-go/v7/pkg/encrypt
github.com/minio/minio-go/v7/pkg/lifecycle
@@ -313,28 +321,24 @@ github.com/mitchellh/mapstructure
# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
## explicit
github.com/munnerz/goautoneg
-# github.com/pkg/errors v0.9.1
-## explicit
-github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
## explicit
github.com/pmezard/go-difflib/difflib
# github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55
## explicit; go 1.14
github.com/power-devops/perfstat
-# github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
-## explicit
-github.com/prep/average
-# github.com/prometheus/client_golang v1.19.1
+# github.com/prometheus/client_golang v1.20.4
## explicit; go 1.20
+github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
+github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
# github.com/prometheus/client_model v0.6.1
## explicit; go 1.19
github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.55.0
-## explicit; go 1.20
+# github.com/prometheus/common v0.60.0
+## explicit; go 1.21
github.com/prometheus/common/expfmt
github.com/prometheus/common/model
# github.com/prometheus/procfs v0.15.1
@@ -345,8 +349,8 @@ github.com/prometheus/procfs/internal/util
# github.com/puzpuzpuz/xsync/v3 v3.4.0
## explicit; go 1.18
github.com/puzpuzpuz/xsync/v3
-# github.com/rs/xid v1.5.0
-## explicit; go 1.12
+# github.com/rs/xid v1.6.0
+## explicit; go 1.16
github.com/rs/xid
# github.com/russross/blackfriday/v2 v2.1.0
## explicit
@@ -382,10 +386,10 @@ github.com/swaggo/swag
# github.com/tklauser/go-sysconf v0.3.14
## explicit; go 1.18
github.com/tklauser/go-sysconf
-# github.com/tklauser/numcpus v0.8.0
+# github.com/tklauser/numcpus v0.9.0
## explicit; go 1.18
github.com/tklauser/numcpus
-# github.com/urfave/cli/v2 v2.27.2
+# github.com/urfave/cli/v2 v2.27.4
## explicit; go 1.18
github.com/urfave/cli/v2
# github.com/valyala/bytebufferpool v1.0.0
@@ -394,7 +398,7 @@ github.com/valyala/bytebufferpool
# github.com/valyala/fasttemplate v1.2.2
## explicit; go 1.12
github.com/valyala/fasttemplate
-# github.com/vektah/gqlparser/v2 v2.5.16
+# github.com/vektah/gqlparser/v2 v2.5.17
## explicit; go 1.19
github.com/vektah/gqlparser/v2
github.com/vektah/gqlparser/v2/ast
@@ -412,14 +416,14 @@ github.com/xeipuuv/gojsonreference
# github.com/xeipuuv/gojsonschema v1.2.0
## explicit
github.com/xeipuuv/gojsonschema
-# github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913
-## explicit; go 1.15.0
+# github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1
+## explicit; go 1.15
github.com/xrash/smetrics
# github.com/yusufpapurcu/wmi v1.2.4
## explicit; go 1.16
github.com/yusufpapurcu/wmi
-# github.com/zeebo/blake3 v0.2.3
-## explicit; go 1.13
+# github.com/zeebo/blake3 v0.2.4
+## explicit; go 1.18
github.com/zeebo/blake3
github.com/zeebo/blake3/internal/alg
github.com/zeebo/blake3/internal/alg/compress
@@ -430,11 +434,11 @@ github.com/zeebo/blake3/internal/alg/hash/hash_avx2
github.com/zeebo/blake3/internal/alg/hash/hash_pure
github.com/zeebo/blake3/internal/consts
github.com/zeebo/blake3/internal/utils
-# go.etcd.io/bbolt v1.3.10
-## explicit; go 1.21
+# go.etcd.io/bbolt v1.3.11
+## explicit; go 1.22
go.etcd.io/bbolt
-# go.uber.org/automaxprocs v1.5.3
-## explicit; go 1.18
+# go.uber.org/automaxprocs v1.6.0
+## explicit; go 1.20
go.uber.org/automaxprocs/internal/cgroups
go.uber.org/automaxprocs/internal/runtime
go.uber.org/automaxprocs/maxprocs
@@ -452,7 +456,7 @@ go.uber.org/zap/internal/exit
go.uber.org/zap/internal/pool
go.uber.org/zap/internal/stacktrace
go.uber.org/zap/zapcore
-# golang.org/x/crypto v0.25.0
+# golang.org/x/crypto v0.28.0
## explicit; go 1.20
golang.org/x/crypto/acme
golang.org/x/crypto/acme/autocert
@@ -464,12 +468,12 @@ golang.org/x/crypto/ocsp
golang.org/x/crypto/pbkdf2
golang.org/x/crypto/scrypt
golang.org/x/crypto/sha3
-# golang.org/x/mod v0.19.0
-## explicit; go 1.18
+# golang.org/x/mod v0.21.0
+## explicit; go 1.22.0
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
golang.org/x/mod/semver
-# golang.org/x/net v0.27.0
+# golang.org/x/net v0.30.0
## explicit; go 1.18
golang.org/x/net/bpf
golang.org/x/net/html
@@ -484,16 +488,16 @@ golang.org/x/net/internal/socket
golang.org/x/net/ipv4
golang.org/x/net/ipv6
golang.org/x/net/publicsuffix
-# golang.org/x/sync v0.7.0
+# golang.org/x/sync v0.8.0
## explicit; go 1.18
golang.org/x/sync/errgroup
-# golang.org/x/sys v0.22.0
+# golang.org/x/sys v0.26.0
## explicit; go 1.18
golang.org/x/sys/cpu
golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
-# golang.org/x/text v0.16.0
+# golang.org/x/text v0.19.0
## explicit; go 1.18
golang.org/x/text/cases
golang.org/x/text/internal
@@ -505,11 +509,11 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# golang.org/x/time v0.5.0
+# golang.org/x/time v0.7.0
## explicit; go 1.18
golang.org/x/time/rate
-# golang.org/x/tools v0.23.0
-## explicit; go 1.19
+# golang.org/x/tools v0.26.0
+## explicit; go 1.22.0
golang.org/x/tools/go/ast/astutil
golang.org/x/tools/go/buildutil
golang.org/x/tools/go/gcexportdata
@@ -517,6 +521,7 @@ golang.org/x/tools/go/internal/cgo
golang.org/x/tools/go/loader
golang.org/x/tools/go/packages
golang.org/x/tools/go/types/objectpath
+golang.org/x/tools/go/types/typeutil
golang.org/x/tools/imports
golang.org/x/tools/internal/aliases
golang.org/x/tools/internal/event
@@ -530,11 +535,11 @@ golang.org/x/tools/internal/imports
golang.org/x/tools/internal/packagesinternal
golang.org/x/tools/internal/pkgbits
golang.org/x/tools/internal/stdlib
-golang.org/x/tools/internal/tokeninternal
+golang.org/x/tools/internal/typeparams
golang.org/x/tools/internal/typesinternal
golang.org/x/tools/internal/versions
-# google.golang.org/protobuf v1.34.2
-## explicit; go 1.20
+# google.golang.org/protobuf v1.35.1
+## explicit; go 1.21
google.golang.org/protobuf/encoding/protodelim
google.golang.org/protobuf/encoding/prototext
google.golang.org/protobuf/encoding/protowire